Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/ocrdma: Add driver for Emulex OneConnect IBoE RDMA adapter

Signed-off-by: Parav Pandit <parav.pandit@emulex.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>

authored by

Parav Pandit and committed by
Roland Dreier
fe2caefc 045508a8

+8393
+1
drivers/infiniband/Kconfig
··· 51 51 source "drivers/infiniband/hw/cxgb4/Kconfig" 52 52 source "drivers/infiniband/hw/mlx4/Kconfig" 53 53 source "drivers/infiniband/hw/nes/Kconfig" 54 + source "drivers/infiniband/hw/ocrdma/Kconfig" 54 55 55 56 source "drivers/infiniband/ulp/ipoib/Kconfig" 56 57
+1
drivers/infiniband/Makefile
··· 8 8 obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/ 9 9 obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/ 10 10 obj-$(CONFIG_INFINIBAND_NES) += hw/nes/ 11 + obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/ 11 12 obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ 12 13 obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ 13 14 obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
+8
drivers/infiniband/hw/ocrdma/Kconfig
··· 1 + config INFINIBAND_OCRDMA 2 + tristate "Emulex One Connect HCA support" 3 + depends on ETHERNET && NETDEVICES && PCI 4 + select NET_VENDOR_EMULEX 5 + select BE2NET 6 + ---help--- 7 + This driver provides low-level InfiniBand over Ethernet 8 + support for Emulex One Connect host channel adapters (HCAs).
+5
drivers/infiniband/hw/ocrdma/Makefile
··· 1 + ccflags-y := -Idrivers/net/ethernet/emulex/benet 2 + 3 + obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o 4 + 5 + ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o
+392
drivers/infiniband/hw/ocrdma/ocrdma.h
··· 1 + /******************************************************************* 2 + * This file is part of the Emulex RoCE Device Driver for * 3 + * RoCE (RDMA over Converged Ethernet) adapters. * 4 + * Copyright (C) 2008-2012 Emulex. All rights reserved. * 5 + * EMULEX and SLI are trademarks of Emulex. * 6 + * www.emulex.com * 7 + * * 8 + * This program is free software; you can redistribute it and/or * 9 + * modify it under the terms of version 2 of the GNU General * 10 + * Public License as published by the Free Software Foundation. * 11 + * This program is distributed in the hope that it will be useful. * 12 + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 + * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 + * more details, a copy of which can be found in the file COPYING * 18 + * included with this package. * 19 + * 20 + * Contact Information: 21 + * linux-drivers@emulex.com 22 + * 23 + * Emulex 24 + * 3333 Susan Street 25 + * Costa Mesa, CA 92626 26 + *******************************************************************/ 27 + 28 + #ifndef __OCRDMA_H__ 29 + #define __OCRDMA_H__ 30 + 31 + #include <linux/mutex.h> 32 + #include <linux/list.h> 33 + #include <linux/spinlock.h> 34 + #include <linux/pci.h> 35 + 36 + #include <rdma/ib_verbs.h> 37 + #include <rdma/ib_user_verbs.h> 38 + 39 + #include <be_roce.h> 40 + #include "ocrdma_sli.h" 41 + 42 + #define OCRDMA_ROCE_DEV_VERSION "1.0.0" 43 + #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" 44 + 45 + #define ocrdma_err(format, arg...) 
printk(KERN_ERR format, ##arg) 46 + 47 + #define OCRDMA_MAX_AH 512 48 + 49 + #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) 50 + 51 + struct ocrdma_dev_attr { 52 + u8 fw_ver[32]; 53 + u32 vendor_id; 54 + u32 device_id; 55 + u16 max_pd; 56 + u16 max_cq; 57 + u16 max_cqe; 58 + u16 max_qp; 59 + u16 max_wqe; 60 + u16 max_rqe; 61 + u32 max_inline_data; 62 + int max_send_sge; 63 + int max_recv_sge; 64 + int max_mr; 65 + u64 max_mr_size; 66 + u32 max_num_mr_pbl; 67 + int max_fmr; 68 + int max_map_per_fmr; 69 + int max_pages_per_frmr; 70 + u16 max_ord_per_qp; 71 + u16 max_ird_per_qp; 72 + 73 + int device_cap_flags; 74 + u8 cq_overflow_detect; 75 + u8 srq_supported; 76 + 77 + u32 wqe_size; 78 + u32 rqe_size; 79 + u32 ird_page_size; 80 + u8 local_ca_ack_delay; 81 + u8 ird; 82 + u8 num_ird_pages; 83 + }; 84 + 85 + struct ocrdma_pbl { 86 + void *va; 87 + dma_addr_t pa; 88 + }; 89 + 90 + struct ocrdma_queue_info { 91 + void *va; 92 + dma_addr_t dma; 93 + u32 size; 94 + u16 len; 95 + u16 entry_size; /* Size of an element in the queue */ 96 + u16 id; /* qid, where to ring the doorbell. 
*/ 97 + u16 head, tail; 98 + bool created; 99 + atomic_t used; /* Number of valid elements in the queue */ 100 + }; 101 + 102 + struct ocrdma_eq { 103 + struct ocrdma_queue_info q; 104 + u32 vector; 105 + int cq_cnt; 106 + struct ocrdma_dev *dev; 107 + char irq_name[32]; 108 + }; 109 + 110 + struct ocrdma_mq { 111 + struct ocrdma_queue_info sq; 112 + struct ocrdma_queue_info cq; 113 + bool rearm_cq; 114 + }; 115 + 116 + struct mqe_ctx { 117 + struct mutex lock; /* for serializing mailbox commands on MQ */ 118 + wait_queue_head_t cmd_wait; 119 + u32 tag; 120 + u16 cqe_status; 121 + u16 ext_status; 122 + bool cmd_done; 123 + }; 124 + 125 + struct ocrdma_dev { 126 + struct ib_device ibdev; 127 + struct ocrdma_dev_attr attr; 128 + 129 + struct mutex dev_lock; /* provides syncronise access to device data */ 130 + spinlock_t flush_q_lock ____cacheline_aligned; 131 + 132 + struct ocrdma_cq **cq_tbl; 133 + struct ocrdma_qp **qp_tbl; 134 + 135 + struct ocrdma_eq meq; 136 + struct ocrdma_eq *qp_eq_tbl; 137 + int eq_cnt; 138 + u16 base_eqid; 139 + u16 max_eq; 140 + 141 + union ib_gid *sgid_tbl; 142 + /* provided synchronization to sgid table for 143 + * updating gid entries triggered by notifier. 144 + */ 145 + spinlock_t sgid_lock; 146 + 147 + int gsi_qp_created; 148 + struct ocrdma_cq *gsi_sqcq; 149 + struct ocrdma_cq *gsi_rqcq; 150 + 151 + struct { 152 + struct ocrdma_av *va; 153 + dma_addr_t pa; 154 + u32 size; 155 + u32 num_ah; 156 + /* provide synchronization for av 157 + * entry allocations. 
158 + */ 159 + spinlock_t lock; 160 + u32 ahid; 161 + struct ocrdma_pbl pbl; 162 + } av_tbl; 163 + 164 + void *mbx_cmd; 165 + struct ocrdma_mq mq; 166 + struct mqe_ctx mqe_ctx; 167 + 168 + struct be_dev_info nic_info; 169 + 170 + struct list_head entry; 171 + int id; 172 + }; 173 + 174 + struct ocrdma_cq { 175 + struct ib_cq ibcq; 176 + struct ocrdma_dev *dev; 177 + struct ocrdma_cqe *va; 178 + u32 phase; 179 + u32 getp; /* pointer to pending wrs to 180 + * return to stack, wrap arounds 181 + * at max_hw_cqe 182 + */ 183 + u32 max_hw_cqe; 184 + bool phase_change; 185 + bool armed, solicited; 186 + bool arm_needed; 187 + 188 + spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization 189 + * to cq polling 190 + */ 191 + /* syncronizes cq completion handler invoked from multiple context */ 192 + spinlock_t comp_handler_lock ____cacheline_aligned; 193 + u16 id; 194 + u16 eqn; 195 + 196 + struct ocrdma_ucontext *ucontext; 197 + dma_addr_t pa; 198 + u32 len; 199 + atomic_t use_cnt; 200 + 201 + /* head of all qp's sq and rq for which cqes need to be flushed 202 + * by the software. 203 + */ 204 + struct list_head sq_head, rq_head; 205 + }; 206 + 207 + struct ocrdma_pd { 208 + struct ib_pd ibpd; 209 + struct ocrdma_dev *dev; 210 + struct ocrdma_ucontext *uctx; 211 + atomic_t use_cnt; 212 + u32 id; 213 + int num_dpp_qp; 214 + u32 dpp_page; 215 + bool dpp_enabled; 216 + }; 217 + 218 + struct ocrdma_ah { 219 + struct ib_ah ibah; 220 + struct ocrdma_dev *dev; 221 + struct ocrdma_av *av; 222 + u16 sgid_index; 223 + u32 id; 224 + }; 225 + 226 + struct ocrdma_qp_hwq_info { 227 + u8 *va; /* virtual address */ 228 + u32 max_sges; 229 + u32 head, tail; 230 + u32 entry_size; 231 + u32 max_cnt; 232 + u32 max_wqe_idx; 233 + u32 free_delta; 234 + u16 dbid; /* qid, where to ring the doorbell. 
*/ 235 + u32 len; 236 + dma_addr_t pa; 237 + }; 238 + 239 + struct ocrdma_srq { 240 + struct ib_srq ibsrq; 241 + struct ocrdma_dev *dev; 242 + u8 __iomem *db; 243 + /* provide synchronization to multiple context(s) posting rqe */ 244 + spinlock_t q_lock ____cacheline_aligned; 245 + 246 + struct ocrdma_qp_hwq_info rq; 247 + struct ocrdma_pd *pd; 248 + atomic_t use_cnt; 249 + u32 id; 250 + u64 *rqe_wr_id_tbl; 251 + u32 *idx_bit_fields; 252 + u32 bit_fields_len; 253 + }; 254 + 255 + struct ocrdma_qp { 256 + struct ib_qp ibqp; 257 + struct ocrdma_dev *dev; 258 + 259 + u8 __iomem *sq_db; 260 + /* provide synchronization to multiple context(s) posting wqe, rqe */ 261 + spinlock_t q_lock ____cacheline_aligned; 262 + struct ocrdma_qp_hwq_info sq; 263 + struct { 264 + uint64_t wrid; 265 + uint16_t dpp_wqe_idx; 266 + uint16_t dpp_wqe; 267 + uint8_t signaled; 268 + uint8_t rsvd[3]; 269 + } *wqe_wr_id_tbl; 270 + u32 max_inline_data; 271 + struct ocrdma_cq *sq_cq; 272 + /* list maintained per CQ to flush SQ errors */ 273 + struct list_head sq_entry; 274 + 275 + u8 __iomem *rq_db; 276 + struct ocrdma_qp_hwq_info rq; 277 + u64 *rqe_wr_id_tbl; 278 + struct ocrdma_cq *rq_cq; 279 + struct ocrdma_srq *srq; 280 + /* list maintained per CQ to flush RQ errors */ 281 + struct list_head rq_entry; 282 + 283 + enum ocrdma_qp_state state; /* QP state */ 284 + int cap_flags; 285 + u32 max_ord, max_ird; 286 + 287 + u32 id; 288 + struct ocrdma_pd *pd; 289 + 290 + enum ib_qp_type qp_type; 291 + 292 + int sgid_idx; 293 + u32 qkey; 294 + bool dpp_enabled; 295 + u8 *ird_q_va; 296 + }; 297 + 298 + #define OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp) \ 299 + (((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) && \ 300 + (qp->id < 64)) ? 
24 : 16) 301 + 302 + struct ocrdma_hw_mr { 303 + struct ocrdma_dev *dev; 304 + u32 lkey; 305 + u8 fr_mr; 306 + u8 remote_atomic; 307 + u8 remote_rd; 308 + u8 remote_wr; 309 + u8 local_rd; 310 + u8 local_wr; 311 + u8 mw_bind; 312 + u8 rsvd; 313 + u64 len; 314 + struct ocrdma_pbl *pbl_table; 315 + u32 num_pbls; 316 + u32 num_pbes; 317 + u32 pbl_size; 318 + u32 pbe_size; 319 + u64 fbo; 320 + u64 va; 321 + }; 322 + 323 + struct ocrdma_mr { 324 + struct ib_mr ibmr; 325 + struct ib_umem *umem; 326 + struct ocrdma_hw_mr hwmr; 327 + struct ocrdma_pd *pd; 328 + }; 329 + 330 + struct ocrdma_ucontext { 331 + struct ib_ucontext ibucontext; 332 + struct ocrdma_dev *dev; 333 + 334 + struct list_head mm_head; 335 + struct mutex mm_list_lock; /* protects list entries of mm type */ 336 + struct { 337 + u32 *va; 338 + dma_addr_t pa; 339 + u32 len; 340 + } ah_tbl; 341 + }; 342 + 343 + struct ocrdma_mm { 344 + struct { 345 + u64 phy_addr; 346 + unsigned long len; 347 + } key; 348 + struct list_head entry; 349 + }; 350 + 351 + static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev) 352 + { 353 + return container_of(ibdev, struct ocrdma_dev, ibdev); 354 + } 355 + 356 + static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext 357 + *ibucontext) 358 + { 359 + return container_of(ibucontext, struct ocrdma_ucontext, ibucontext); 360 + } 361 + 362 + static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd) 363 + { 364 + return container_of(ibpd, struct ocrdma_pd, ibpd); 365 + } 366 + 367 + static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq) 368 + { 369 + return container_of(ibcq, struct ocrdma_cq, ibcq); 370 + } 371 + 372 + static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp) 373 + { 374 + return container_of(ibqp, struct ocrdma_qp, ibqp); 375 + } 376 + 377 + static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr) 378 + { 379 + return container_of(ibmr, struct ocrdma_mr, ibmr); 380 + } 381 + 382 + static inline 
struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah) 383 + { 384 + return container_of(ibah, struct ocrdma_ah, ibah); 385 + } 386 + 387 + static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq) 388 + { 389 + return container_of(ibsrq, struct ocrdma_srq, ibsrq); 390 + } 391 + 392 + #endif
+134
drivers/infiniband/hw/ocrdma/ocrdma_abi.h
··· 1 + /******************************************************************* 2 + * This file is part of the Emulex RoCE Device Driver for * 3 + * RoCE (RDMA over Converged Ethernet) adapters. * 4 + * Copyright (C) 2008-2012 Emulex. All rights reserved. * 5 + * EMULEX and SLI are trademarks of Emulex. * 6 + * www.emulex.com * 7 + * * 8 + * This program is free software; you can redistribute it and/or * 9 + * modify it under the terms of version 2 of the GNU General * 10 + * Public License as published by the Free Software Foundation. * 11 + * This program is distributed in the hope that it will be useful. * 12 + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 + * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 + * more details, a copy of which can be found in the file COPYING * 18 + * included with this package. * 19 + * 20 + * Contact Information: 21 + * linux-drivers@emulex.com 22 + * 23 + * Emulex 24 + * 3333 Susan Street 25 + * Costa Mesa, CA 92626 26 + *******************************************************************/ 27 + 28 + #ifndef __OCRDMA_ABI_H__ 29 + #define __OCRDMA_ABI_H__ 30 + 31 + struct ocrdma_alloc_ucontext_resp { 32 + u32 dev_id; 33 + u32 wqe_size; 34 + u32 max_inline_data; 35 + u32 dpp_wqe_size; 36 + u64 ah_tbl_page; 37 + u32 ah_tbl_len; 38 + u32 rsvd; 39 + u8 fw_ver[32]; 40 + u32 rqe_size; 41 + u64 rsvd1; 42 + } __packed; 43 + 44 + /* user kernel communication data structures. 
*/ 45 + struct ocrdma_alloc_pd_ureq { 46 + u64 rsvd1; 47 + } __packed; 48 + 49 + struct ocrdma_alloc_pd_uresp { 50 + u32 id; 51 + u32 dpp_enabled; 52 + u32 dpp_page_addr_hi; 53 + u32 dpp_page_addr_lo; 54 + u64 rsvd1; 55 + } __packed; 56 + 57 + struct ocrdma_create_cq_ureq { 58 + u32 dpp_cq; 59 + u32 rsvd; 60 + } __packed; 61 + 62 + #define MAX_CQ_PAGES 8 63 + struct ocrdma_create_cq_uresp { 64 + u32 cq_id; 65 + u32 page_size; 66 + u32 num_pages; 67 + u32 max_hw_cqe; 68 + u64 page_addr[MAX_CQ_PAGES]; 69 + u64 db_page_addr; 70 + u32 db_page_size; 71 + u32 phase_change; 72 + u64 rsvd1; 73 + u64 rsvd2; 74 + } __packed; 75 + 76 + #define MAX_QP_PAGES 8 77 + #define MAX_UD_AV_PAGES 8 78 + 79 + struct ocrdma_create_qp_ureq { 80 + u8 enable_dpp_cq; 81 + u8 rsvd; 82 + u16 dpp_cq_id; 83 + u32 rsvd1; 84 + }; 85 + 86 + struct ocrdma_create_qp_uresp { 87 + u16 qp_id; 88 + u16 sq_dbid; 89 + u16 rq_dbid; 90 + u16 resv0; 91 + u32 sq_page_size; 92 + u32 rq_page_size; 93 + u32 num_sq_pages; 94 + u32 num_rq_pages; 95 + u64 sq_page_addr[MAX_QP_PAGES]; 96 + u64 rq_page_addr[MAX_QP_PAGES]; 97 + u64 db_page_addr; 98 + u32 db_page_size; 99 + u32 dpp_credit; 100 + u32 dpp_offset; 101 + u32 rsvd1; 102 + u32 num_wqe_allocated; 103 + u32 num_rqe_allocated; 104 + u32 free_wqe_delta; 105 + u32 free_rqe_delta; 106 + u32 db_sq_offset; 107 + u32 db_rq_offset; 108 + u32 db_shift; 109 + u64 rsvd2; 110 + u64 rsvd3; 111 + } __packed; 112 + 113 + struct ocrdma_create_srq_uresp { 114 + u16 rq_dbid; 115 + u16 resv0; 116 + u32 resv1; 117 + 118 + u32 rq_page_size; 119 + u32 num_rq_pages; 120 + 121 + u64 rq_page_addr[MAX_QP_PAGES]; 122 + u64 db_page_addr; 123 + 124 + u32 db_page_size; 125 + u32 num_rqe_allocated; 126 + u32 db_rq_offset; 127 + u32 db_shift; 128 + 129 + u32 free_rqe_delta; 130 + u32 rsvd2; 131 + u64 rsvd3; 132 + } __packed; 133 + 134 + #endif /* __OCRDMA_ABI_H__ */
+172
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
··· 1 + /******************************************************************* 2 + * This file is part of the Emulex RoCE Device Driver for * 3 + * RoCE (RDMA over Converged Ethernet) adapters. * 4 + * Copyright (C) 2008-2012 Emulex. All rights reserved. * 5 + * EMULEX and SLI are trademarks of Emulex. * 6 + * www.emulex.com * 7 + * * 8 + * This program is free software; you can redistribute it and/or * 9 + * modify it under the terms of version 2 of the GNU General * 10 + * Public License as published by the Free Software Foundation. * 11 + * This program is distributed in the hope that it will be useful. * 12 + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 + * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 + * more details, a copy of which can be found in the file COPYING * 18 + * included with this package. 
* 19 + * 20 + * Contact Information: 21 + * linux-drivers@emulex.com 22 + * 23 + * Emulex 24 + * 3333 Susan Street 25 + * Costa Mesa, CA 92626 26 + *******************************************************************/ 27 + 28 + #include <net/neighbour.h> 29 + #include <net/netevent.h> 30 + 31 + #include <rdma/ib_addr.h> 32 + #include <rdma/ib_cache.h> 33 + 34 + #include "ocrdma.h" 35 + #include "ocrdma_verbs.h" 36 + #include "ocrdma_ah.h" 37 + #include "ocrdma_hw.h" 38 + 39 + static inline int set_av_attr(struct ocrdma_ah *ah, 40 + struct ib_ah_attr *attr, int pdid) 41 + { 42 + int status = 0; 43 + u16 vlan_tag; bool vlan_enabled = false; 44 + struct ocrdma_dev *dev = ah->dev; 45 + struct ocrdma_eth_vlan eth; 46 + struct ocrdma_grh grh; 47 + int eth_sz; 48 + 49 + memset(&eth, 0, sizeof(eth)); 50 + memset(&grh, 0, sizeof(grh)); 51 + 52 + ah->sgid_index = attr->grh.sgid_index; 53 + 54 + vlan_tag = rdma_get_vlan_id(&attr->grh.dgid); 55 + if (vlan_tag && (vlan_tag < 0x1000)) { 56 + eth.eth_type = cpu_to_be16(0x8100); 57 + eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); 58 + vlan_tag |= (attr->sl & 7) << 13; 59 + eth.vlan_tag = cpu_to_be16(vlan_tag); 60 + eth_sz = sizeof(struct ocrdma_eth_vlan); 61 + vlan_enabled = true; 62 + } else { 63 + eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); 64 + eth_sz = sizeof(struct ocrdma_eth_basic); 65 + } 66 + memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN); 67 + status = ocrdma_resolve_dgid(dev, &attr->grh.dgid, &eth.dmac[0]); 68 + if (status) 69 + return status; 70 + status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, 71 + (union ib_gid *)&grh.sgid[0]); 72 + if (status) 73 + return status; 74 + 75 + grh.tclass_flow = cpu_to_be32((6 << 28) | 76 + (attr->grh.traffic_class << 24) | 77 + attr->grh.flow_label); 78 + /* 0x1b is next header value in GRH */ 79 + grh.pdid_hoplimit = cpu_to_be32((pdid << 16) | 80 + (0x1b << 8) | attr->grh.hop_limit); 81 + 82 + memcpy(&grh.dgid[0], attr->grh.dgid.raw, 
sizeof(attr->grh.dgid.raw)); 83 + memcpy(&ah->av->eth_hdr, &eth, eth_sz); 84 + memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); 85 + if (vlan_enabled) 86 + ah->av->valid |= OCRDMA_AV_VLAN_VALID; 87 + return status; 88 + } 89 + 90 + struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) 91 + { 92 + u32 *ahid_addr; 93 + int status; 94 + struct ocrdma_ah *ah; 95 + struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 96 + struct ocrdma_dev *dev = pd->dev; 97 + 98 + if (!(attr->ah_flags & IB_AH_GRH)) 99 + return ERR_PTR(-EINVAL); 100 + 101 + ah = kzalloc(sizeof *ah, GFP_ATOMIC); 102 + if (!ah) 103 + return ERR_PTR(-ENOMEM); 104 + ah->dev = pd->dev; 105 + 106 + status = ocrdma_alloc_av(dev, ah); 107 + if (status) 108 + goto av_err; 109 + status = set_av_attr(ah, attr, pd->id); 110 + if (status) 111 + goto av_conf_err; 112 + 113 + /* if pd is for the user process, pass the ah_id to user space */ 114 + if ((pd->uctx) && (pd->uctx->ah_tbl.va)) { 115 + ahid_addr = pd->uctx->ah_tbl.va + attr->dlid; 116 + *ahid_addr = ah->id; 117 + } 118 + return &ah->ibah; 119 + 120 + av_conf_err: 121 + ocrdma_free_av(dev, ah); 122 + av_err: 123 + kfree(ah); 124 + return ERR_PTR(status); 125 + } 126 + 127 + int ocrdma_destroy_ah(struct ib_ah *ibah) 128 + { 129 + struct ocrdma_ah *ah = get_ocrdma_ah(ibah); 130 + ocrdma_free_av(ah->dev, ah); 131 + kfree(ah); 132 + return 0; 133 + } 134 + 135 + int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr) 136 + { 137 + struct ocrdma_ah *ah = get_ocrdma_ah(ibah); 138 + struct ocrdma_av *av = ah->av; 139 + struct ocrdma_grh *grh; 140 + attr->ah_flags |= IB_AH_GRH; 141 + if (ah->av->valid & Bit(1)) { 142 + grh = (struct ocrdma_grh *)((u8 *)ah->av + 143 + sizeof(struct ocrdma_eth_vlan)); 144 + attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13; 145 + } else { 146 + grh = (struct ocrdma_grh *)((u8 *)ah->av + 147 + sizeof(struct ocrdma_eth_basic)); 148 + attr->sl = 0; 149 + } 150 + memcpy(&attr->grh.dgid.raw[0], 
&grh->dgid[0], sizeof(grh->dgid)); 151 + attr->grh.sgid_index = ah->sgid_index; 152 + attr->grh.hop_limit = be32_to_cpu(grh->pdid_hoplimit) & 0xff; 153 + attr->grh.traffic_class = be32_to_cpu(grh->tclass_flow) >> 24; 154 + attr->grh.flow_label = be32_to_cpu(grh->tclass_flow) & 0x00ffffffff; 155 + return 0; 156 + } 157 + 158 + int ocrdma_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr) 159 + { 160 + /* modify_ah is unsupported */ 161 + return -ENOSYS; 162 + } 163 + 164 + int ocrdma_process_mad(struct ib_device *ibdev, 165 + int process_mad_flags, 166 + u8 port_num, 167 + struct ib_wc *in_wc, 168 + struct ib_grh *in_grh, 169 + struct ib_mad *in_mad, struct ib_mad *out_mad) 170 + { 171 + return IB_MAD_RESULT_SUCCESS; 172 + }
+42
drivers/infiniband/hw/ocrdma/ocrdma_ah.h
··· 1 + /******************************************************************* 2 + * This file is part of the Emulex RoCE Device Driver for * 3 + * RoCE (RDMA over Converged Ethernet) adapters. * 4 + * Copyright (C) 2008-2012 Emulex. All rights reserved. * 5 + * EMULEX and SLI are trademarks of Emulex. * 6 + * www.emulex.com * 7 + * * 8 + * This program is free software; you can redistribute it and/or * 9 + * modify it under the terms of version 2 of the GNU General * 10 + * Public License as published by the Free Software Foundation. * 11 + * This program is distributed in the hope that it will be useful. * 12 + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 + * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 + * more details, a copy of which can be found in the file COPYING * 18 + * included with this package. * 19 + * 20 + * Contact Information: 21 + * linux-drivers@emulex.com 22 + * 23 + * Emulex 24 + * 3333 Susan Street 25 + * Costa Mesa, CA 92626 26 + *******************************************************************/ 27 + 28 + #ifndef __OCRDMA_AH_H__ 29 + #define __OCRDMA_AH_H__ 30 + 31 + struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *); 32 + int ocrdma_destroy_ah(struct ib_ah *); 33 + int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *); 34 + int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *); 35 + 36 + int ocrdma_process_mad(struct ib_device *, 37 + int process_mad_flags, 38 + u8 port_num, 39 + struct ib_wc *in_wc, 40 + struct ib_grh *in_grh, 41 + struct ib_mad *in_mad, struct ib_mad *out_mad); 42 + #endif /* __OCRDMA_AH_H__ */
+2640
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
··· 1 + /******************************************************************* 2 + * This file is part of the Emulex RoCE Device Driver for * 3 + * RoCE (RDMA over Converged Ethernet) CNA Adapters. * 4 + * Copyright (C) 2008-2012 Emulex. All rights reserved. * 5 + * EMULEX and SLI are trademarks of Emulex. * 6 + * www.emulex.com * 7 + * * 8 + * This program is free software; you can redistribute it and/or * 9 + * modify it under the terms of version 2 of the GNU General * 10 + * Public License as published by the Free Software Foundation. * 11 + * This program is distributed in the hope that it will be useful. * 12 + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 + * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 + * more details, a copy of which can be found in the file COPYING * 18 + * included with this package. 
* 19 + * 20 + * Contact Information: 21 + * linux-drivers@emulex.com 22 + * 23 + * Emulex 24 + * 3333 Susan Street 25 + * Costa Mesa, CA 92626 26 + *******************************************************************/ 27 + 28 + #include <linux/sched.h> 29 + #include <linux/interrupt.h> 30 + #include <linux/log2.h> 31 + #include <linux/dma-mapping.h> 32 + 33 + #include <rdma/ib_verbs.h> 34 + #include <rdma/ib_user_verbs.h> 35 + #include <rdma/ib_addr.h> 36 + 37 + #include "ocrdma.h" 38 + #include "ocrdma_hw.h" 39 + #include "ocrdma_verbs.h" 40 + #include "ocrdma_ah.h" 41 + 42 + enum mbx_status { 43 + OCRDMA_MBX_STATUS_FAILED = 1, 44 + OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3, 45 + OCRDMA_MBX_STATUS_OOR = 100, 46 + OCRDMA_MBX_STATUS_INVALID_PD = 101, 47 + OCRDMA_MBX_STATUS_PD_INUSE = 102, 48 + OCRDMA_MBX_STATUS_INVALID_CQ = 103, 49 + OCRDMA_MBX_STATUS_INVALID_QP = 104, 50 + OCRDMA_MBX_STATUS_INVALID_LKEY = 105, 51 + OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106, 52 + OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107, 53 + OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108, 54 + OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109, 55 + OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110, 56 + OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111, 57 + OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112, 58 + OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113, 59 + OCRDMA_MBX_STATUS_MW_BOUND = 114, 60 + OCRDMA_MBX_STATUS_INVALID_VA = 115, 61 + OCRDMA_MBX_STATUS_INVALID_LENGTH = 116, 62 + OCRDMA_MBX_STATUS_INVALID_FBO = 117, 63 + OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118, 64 + OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119, 65 + OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120, 66 + OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121, 67 + OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129, 68 + OCRDMA_MBX_STATUS_SRQ_ERROR = 133, 69 + OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134, 70 + OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135, 71 + OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136, 72 + OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137, 73 + OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138, 74 + OCRDMA_MBX_STATUS_QP_BOUND = 130, 
75 + OCRDMA_MBX_STATUS_INVALID_CHANGE = 139, 76 + OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140, 77 + OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141, 78 + OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142, 79 + OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143, 80 + OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 144 81 + }; 82 + 83 + enum additional_status { 84 + OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22 85 + }; 86 + 87 + enum cqe_status { 88 + OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1, 89 + OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2, 90 + OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3, 91 + OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4, 92 + OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 5 93 + }; 94 + 95 + static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq) 96 + { 97 + return (u8 *)eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe)); 98 + } 99 + 100 + static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq) 101 + { 102 + eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1); 103 + } 104 + 105 + static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev) 106 + { 107 + struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *) 108 + ((u8 *) dev->mq.cq.va + 109 + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe))); 110 + 111 + if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK)) 112 + return NULL; 113 + return cqe; 114 + } 115 + 116 + static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev) 117 + { 118 + dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1); 119 + } 120 + 121 + static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev) 122 + { 123 + return (struct ocrdma_mqe *)((u8 *) dev->mq.sq.va + 124 + (dev->mq.sq.head * 125 + sizeof(struct ocrdma_mqe))); 126 + } 127 + 128 + static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev) 129 + { 130 + dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1); 131 + atomic_inc(&dev->mq.sq.used); 132 + } 133 + 134 + static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev) 135 + { 136 + return 
(void *)((u8 *) dev->mq.sq.va + 137 + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe))); 138 + } 139 + 140 + enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps) 141 + { 142 + switch (qps) { 143 + case OCRDMA_QPS_RST: 144 + return IB_QPS_RESET; 145 + case OCRDMA_QPS_INIT: 146 + return IB_QPS_INIT; 147 + case OCRDMA_QPS_RTR: 148 + return IB_QPS_RTR; 149 + case OCRDMA_QPS_RTS: 150 + return IB_QPS_RTS; 151 + case OCRDMA_QPS_SQD: 152 + case OCRDMA_QPS_SQ_DRAINING: 153 + return IB_QPS_SQD; 154 + case OCRDMA_QPS_SQE: 155 + return IB_QPS_SQE; 156 + case OCRDMA_QPS_ERR: 157 + return IB_QPS_ERR; 158 + }; 159 + return IB_QPS_ERR; 160 + } 161 + 162 + enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps) 163 + { 164 + switch (qps) { 165 + case IB_QPS_RESET: 166 + return OCRDMA_QPS_RST; 167 + case IB_QPS_INIT: 168 + return OCRDMA_QPS_INIT; 169 + case IB_QPS_RTR: 170 + return OCRDMA_QPS_RTR; 171 + case IB_QPS_RTS: 172 + return OCRDMA_QPS_RTS; 173 + case IB_QPS_SQD: 174 + return OCRDMA_QPS_SQD; 175 + case IB_QPS_SQE: 176 + return OCRDMA_QPS_SQE; 177 + case IB_QPS_ERR: 178 + return OCRDMA_QPS_ERR; 179 + }; 180 + return OCRDMA_QPS_ERR; 181 + } 182 + 183 + static int ocrdma_get_mbx_errno(u32 status) 184 + { 185 + int err_num = -EFAULT; 186 + u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >> 187 + OCRDMA_MBX_RSP_STATUS_SHIFT; 188 + u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >> 189 + OCRDMA_MBX_RSP_ASTATUS_SHIFT; 190 + 191 + switch (mbox_status) { 192 + case OCRDMA_MBX_STATUS_OOR: 193 + case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS: 194 + err_num = -EAGAIN; 195 + break; 196 + 197 + case OCRDMA_MBX_STATUS_INVALID_PD: 198 + case OCRDMA_MBX_STATUS_INVALID_CQ: 199 + case OCRDMA_MBX_STATUS_INVALID_SRQ_ID: 200 + case OCRDMA_MBX_STATUS_INVALID_QP: 201 + case OCRDMA_MBX_STATUS_INVALID_CHANGE: 202 + case OCRDMA_MBX_STATUS_MTU_EXCEEDS: 203 + case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER: 204 + case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID: 205 + case 
OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS: 206 + case OCRDMA_MBX_STATUS_ILLEGAL_FIELD: 207 + case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY: 208 + case OCRDMA_MBX_STATUS_INVALID_LKEY: 209 + case OCRDMA_MBX_STATUS_INVALID_VA: 210 + case OCRDMA_MBX_STATUS_INVALID_LENGTH: 211 + case OCRDMA_MBX_STATUS_INVALID_FBO: 212 + case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS: 213 + case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE: 214 + case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP: 215 + case OCRDMA_MBX_STATUS_SRQ_ERROR: 216 + case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS: 217 + err_num = -EINVAL; 218 + break; 219 + 220 + case OCRDMA_MBX_STATUS_PD_INUSE: 221 + case OCRDMA_MBX_STATUS_QP_BOUND: 222 + case OCRDMA_MBX_STATUS_MW_STILL_BOUND: 223 + case OCRDMA_MBX_STATUS_MW_BOUND: 224 + err_num = -EBUSY; 225 + break; 226 + 227 + case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS: 228 + case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS: 229 + case OCRDMA_MBX_STATUS_RQE_EXCEEDS: 230 + case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS: 231 + case OCRDMA_MBX_STATUS_ORD_EXCEEDS: 232 + case OCRDMA_MBX_STATUS_IRD_EXCEEDS: 233 + case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS: 234 + case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS: 235 + case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS: 236 + err_num = -ENOBUFS; 237 + break; 238 + 239 + case OCRDMA_MBX_STATUS_FAILED: 240 + switch (add_status) { 241 + case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES: 242 + err_num = -EAGAIN; 243 + break; 244 + } 245 + default: 246 + err_num = -EFAULT; 247 + } 248 + return err_num; 249 + } 250 + 251 + static int ocrdma_get_mbx_cqe_errno(u16 cqe_status) 252 + { 253 + int err_num = -EINVAL; 254 + 255 + switch (cqe_status) { 256 + case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES: 257 + err_num = -EPERM; 258 + break; 259 + case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER: 260 + err_num = -EINVAL; 261 + break; 262 + case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES: 263 + case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING: 264 + err_num = -EAGAIN; 265 + break; 266 + case OCRDMA_MBX_CQE_STATUS_DMA_FAILED: 267 
+ err_num = -EIO; 268 + break; 269 + } 270 + return err_num; 271 + } 272 + 273 + void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed, 274 + bool solicited, u16 cqe_popped) 275 + { 276 + u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK; 277 + 278 + val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) << 279 + OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT); 280 + 281 + if (armed) 282 + val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT); 283 + if (solicited) 284 + val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT); 285 + val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT); 286 + iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET); 287 + } 288 + 289 + static void ocrdma_ring_mq_db(struct ocrdma_dev *dev) 290 + { 291 + u32 val = 0; 292 + 293 + val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK; 294 + val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT; 295 + iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET); 296 + } 297 + 298 + static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id, 299 + bool arm, bool clear_int, u16 num_eqe) 300 + { 301 + u32 val = 0; 302 + 303 + val |= eq_id & OCRDMA_EQ_ID_MASK; 304 + val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT); 305 + if (arm) 306 + val |= (1 << OCRDMA_REARM_SHIFT); 307 + if (clear_int) 308 + val |= (1 << OCRDMA_EQ_CLR_SHIFT); 309 + val |= (1 << OCRDMA_EQ_TYPE_SHIFT); 310 + val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT); 311 + iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET); 312 + } 313 + 314 + static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr, 315 + u8 opcode, u8 subsys, u32 cmd_len) 316 + { 317 + cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT)); 318 + cmd_hdr->timeout = 20; /* seconds */ 319 + cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr); 320 + } 321 + 322 + static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len) 323 + { 324 + struct ocrdma_mqe *mqe; 325 + 326 + mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL); 327 + if (!mqe) 328 + return NULL; 329 + mqe->hdr.spcl_sge_cnt_emb |= 330 + 
(OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) & 331 + OCRDMA_MQE_HDR_EMB_MASK; 332 + mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr); 333 + 334 + ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE, 335 + mqe->hdr.pyld_len); 336 + return mqe; 337 + } 338 + 339 + static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q) 340 + { 341 + dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma); 342 + } 343 + 344 + static int ocrdma_alloc_q(struct ocrdma_dev *dev, 345 + struct ocrdma_queue_info *q, u16 len, u16 entry_size) 346 + { 347 + memset(q, 0, sizeof(*q)); 348 + q->len = len; 349 + q->entry_size = entry_size; 350 + q->size = len * entry_size; 351 + q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, 352 + &q->dma, GFP_KERNEL); 353 + if (!q->va) 354 + return -ENOMEM; 355 + memset(q->va, 0, q->size); 356 + return 0; 357 + } 358 + 359 + static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt, 360 + dma_addr_t host_pa, int hw_page_size) 361 + { 362 + int i; 363 + 364 + for (i = 0; i < cnt; i++) { 365 + q_pa[i].lo = (u32) (host_pa & 0xffffffff); 366 + q_pa[i].hi = (u32) upper_32_bits(host_pa); 367 + host_pa += hw_page_size; 368 + } 369 + } 370 + 371 + static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev, 372 + struct ocrdma_eq *eq) 373 + { 374 + /* assign vector and update vector id for next EQ */ 375 + eq->vector = dev->nic_info.msix.start_vector; 376 + dev->nic_info.msix.start_vector += 1; 377 + } 378 + 379 + static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev) 380 + { 381 + /* this assumes that EQs are freed in exactly reverse order 382 + * as its allocation. 
 */
	dev->nic_info.msix.start_vector -= 1;
}

/* Issue a DELETE_MQ/CQ/EQ mailbox command for @q over the be2net MCC
 * channel. @queue_type selects the opcode; an unknown type is a driver
 * bug (BUG()). Clears q->created on success.
 */
int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
			int queue_type)
{
	u8 opcode = 0;
	int status;
	struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;

	switch (queue_type) {
	case QTYPE_MCCQ:
		opcode = OCRDMA_CMD_DELETE_MQ;
		break;
	case QTYPE_CQ:
		opcode = OCRDMA_CMD_DELETE_CQ;
		break;
	case QTYPE_EQ:
		opcode = OCRDMA_CMD_DELETE_EQ;
		break;
	default:
		BUG();
	}
	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cmd->id = q->id;

	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	if (!status)
		q->created = false;
	return status;
}

/* Create an EQ in firmware for the already-allocated eq->q memory.
 * On success fills eq->q.id, assigns the MSI-X vector (gen2 takes the
 * next free vector locally; otherwise the vector comes from the
 * response) and marks the queue created.
 */
static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int status;
	struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
	/* cmd and rsp alias the same mailbox buffer; the response
	 * overwrites the request in place.
	 */
	struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;

	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
			sizeof(*cmd));
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
		cmd->req.rsvd_version = 0;
	else
		cmd->req.rsvd_version = 2;

	cmd->num_pages = 4;
	cmd->valid = OCRDMA_CREATE_EQ_VALID;
	cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;

	ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
			     PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
				 NULL);
	if (!status) {
		eq->q.id = rsp->vector_eqid & 0xffff;
		if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
			ocrdma_assign_eq_vect_gen2(dev, eq);
		else {
			eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
			dev->nic_info.msix.start_vector += 1;
		}
		eq->q.created = true;
	}
	return status;
}

/* Allocate EQ memory, create the EQ in firmware and arm it.
 * NOTE(review): the q_len parameter is ignored — the allocation uses
 * the fixed OCRDMA_EQ_LEN instead. Presumably intentional for now;
 * confirm against the caller before relying on q_len.
 */
static int ocrdma_create_eq(struct ocrdma_dev *dev,
			    struct ocrdma_eq *eq, u16 q_len)
{
	int status;

	status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
				sizeof(struct ocrdma_eqe));
	if (status)
		return status;

	status = ocrdma_mbx_create_eq(dev, eq);
	if (status)
		goto mbx_err;
	eq->dev = dev;
	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);

	return 0;
mbx_err:
	ocrdma_free_q(dev, &eq->q);
	return status;
}

/* Return the Linux IRQ number backing @eq (shared PCI INTx line or the
 * per-EQ MSI-X vector).
 */
static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int irq;

	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
		irq = dev->nic_info.pdev->irq;
	else
		irq = dev->nic_info.msix.vector_list[eq->vector];
	return irq;
}

/* Tear down an EQ in firmware and free its memory. Gen2 also returns
 * the MSI-X vector (relies on strict reverse-order destruction, see
 * ocrdma_free_eq_vect_gen2).
 */
static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	if (eq->q.created) {
		ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
		if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
			ocrdma_free_eq_vect_gen2(dev);
		ocrdma_free_q(dev, &eq->q);
	}
}

/* Disarm, release the IRQ, then destroy an EQ. */
static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int irq;

	/* disarm EQ so that interrupts are not generated
	 * during freeing and EQ delete is in progress.
502 + */ 503 + ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0); 504 + 505 + irq = ocrdma_get_irq(dev, eq); 506 + free_irq(irq, eq); 507 + _ocrdma_destroy_eq(dev, eq); 508 + } 509 + 510 + static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev) 511 + { 512 + int i; 513 + 514 + /* deallocate the data path eqs */ 515 + for (i = 0; i < dev->eq_cnt; i++) 516 + ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]); 517 + } 518 + 519 + int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev, 520 + struct ocrdma_queue_info *cq, 521 + struct ocrdma_queue_info *eq) 522 + { 523 + struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd; 524 + struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd; 525 + int status; 526 + 527 + memset(cmd, 0, sizeof(*cmd)); 528 + ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ, 529 + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); 530 + 531 + cmd->pgsz_pgcnt = PAGES_4K_SPANNED(cq->va, cq->size); 532 + cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS; 533 + cmd->eqn = (eq->id << OCRDMA_CREATE_CQ_EQID_SHIFT); 534 + 535 + ocrdma_build_q_pages(&cmd->pa[0], cmd->pgsz_pgcnt, 536 + cq->dma, PAGE_SIZE_4K); 537 + status = be_roce_mcc_cmd(dev->nic_info.netdev, 538 + cmd, sizeof(*cmd), NULL, NULL); 539 + if (!status) { 540 + cq->id = (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK); 541 + cq->created = true; 542 + } 543 + return status; 544 + } 545 + 546 + static u32 ocrdma_encoded_q_len(int q_len) 547 + { 548 + u32 len_encoded = fls(q_len); /* log2(len) + 1 */ 549 + 550 + if (len_encoded == 16) 551 + len_encoded = 0; 552 + return len_encoded; 553 + } 554 + 555 + static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev, 556 + struct ocrdma_queue_info *mq, 557 + struct ocrdma_queue_info *cq) 558 + { 559 + int num_pages, status; 560 + struct ocrdma_create_mq_req *cmd = dev->mbx_cmd; 561 + struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd; 562 + struct ocrdma_pa *pa; 563 + 564 + memset(cmd, 0, sizeof(*cmd)); 565 + num_pages = PAGES_4K_SPANNED(mq->va, mq->size); 566 + 567 + if 
(dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 568 + ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ, 569 + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); 570 + cmd->v0.pages = num_pages; 571 + cmd->v0.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID; 572 + cmd->v0.async_cqid_valid = (cq->id << 1); 573 + cmd->v0.cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << 574 + OCRDMA_CREATE_MQ_RING_SIZE_SHIFT); 575 + cmd->v0.cqid_ringsize |= 576 + (cq->id << OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT); 577 + cmd->v0.valid = OCRDMA_CREATE_MQ_VALID; 578 + pa = &cmd->v0.pa[0]; 579 + } else { 580 + ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT, 581 + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); 582 + cmd->req.rsvd_version = 1; 583 + cmd->v1.cqid_pages = num_pages; 584 + cmd->v1.cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT); 585 + cmd->v1.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID; 586 + cmd->v1.async_event_bitmap = Bit(20); 587 + cmd->v1.async_cqid_ringsize = cq->id; 588 + cmd->v1.async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << 589 + OCRDMA_CREATE_MQ_RING_SIZE_SHIFT); 590 + cmd->v1.valid = OCRDMA_CREATE_MQ_VALID; 591 + pa = &cmd->v1.pa[0]; 592 + } 593 + ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K); 594 + status = be_roce_mcc_cmd(dev->nic_info.netdev, 595 + cmd, sizeof(*cmd), NULL, NULL); 596 + if (!status) { 597 + mq->id = rsp->id; 598 + mq->created = true; 599 + } 600 + return status; 601 + } 602 + 603 + static int ocrdma_create_mq(struct ocrdma_dev *dev) 604 + { 605 + int status; 606 + 607 + /* Alloc completion queue for Mailbox queue */ 608 + status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN, 609 + sizeof(struct ocrdma_mcqe)); 610 + if (status) 611 + goto alloc_err; 612 + 613 + status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q); 614 + if (status) 615 + goto mbx_cq_free; 616 + 617 + memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx)); 618 + init_waitqueue_head(&dev->mqe_ctx.cmd_wait); 619 + mutex_init(&dev->mqe_ctx.lock); 620 + 621 + /* 
 Alloc Mailbox queue */
	status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
				sizeof(struct ocrdma_mqe));
	if (status)
		goto mbx_cq_destroy;
	status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
	if (status)
		goto mbx_q_free;
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
	return 0;

mbx_q_free:
	ocrdma_free_q(dev, &dev->mq.sq);
mbx_cq_destroy:
	ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
mbx_cq_free:
	ocrdma_free_q(dev, &dev->mq.cq);
alloc_err:
	return status;
}

/* Destroy the mailbox queue and its CQ. The MQ teardown is serialized
 * against in-flight mailbox commands via mqe_ctx.lock.
 */
static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
{
	struct ocrdma_queue_info *mbxq, *cq;

	/* mqe_ctx lock synchronizes with any other pending cmds. */
	mutex_lock(&dev->mqe_ctx.lock);
	mbxq = &dev->mq.sq;
	if (mbxq->created) {
		ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
		ocrdma_free_q(dev, mbxq);
	}
	mutex_unlock(&dev->mqe_ctx.lock);

	cq = &dev->mq.cq;
	if (cq->created) {
		ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
		ocrdma_free_q(dev, cq);
	}
}

/* Move a QP that hit a catastrophic error into the ERROR state. */
static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
				       struct ocrdma_qp *qp)
{
	enum ib_qp_state new_ib_qps = IB_QPS_ERR;
	enum ib_qp_state old_ib_qps;

	if (qp == NULL)
		BUG();
	ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps);
}

/* Translate an async MCQE into an ib_event and deliver it to the
 * registered QP/CQ/SRQ/device event handler.
 *
 * NOTE(review): the per-type cases dereference qp or cq without a NULL
 * check; if firmware raises e.g. a QP event without QPVALID set, qp is
 * NULL here and this would oops — confirm whether that can happen.
 */
static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
				    struct ocrdma_ae_mcqe *cqe)
{
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_cq *cq = NULL;
	struct ib_event ib_evt = { 0 };
	/* qp_event defaults to 1: most event types below target a QP. */
	int cq_event = 0;
	int qp_event = 1;
	int srq_event = 0;
	int dev_event = 0;
	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
	    OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;

	if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID)
		qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK];
	if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
		cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];

	switch (type) {
	case OCRDMA_CQ_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;
		cq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_CQ_OVERRUN_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;
		break;
	case OCRDMA_CQ_QPCAT_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_FATAL;
		ocrdma_process_qpcat_error(dev, qp);
		break;
	case OCRDMA_QP_ACCESS_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case OCRDMA_QP_COMM_EST_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_COMM_EST;
		break;
	case OCRDMA_SQ_DRAINED_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_SQ_DRAINED;
		break;
	case OCRDMA_DEVICE_FATAL_EVENT:
		ib_evt.element.port_num = 1;
		ib_evt.event = IB_EVENT_DEVICE_FATAL;
		qp_event = 0;
		dev_event = 1;
		break;
	case OCRDMA_SRQCAT_ERROR:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_SRQ_ERR;
		srq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_SRQ_LIMIT_EVENT:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
		srq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_QP_LAST_WQE_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
		break;
	default:
		cq_event = 0;
		qp_event = 0;
		srq_event = 0;
		dev_event = 0;
		ocrdma_err("%s() unknown type=0x%x\n", __func__, type);
		break;
	}

	if (qp_event) {
		if (qp->ibqp.event_handler)
			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
	} else if
				   __func__);
		/* clear the consumed CQE so the valid bit is not seen again */
		memset(cqe, 0, sizeof(struct ocrdma_mcqe));
		ocrdma_mcq_inc_tail(dev);
	}
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
	return 0;
}

/* For a completion on @cq, find the "buddy" CQ (the other CQ of a QP in
 * error state that shares this one) and invoke its completion handler so
 * flushed CQEs get drained too.
 */
static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
				       struct ocrdma_cq *cq)
{
	unsigned long flags;
	struct ocrdma_qp *qp;
	bool buddy_cq_found = false;
	/* Go through list of QPs in error state which are using this CQ
	 * and invoke its callback handler to trigger CQE processing for
	 * error/flushed CQE. It is rare to find more than few entries in
	 * this list as most consumers stops after getting error CQE.
	 * List is traversed only once when a matching buddy cq found for a QP.
	 */
	spin_lock_irqsave(&dev->flush_q_lock, flags);
	list_for_each_entry(qp, &cq->sq_head, sq_entry) {
		if (qp->srq)
			continue;
		/* if wq and rq share the same cq, than comp_handler
		 * is already invoked.
		 */
		if (qp->sq_cq == qp->rq_cq)
			continue;
		/* if completion came on sq, rq's cq is buddy cq.
		 * if completion came on rq, sq's cq is buddy cq.
		 */
		if (qp->sq_cq == cq)
			cq = qp->rq_cq;
		else
			cq = qp->sq_cq;
		buddy_cq_found = true;
		break;
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	if (buddy_cq_found == false)
		return;
	if (cq->ibcq.comp_handler) {
		spin_lock_irqsave(&cq->comp_handler_lock, flags);
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
	}
}

/* Handle a completion event on a consumer CQ: clear the arm/solicit
 * state, acknowledge the doorbell, call the consumer's completion
 * handler, then kick the buddy CQ if needed.
 */
static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
{
	unsigned long flags;
	struct ocrdma_cq *cq;

	/* an out-of-range CQ index from hardware is a fatal driver bug */
	if (cq_idx >= OCRDMA_MAX_CQ)
		BUG();

	cq = dev->cq_tbl[cq_idx];
	if (cq == NULL) {
		ocrdma_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
		return;
	}
	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->armed = false;
	cq->solicited = false;
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	ocrdma_ring_cq_db(dev, cq->id, false, false, 0);

	if (cq->ibcq.comp_handler) {
		spin_lock_irqsave(&cq->comp_handler_lock, flags);
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
	}
	ocrdma_qp_buddy_cq_handler(dev, cq);
}

/* Route a CQ event to the mailbox-CQ handler or a consumer-CQ handler. */
static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
	/* process the MQ-CQE.
 */
	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
		ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
	return IRQ_HANDLED;
}

/* Post a mailbox command: tag it with the current SQ head, copy it into
 * the MQ ring (CPU -> LE), and ring the MQ doorbell.
 */
static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
{
	struct ocrdma_mqe *mqe;

	dev->mqe_ctx.tag = dev->mq.sq.head;
	dev->mqe_ctx.cmd_done = false;
	mqe = ocrdma_get_mqe(dev);
	cmd->hdr.tag_lo = dev->mq.sq.head;
	ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
	/* make sure descriptor is written before ringing doorbell */
	wmb();
	ocrdma_mq_inc_head(dev);
	ocrdma_ring_mq_db(dev);
}

/* Wait up to 30 s for the posted mailbox command to complete.
 * Returns 0 on completion.
 * NOTE(review): returns bare -1 on timeout rather than -ETIMEDOUT;
 * callers only test for non-zero, but an errno would be cleaner.
 */
static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
{
	long status;
	/* 30 sec timeout */
	status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
				    (dev->mqe_ctx.cmd_done != false),
				    msecs_to_jiffies(30000));
	if (status)
		return 0;
	else
		return -1;
}

/* issue a mailbox command on the MQ */
/* Serializes on mqe_ctx.lock, posts @mqe, waits for its completion, then
 * copies the LE response back over @mqe and converts both the CQE status
 * and the embedded mailbox status into an errno. Returns 0 on success.
 */
static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
{
	int status = 0;
	u16 cqe_status, ext_status;
	struct ocrdma_mqe *rsp;

	mutex_lock(&dev->mqe_ctx.lock);
	ocrdma_post_mqe(dev, mqe);
	status = ocrdma_wait_mqe_cmpl(dev);
	if (status)
		goto mbx_err;
	cqe_status = dev->mqe_ctx.cqe_status;
	ext_status = dev->mqe_ctx.ext_status;
	rsp = ocrdma_get_mqe_rsp(dev);
	/* response is copied back over the caller's request buffer */
	ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
	if (cqe_status || ext_status) {
		ocrdma_err
		    ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
		     __func__,
		     (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
		     OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
		status = ocrdma_get_mbx_cqe_errno(cqe_status);
		goto mbx_err;
	}
	if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)
		status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
mbx_err:
	mutex_unlock(&dev->mqe_ctx.lock);
	return status;
}

/* Decode a QUERY_CONFIG mailbox response into the driver's device
 * attribute struct.
 * NOTE(review): max_recv_sge is extracted with the SEND_SGE mask/shift —
 * looks like it should use a RECV-specific field; confirm against the
 * SLI definitions before changing.
 */
static void ocrdma_get_attr(struct ocrdma_dev *dev,
			    struct ocrdma_dev_attr *attr,
			    struct ocrdma_mbx_query_config *rsp)
{
	int max_q_mem;

	attr->max_pd =
	    (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
	attr->max_qp =
	    (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
	attr->max_send_sge = ((rsp->max_write_send_sge &
			       OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
	attr->max_recv_sge = (rsp->max_write_send_sge &
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
	attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
	attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
	attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
				    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
	attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
			       OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
	attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
				    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
	attr->max_mr = rsp->max_mr;
	attr->max_mr_size = ~0ull;
	attr->max_fmr = 0;
	attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
	attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
	attr->max_cqe = rsp->max_cq_cqes_per_cq &
			OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
	attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
			   OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
			  OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
	    OCRDMA_WQE_STRIDE;
	attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
			   OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
			  OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
	    OCRDMA_WQE_STRIDE;
	attr->max_inline_data =
	    attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
			      sizeof(struct ocrdma_sge));
	max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
	/* hw can queue one less than the configured size,
	 * so publish less by one to stack.
	 */
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
		attr->ird = 1;
		attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
		attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
	} else
		dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1;
	dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1;
}

/* Validate the firmware-config response: the function must be
 * provisioned for RDMA. On success records the EQ id range and sets the
 * supported CQ count.
 */
static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
				  struct ocrdma_fw_conf_rsp *conf)
{
	u32 fn_mode;

	fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
	if (fn_mode != OCRDMA_FN_MODE_RDMA)
		return -EINVAL;
	dev->base_eqid = conf->base_eqid;
	dev->max_eq = conf->max_eq;
	dev->attr.max_cq = OCRDMA_MAX_CQ - 1;
	return 0;
}

/* can be issued only during init time.
 */
/* Query the firmware version string and store it (LE-converted) in
 * dev->attr.fw_ver.
 */
static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_ver_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_VER,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	/* the response overwrites the command buffer in place */
	rsp = (struct ocrdma_fw_ver_rsp *)cmd;
	memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
	memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
	       sizeof(rsp->running_ver));
	ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
mbx_err:
	kfree(cmd);
	return status;
}

/* can be issued only during init time. */
/* Query and validate the firmware configuration (RDMA provisioning,
 * EQ range).
 */
static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_conf_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_CONFIG,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_fw_conf_rsp *)cmd;
	status = ocrdma_check_fw_config(dev, rsp);
mbx_err:
	kfree(cmd);
	return status;
}

/* Query device limits (QUERY_CONFIG) and decode them into dev->attr. */
static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mbx_query_config *rsp;
	struct ocrdma_mqe *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
	if (!cmd)
		return status;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_mbx_query_config *)cmd;
	ocrdma_get_attr(dev, &dev->attr, rsp);
mbx_err:
	kfree(cmd);
	return status;
}

/* Allocate a protection domain in firmware; fills pd->id and, when
 * granted, the direct-push (DPP) page for the PD.
 */
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	struct ocrdma_alloc_pd *cmd;
	struct ocrdma_alloc_pd_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
	if (!cmd)
		return status;
	if (pd->dpp_enabled)
		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
	pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
	if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
		pd->dpp_enabled = true;
		pd->dpp_page = rsp->dpp_page_pdid >>
				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
	} else {
		pd->dpp_enabled = false;
		pd->num_dpp_qp = 0;
	}
mbx_err:
	kfree(cmd);
	return status;
}

/* Free a protection domain in firmware. */
int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	struct ocrdma_dealloc_pd *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = pd->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}

/* Round a requested queue size up to a supported hardware page layout.
 * On success *num_entries, *num_pages and *page_size describe the
 * chosen configuration; returns -EINVAL if the request cannot fit.
 */
static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
			       int *num_pages, int *page_size)
{
	int i;
	int mem_size;

	*num_entries = roundup_pow_of_two(*num_entries);
	mem_size = *num_entries * entry_size;
	/* find the possible lowest possible multiplier */
	for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
		if (mem_size <=
(OCRDMA_Q_PAGE_BASE_SIZE << i)) 1189 + break; 1190 + } 1191 + if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT) 1192 + return -EINVAL; 1193 + mem_size = roundup(mem_size, 1194 + ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES)); 1195 + *num_pages = 1196 + mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES); 1197 + *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES); 1198 + *num_entries = mem_size / entry_size; 1199 + return 0; 1200 + } 1201 + 1202 + static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev) 1203 + { 1204 + int i ; 1205 + int status = 0; 1206 + int max_ah; 1207 + struct ocrdma_create_ah_tbl *cmd; 1208 + struct ocrdma_create_ah_tbl_rsp *rsp; 1209 + struct pci_dev *pdev = dev->nic_info.pdev; 1210 + dma_addr_t pa; 1211 + struct ocrdma_pbe *pbes; 1212 + 1213 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd)); 1214 + if (!cmd) 1215 + return status; 1216 + 1217 + max_ah = OCRDMA_MAX_AH; 1218 + dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah; 1219 + 1220 + /* number of PBEs in PBL */ 1221 + cmd->ah_conf = (OCRDMA_AH_TBL_PAGES << 1222 + OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) & 1223 + OCRDMA_CREATE_AH_NUM_PAGES_MASK; 1224 + 1225 + /* page size */ 1226 + for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) { 1227 + if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i)) 1228 + break; 1229 + } 1230 + cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) & 1231 + OCRDMA_CREATE_AH_PAGE_SIZE_MASK; 1232 + 1233 + /* ah_entry size */ 1234 + cmd->ah_conf |= (sizeof(struct ocrdma_av) << 1235 + OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) & 1236 + OCRDMA_CREATE_AH_ENTRY_SIZE_MASK; 1237 + 1238 + dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 1239 + &dev->av_tbl.pbl.pa, 1240 + GFP_KERNEL); 1241 + if (dev->av_tbl.pbl.va == NULL) 1242 + goto mem_err; 1243 + 1244 + dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size, 1245 + &pa, GFP_KERNEL); 1246 + if (dev->av_tbl.va == NULL) 1247 + goto mem_err_ah; 1248 + dev->av_tbl.pa = pa; 
1249 + dev->av_tbl.num_ah = max_ah; 1250 + memset(dev->av_tbl.va, 0, dev->av_tbl.size); 1251 + 1252 + pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va; 1253 + for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) { 1254 + pbes[i].pa_lo = (u32) (pa & 0xffffffff); 1255 + pbes[i].pa_hi = (u32) upper_32_bits(pa); 1256 + pa += PAGE_SIZE; 1257 + } 1258 + cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF); 1259 + cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa); 1260 + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1261 + if (status) 1262 + goto mbx_err; 1263 + rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd; 1264 + dev->av_tbl.ahid = rsp->ahid & 0xFFFF; 1265 + kfree(cmd); 1266 + return 0; 1267 + 1268 + mbx_err: 1269 + dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va, 1270 + dev->av_tbl.pa); 1271 + dev->av_tbl.va = NULL; 1272 + mem_err_ah: 1273 + dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va, 1274 + dev->av_tbl.pbl.pa); 1275 + dev->av_tbl.pbl.va = NULL; 1276 + dev->av_tbl.size = 0; 1277 + mem_err: 1278 + kfree(cmd); 1279 + return status; 1280 + } 1281 + 1282 + static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev) 1283 + { 1284 + struct ocrdma_delete_ah_tbl *cmd; 1285 + struct pci_dev *pdev = dev->nic_info.pdev; 1286 + 1287 + if (dev->av_tbl.va == NULL) 1288 + return; 1289 + 1290 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd)); 1291 + if (!cmd) 1292 + return; 1293 + cmd->ahid = dev->av_tbl.ahid; 1294 + 1295 + ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1296 + dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va, 1297 + dev->av_tbl.pa); 1298 + dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va, 1299 + dev->av_tbl.pbl.pa); 1300 + kfree(cmd); 1301 + } 1302 + 1303 + /* Multiple CQs uses the EQ. This routine returns least used 1304 + * EQ to associate with CQ. 
 This will distribute the interrupt
 * processing and CPU load to associated EQ, vector and so to that CPU.
 */
static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
{
	int i, selected_eq = 0, cq_cnt = 0;
	u16 eq_id;

	/* dev_lock serializes cq_cnt accounting across bind/unbind */
	mutex_lock(&dev->dev_lock);
	cq_cnt = dev->qp_eq_tbl[0].cq_cnt;
	eq_id = dev->qp_eq_tbl[0].q.id;
	/* find the EQ which has the least number of
	 * CQs associated with it.
	 */
	for (i = 0; i < dev->eq_cnt; i++) {
		if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) {
			cq_cnt = dev->qp_eq_tbl[i].cq_cnt;
			eq_id = dev->qp_eq_tbl[i].q.id;
			selected_eq = i;
		}
	}
	dev->qp_eq_tbl[selected_eq].cq_cnt += 1;
	mutex_unlock(&dev->dev_lock);
	return eq_id;
}

/* Drop one CQ reference from the EQ identified by eq_id (the counterpart of
 * ocrdma_bind_eq()).  Silently does nothing if eq_id is not in the table.
 */
static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
{
	int i;

	mutex_lock(&dev->dev_lock);
	for (i = 0; i < dev->eq_cnt; i++) {
		if (dev->qp_eq_tbl[i].q.id != eq_id)
			continue;
		dev->qp_eq_tbl[i].cq_cnt -= 1;
		break;
	}
	mutex_unlock(&dev->dev_lock);
}

/* Allocate the CQE ring in DMA-coherent memory, bind the CQ to the least
 * loaded EQ and issue the CREATE_CQ mailbox command.  On success cq->id,
 * cq->eqn, cq->va/pa/len and cq->phase_change are populated.
 * Returns 0 or a negative errno.
 */
int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
			 int entries, int dpp_cq)
{
	int status = -ENOMEM; int max_hw_cqe;
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_create_cq *cmd;
	struct ocrdma_create_cq_rsp *rsp;
	u32 hw_pages, cqe_size, page_size, cqe_count;

	/* NOTE(review): DPP CQs are rejected up front, which makes every
	 * dpp_cq branch below unreachable dead code — presumably kept for a
	 * future enablement; confirm before relying on the DPP paths.
	 */
	if (dpp_cq)
		return -EINVAL;
	if (entries > dev->attr.max_cqe) {
		ocrdma_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
			   __func__, dev->id, dev->attr.max_cqe, entries);
		return -EINVAL;
	}
	if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
		return -EINVAL;

	if (dpp_cq) {
		cq->max_hw_cqe = 1;
		max_hw_cqe = 1;
		cqe_size = OCRDMA_DPP_CQE_SIZE;
		hw_pages = 1;
	} else {
		cq->max_hw_cqe = dev->attr.max_cqe;
		max_hw_cqe = dev->attr.max_cqe;
		cqe_size = sizeof(struct ocrdma_cqe);
		hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
	}

	/* ring length is always rounded up to a whole hardware queue page */
	cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
	if (!cq->va) {
		status = -ENOMEM;
		goto mem_err;
	}
	memset(cq->va, 0, cq->len);
	page_size = cq->len / hw_pages;
	cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
					OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
	cmd->cmd.pgsz_pgcnt |= hw_pages;
	cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;

	/* NOTE(review): eq_cnt looks like an unsigned-ish count; a "< 0"
	 * check can never trip unless eq_cnt is signed and set negative on
	 * init failure — TODO confirm intent against ocrdma device setup.
	 */
	if (dev->eq_cnt < 0)
		goto eq_err;
	cq->eqn = ocrdma_bind_eq(dev);
	cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
	cqe_count = cq->len / cqe_size;
	if (cqe_count > 1024)
		/* Set cnt to 3 to indicate more than 1024 cq entries */
		cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
	else {
		u8 count = 0;
		switch (cqe_count) {
		case 256:
			count = 0;
			break;
		case 512:
			count = 1;
			break;
		case 1024:
			count = 2;
			break;
		default:
			/* unsupported ring size; status is still -ENOMEM
			 * here — arguably -EINVAL would be clearer
			 */
			goto mbx_err;
		}
		cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
	}
	/* shared eq between all the consumer cqs. */
	cmd->cmd.eqn = cq->eqn;
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		if (dpp_cq)
			cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
				OCRDMA_CREATE_CQ_TYPE_SHIFT;
		cq->phase_change = false;
		cmd->cmd.cqe_count = (cq->len / cqe_size);
	} else {
		cmd->cmd.cqe_count = (cq->len / cqe_size) - 1;
		cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
		cq->phase_change = true;
	}

	ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_create_cq_rsp *)cmd;
	cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
	kfree(cmd);
	return 0;
mbx_err:
	ocrdma_unbind_eq(dev, cq->eqn);
eq_err:
	dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
mem_err:
	kfree(cmd);
	return status;
}

/* Issue DELETE_CQ for cq and, on success, free the CQE ring.  The EQ
 * reference is dropped unconditionally before the mailbox command.
 * NOTE(review): if the firmware command fails, cq->va is deliberately left
 * allocated (caller keeps ownership) — TODO confirm callers handle this.
 */
int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_cq *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
	if (!cmd)
		return status;
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->bypass_flush_qid |=
	    (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
	    OCRDMA_DESTROY_CQ_QID_MASK;

	ocrdma_unbind_eq(dev, cq->eqn);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
mbx_err:
	kfree(cmd);
	return status;
}

/* Allocate an lkey from the firmware (ALLOC_LKEY), packing the MR access
 * rights and PBL count into pbl_sz_flags.  On success hwmr->lkey is set.
 */
int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
			  u32 pdid, int addr_check)
{
	int status = -ENOMEM;
	struct
ocrdma_alloc_lkey *cmd;
	struct ocrdma_alloc_lkey_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->pdid = pdid;
	/* fold access rights and PBL count into the single flags word */
	cmd->pbl_sz_flags |= addr_check;
	cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
	hwmr->lkey = rsp->lrkey;
mbx_err:
	kfree(cmd);
	return status;
}

/* Release an lkey (DEALLOC_LKEY).  fr_mr selects the fast-register flavor. */
int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
{
	int status = -ENOMEM;
	struct ocrdma_dealloc_lkey *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	cmd->lkey = lkey;
	cmd->rsvd_frmr = fr_mr ? 1 : 0;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	/* NOTE(review): this goto jumps to the very next statement — it is a
	 * no-op kept for symmetry with the other mailbox helpers.
	 */
	if (status)
		goto mbx_err;
mbx_err:
	kfree(cmd);
	return status;
}

/* First REGISTER_NSMR command for a memory region: describes the MR (length,
 * FBO, VA, access rights, PBE/PBL geometry) and carries up to pbl_cnt PBL
 * addresses.  "last" tells the firmware whether more PBLs follow via
 * ocrdma_mbx_reg_mr_cont().  Sets hwmr->lkey from the response.
 */
static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
			     u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
{
	int status = -ENOMEM;
	int i;
	struct ocrdma_reg_nsmr *cmd;
	struct ocrdma_reg_nsmr_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	cmd->num_pbl_pdid =
	    pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);

	cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
				    OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
				    OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
				    OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
				    OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
				    OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
	cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);

	/* sizes are encoded in units of OCRDMA_MIN_HPAGE_SIZE */
	cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
	cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
	    OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
	cmd->totlen_low = hwmr->len;
	cmd->totlen_high = upper_32_bits(hwmr->len);
	cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
	cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
	cmd->va_loaddr = (u32) hwmr->va;
	cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);

	for (i = 0; i < pbl_cnt; i++) {
		cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
		cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
	}
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
	hwmr->lkey = rsp->lrkey;
mbx_err:
	kfree(cmd);
	return status;
}

/* Continuation of a REGISTER_NSMR: sends pbl_cnt further PBL addresses
 * starting at pbl_offset for the lkey obtained by ocrdma_mbx_reg_mr().
 */
static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
				  struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
				  u32 pbl_offset, u32 last)
{
	int status = -ENOMEM;
	int i;
	struct ocrdma_reg_nsmr_cont *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	cmd->lrkey = hwmr->lkey;
	cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
	    (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
	cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;

	for (i = 0; i < pbl_cnt; i++) {
		cmd->pbl[i].lo =
		    (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
		cmd->pbl[i].hi =
		    upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
	}
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
mbx_err:
	kfree(cmd);
	return status;
}

/* Register a memory region with the firmware, splitting its PBL list into
 * MAX_OCRDMA_NSMR_PBL-sized batches: the first batch goes through
 * ocrdma_mbx_reg_mr(), remaining batches through ocrdma_mbx_reg_mr_cont().
 */
int ocrdma_reg_mr(struct ocrdma_dev *dev,
		  struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
{
	int status;
	u32 last = 0;
	u32 cur_pbl_cnt, pbl_offset;
	u32 pending_pbl_cnt = hwmr->num_pbls;

	pbl_offset = 0;
	cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
	if (cur_pbl_cnt == pending_pbl_cnt)
		last = 1;

	status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
				   cur_pbl_cnt, hwmr->pbe_size, last);
	if (status) {
		ocrdma_err("%s() status=%d\n", __func__, status);
		return status;
	}
	/* if there is no more pbls to register then exit. */
	if (last)
		return 0;

	while (!last) {
		pbl_offset += cur_pbl_cnt;
		pending_pbl_cnt -= cur_pbl_cnt;
		cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
		/* if we reach the end of the pbls, then need to set the last
		 * bit, indicating no more pbls to register for this memory key.
		 */
		if (cur_pbl_cnt == pending_pbl_cnt)
			last = 1;

		status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
						pbl_offset, last);
		if (status)
			break;
	}
	if (status)
		ocrdma_err("%s() err. status=%d\n", __func__, status);

	return status;
}

/* Return true if qp is already queued on cq's send-flush list. */
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
{
	struct ocrdma_qp *tmp;
	bool found = false;
	list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
		if (qp == tmp) {
			found = true;
			break;
		}
	}
	return found;
}

/* Return true if qp is already queued on cq's receive-flush list. */
bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
{
	struct ocrdma_qp *tmp;
	bool found = false;
	list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
		if (qp == tmp) {
			found = true;
			break;
		}
	}
	return found;
}

/* Queue qp on its CQs' flush lists (idempotent: duplicates are avoided by
 * the membership checks above).  The RQ side is skipped for SRQ-backed QPs
 * because their receive queue is owned by the SRQ, not the QP.
 */
void ocrdma_flush_qp(struct ocrdma_qp *qp)
{
	bool found;
	unsigned long flags;

	spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (!found)
		list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (!found)
			list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
	}
	spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
}

/* Validate and perform the software QP state transition to new_ib_state.
 * Returns 0 on a legal transition (state updated), 1 if already in the
 * target state, -EINVAL on an illegal transition.
 */
int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
			    enum ib_qp_state
*old_ib_state) 1697 + { 1698 + unsigned long flags; 1699 + int status = 0; 1700 + enum ocrdma_qp_state new_state; 1701 + new_state = get_ocrdma_qp_state(new_ib_state); 1702 + 1703 + /* sync with wqe and rqe posting */ 1704 + spin_lock_irqsave(&qp->q_lock, flags); 1705 + 1706 + if (old_ib_state) 1707 + *old_ib_state = get_ibqp_state(qp->state); 1708 + if (new_state == qp->state) { 1709 + spin_unlock_irqrestore(&qp->q_lock, flags); 1710 + return 1; 1711 + } 1712 + 1713 + switch (qp->state) { 1714 + case OCRDMA_QPS_RST: 1715 + switch (new_state) { 1716 + case OCRDMA_QPS_RST: 1717 + case OCRDMA_QPS_INIT: 1718 + break; 1719 + default: 1720 + status = -EINVAL; 1721 + break; 1722 + }; 1723 + break; 1724 + case OCRDMA_QPS_INIT: 1725 + /* qps: INIT->XXX */ 1726 + switch (new_state) { 1727 + case OCRDMA_QPS_INIT: 1728 + case OCRDMA_QPS_RTR: 1729 + break; 1730 + case OCRDMA_QPS_ERR: 1731 + ocrdma_flush_qp(qp); 1732 + break; 1733 + default: 1734 + status = -EINVAL; 1735 + break; 1736 + }; 1737 + break; 1738 + case OCRDMA_QPS_RTR: 1739 + /* qps: RTS->XXX */ 1740 + switch (new_state) { 1741 + case OCRDMA_QPS_RTS: 1742 + break; 1743 + case OCRDMA_QPS_ERR: 1744 + ocrdma_flush_qp(qp); 1745 + break; 1746 + default: 1747 + status = -EINVAL; 1748 + break; 1749 + }; 1750 + break; 1751 + case OCRDMA_QPS_RTS: 1752 + /* qps: RTS->XXX */ 1753 + switch (new_state) { 1754 + case OCRDMA_QPS_SQD: 1755 + case OCRDMA_QPS_SQE: 1756 + break; 1757 + case OCRDMA_QPS_ERR: 1758 + ocrdma_flush_qp(qp); 1759 + break; 1760 + default: 1761 + status = -EINVAL; 1762 + break; 1763 + }; 1764 + break; 1765 + case OCRDMA_QPS_SQD: 1766 + /* qps: SQD->XXX */ 1767 + switch (new_state) { 1768 + case OCRDMA_QPS_RTS: 1769 + case OCRDMA_QPS_SQE: 1770 + case OCRDMA_QPS_ERR: 1771 + break; 1772 + default: 1773 + status = -EINVAL; 1774 + break; 1775 + }; 1776 + break; 1777 + case OCRDMA_QPS_SQE: 1778 + switch (new_state) { 1779 + case OCRDMA_QPS_RTS: 1780 + case OCRDMA_QPS_ERR: 1781 + break; 1782 + default: 1783 + status = 
-EINVAL; 1784 + break; 1785 + }; 1786 + break; 1787 + case OCRDMA_QPS_ERR: 1788 + /* qps: ERR->XXX */ 1789 + switch (new_state) { 1790 + case OCRDMA_QPS_RST: 1791 + break; 1792 + default: 1793 + status = -EINVAL; 1794 + break; 1795 + }; 1796 + break; 1797 + default: 1798 + status = -EINVAL; 1799 + break; 1800 + }; 1801 + if (!status) 1802 + qp->state = new_state; 1803 + 1804 + spin_unlock_irqrestore(&qp->q_lock, flags); 1805 + return status; 1806 + } 1807 + 1808 + static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp) 1809 + { 1810 + u32 flags = 0; 1811 + if (qp->cap_flags & OCRDMA_QP_INB_RD) 1812 + flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK; 1813 + if (qp->cap_flags & OCRDMA_QP_INB_WR) 1814 + flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK; 1815 + if (qp->cap_flags & OCRDMA_QP_MW_BIND) 1816 + flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK; 1817 + if (qp->cap_flags & OCRDMA_QP_LKEY0) 1818 + flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK; 1819 + if (qp->cap_flags & OCRDMA_QP_FAST_REG) 1820 + flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK; 1821 + return flags; 1822 + } 1823 + 1824 + static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, 1825 + struct ib_qp_init_attr *attrs, 1826 + struct ocrdma_qp *qp) 1827 + { 1828 + int status; 1829 + u32 len, hw_pages, hw_page_size; 1830 + dma_addr_t pa; 1831 + struct ocrdma_dev *dev = qp->dev; 1832 + struct pci_dev *pdev = dev->nic_info.pdev; 1833 + u32 max_wqe_allocated; 1834 + u32 max_sges = attrs->cap.max_send_sge; 1835 + 1836 + max_wqe_allocated = attrs->cap.max_send_wr; 1837 + /* need to allocate one extra to for GEN1 family */ 1838 + if (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY) 1839 + max_wqe_allocated += 1; 1840 + 1841 + status = ocrdma_build_q_conf(&max_wqe_allocated, 1842 + dev->attr.wqe_size, &hw_pages, &hw_page_size); 1843 + if (status) { 1844 + ocrdma_err("%s() req. 
max_send_wr=0x%x\n", __func__, 1845 + max_wqe_allocated); 1846 + return -EINVAL; 1847 + } 1848 + qp->sq.max_cnt = max_wqe_allocated; 1849 + len = (hw_pages * hw_page_size); 1850 + 1851 + qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 1852 + if (!qp->sq.va) 1853 + return -EINVAL; 1854 + memset(qp->sq.va, 0, len); 1855 + qp->sq.len = len; 1856 + qp->sq.pa = pa; 1857 + qp->sq.entry_size = dev->attr.wqe_size; 1858 + ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size); 1859 + 1860 + cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) 1861 + << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT); 1862 + cmd->num_wq_rq_pages |= (hw_pages << 1863 + OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) & 1864 + OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK; 1865 + cmd->max_sge_send_write |= (max_sges << 1866 + OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) & 1867 + OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK; 1868 + cmd->max_sge_send_write |= (max_sges << 1869 + OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) & 1870 + OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK; 1871 + cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) << 1872 + OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) & 1873 + OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK; 1874 + cmd->wqe_rqe_size |= (dev->attr.wqe_size << 1875 + OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) & 1876 + OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK; 1877 + return 0; 1878 + } 1879 + 1880 + static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, 1881 + struct ib_qp_init_attr *attrs, 1882 + struct ocrdma_qp *qp) 1883 + { 1884 + int status; 1885 + u32 len, hw_pages, hw_page_size; 1886 + dma_addr_t pa = 0; 1887 + struct ocrdma_dev *dev = qp->dev; 1888 + struct pci_dev *pdev = dev->nic_info.pdev; 1889 + u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1; 1890 + 1891 + status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size, 1892 + &hw_pages, &hw_page_size); 1893 + if (status) { 1894 + ocrdma_err("%s() req. 
max_recv_wr=0x%x\n", __func__, 1895 + attrs->cap.max_recv_wr + 1); 1896 + return status; 1897 + } 1898 + qp->rq.max_cnt = max_rqe_allocated; 1899 + len = (hw_pages * hw_page_size); 1900 + 1901 + qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 1902 + if (!qp->rq.va) 1903 + return status; 1904 + memset(qp->rq.va, 0, len); 1905 + qp->rq.pa = pa; 1906 + qp->rq.len = len; 1907 + qp->rq.entry_size = dev->attr.rqe_size; 1908 + 1909 + ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size); 1910 + cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) << 1911 + OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT); 1912 + cmd->num_wq_rq_pages |= 1913 + (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) & 1914 + OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK; 1915 + cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge << 1916 + OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) & 1917 + OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK; 1918 + cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) << 1919 + OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) & 1920 + OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK; 1921 + cmd->wqe_rqe_size |= (dev->attr.rqe_size << 1922 + OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) & 1923 + OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK; 1924 + return 0; 1925 + } 1926 + 1927 + static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd, 1928 + struct ocrdma_pd *pd, 1929 + struct ocrdma_qp *qp, 1930 + u8 enable_dpp_cq, u16 dpp_cq_id) 1931 + { 1932 + pd->num_dpp_qp--; 1933 + qp->dpp_enabled = true; 1934 + cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK; 1935 + if (!enable_dpp_cq) 1936 + return; 1937 + cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK; 1938 + cmd->dpp_credits_cqid = dpp_cq_id; 1939 + cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT << 1940 + OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT; 1941 + } 1942 + 1943 + static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd, 1944 + struct ocrdma_qp *qp) 1945 + { 1946 + struct 
ocrdma_dev *dev = qp->dev;
	struct pci_dev *pdev = dev->nic_info.pdev;
	dma_addr_t pa = 0;
	int ird_page_size = dev->attr.ird_page_size;
	int ird_q_len = dev->attr.num_ird_pages * ird_page_size;

	if (dev->attr.ird == 0)
		return 0;

	qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
					  &pa, GFP_KERNEL);
	if (!qp->ird_q_va)
		return -ENOMEM;
	memset(qp->ird_q_va, 0, ird_q_len);
	ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
			     pa, ird_page_size);
	return 0;
}

/* Decode the CREATE_QP response into the software QP: queue ids, ORD/IRD
 * limits, ring sizes and, if the firmware granted it, the DPP offset and
 * credit limit reported back to the caller.
 */
static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
				     struct ocrdma_qp *qp,
				     struct ib_qp_init_attr *attrs,
				     u16 *dpp_offset, u16 *dpp_credit_lmt)
{
	u32 max_wqe_allocated, max_rqe_allocated;
	qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
	qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
	qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
	qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
	qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
	qp->dpp_enabled = false;
	if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
		qp->dpp_enabled = true;
		*dpp_credit_lmt = (rsp->dpp_response &
				   OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
		    OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
		*dpp_offset = (rsp->dpp_response &
			       OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
		    OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
	}
	/* ring sizes come back as log2 values */
	max_wqe_allocated =
	    rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
	max_wqe_allocated = 1 << max_wqe_allocated;
	/* NOTE(review): the RQE count is taken from the low 16 bits without
	 * an explicit field mask — presumably the firmware keeps the log2
	 * value small; confirm against the SLI spec.
	 */
	max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);

	if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		qp->sq.free_delta = 0;
		qp->rq.free_delta = 1;
	} else
		qp->sq.free_delta = 1;

	qp->sq.max_cnt = max_wqe_allocated;
	qp->sq.max_wqe_idx = max_wqe_allocated - 1;

	if (!attrs->srq) {
		qp->rq.max_cnt = max_rqe_allocated;
		qp->rq.max_wqe_idx = max_rqe_allocated - 1;
		qp->rq.free_delta = 1;
	}
}

/* Build and issue the CREATE_QP mailbox command: allocates SQ/RQ/IRD rings,
 * wires the QP to its send/recv CQs (or an SRQ), optionally enables DPP,
 * and decodes the response.  Returns 0 or a negative errno; on failure all
 * rings allocated here are freed.
 * NOTE(review): the IRD queue allocated by ocrdma_set_create_qp_ird_cmd()
 * does not appear to be freed on the mbx_err path — presumably released in
 * the QP destroy path; verify to rule out a leak.
 */
int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
			 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
			 u16 *dpp_credit_lmt)
{
	int status = -ENOMEM;
	u32 flags = 0;
	struct ocrdma_dev *dev = qp->dev;
	struct ocrdma_pd *pd = qp->pd;
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_cq *cq;
	struct ocrdma_create_qp_req *cmd;
	struct ocrdma_create_qp_rsp *rsp;
	int qptype;

	switch (attrs->qp_type) {
	case IB_QPT_GSI:
		qptype = OCRDMA_QPT_GSI;
		break;
	case IB_QPT_RC:
		qptype = OCRDMA_QPT_RC;
		break;
	case IB_QPT_UD:
		qptype = OCRDMA_QPT_UD;
		break;
	default:
		return -EINVAL;
	};

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_QPT_MASK;
	status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
	if (status)
		goto sq_err;

	if (attrs->srq) {
		/* receive side is backed by the SRQ; pass its id in place
		 * of RQ pages
		 */
		struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
		cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
		cmd->rq_addr[0].lo = srq->id;
		qp->srq = srq;
	} else {
		status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
		if (status)
			goto rq_err;
	}

	status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
	if (status)
		goto mbx_err;

	cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_PD_ID_MASK;

	flags = ocrdma_set_create_qp_mbx_access_flags(qp);

	cmd->max_sge_recv_flags |= flags;
	cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
			     OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
	cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
			     OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
	cq = get_ocrdma_cq(attrs->send_cq);
	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
	qp->sq_cq = cq;
	cq = get_ocrdma_cq(attrs->recv_cq);
	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
	qp->rq_cq = cq;

	/* DPP only when the PD was created DPP-capable, the caller wants
	 * inline data within the device limit, and slots remain on the PD
	 */
	if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
	    (attrs->cap.max_inline_data <= dev->attr.max_inline_data))
		ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
					     dpp_cq_id);

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_qp_rsp *)cmd;
	ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
	qp->state = OCRDMA_QPS_RST;
	kfree(cmd);
	return 0;
mbx_err:
	if (qp->rq.va)
		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
rq_err:
	ocrdma_err("%s(%d) rq_err\n", __func__, dev->id);
	dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
sq_err:
	ocrdma_err("%s(%d) sq_err\n", __func__, dev->id);
	kfree(cmd);
	return status;
}

/* Query the firmware for the current QP parameters (QUERY_QP) and copy the
 * response into *param.  Returns 0 or a negative errno.
 */
int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			struct ocrdma_qp_params *param)
{
	int status = -ENOMEM;
	struct ocrdma_query_qp *cmd;
	struct ocrdma_query_qp_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->qp_id = qp->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_query_qp_rsp *)cmd;
	memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
mbx_err:
	kfree(cmd);
	return status;
}

/* Resolve the destination GID to an Ethernet MAC: multicast GIDs map to a
 * multicast MAC, link-local GIDs embed the MAC directly; anything else
 * cannot be resolved here and returns -EINVAL.
 */
int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
			u8 *mac_addr)
{
	struct in6_addr in6;

	memcpy(&in6, dgid, sizeof in6);
	if (rdma_is_multicast_addr(&in6))
		rdma_get_mcast_mac(&in6, mac_addr);
	else if (rdma_link_local_addr(&in6))
		rdma_get_ll_mac(&in6, mac_addr);
	else {
		ocrdma_err("%s() fail to resolve mac_addr.\n", __func__);
		return -EINVAL;
	}
	return 0;
}

/* Fill the address-vector portion of a MODIFY_QP command from the IB AH
 * attributes: traffic class, flow label, hop limit, SGID/DGID, destination
 * MAC and VLAN.  A no-op when no GRH is present.
 */
static void ocrdma_set_av_params(struct ocrdma_qp *qp,
				 struct ocrdma_modify_qp *cmd,
				 struct ib_qp_attr *attrs)
{
	struct ib_ah_attr *ah_attr = &attrs->ah_attr;
	union ib_gid sgid;
	u32 vlan_id;
	u8 mac_addr[6];
	if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
		return;
	cmd->params.tclass_sq_psn |=
	    (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
	cmd->params.rnt_rc_sl_fl |=
	    (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
	cmd->params.hop_lmt_rq_psn |=
	    (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
	cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
	memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
	       sizeof(cmd->params.dgid));
	ocrdma_query_gid(&qp->dev->ibdev, 1,
			 ah_attr->grh.sgid_index, &sgid);
	qp->sgid_idx = ah_attr->grh.sgid_index;
	memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
	/* NOTE(review): the return value of ocrdma_resolve_dgid() is
	 * ignored; on failure mac_addr stays uninitialized and a garbage
	 * DMAC is programmed — consider propagating the error.
	 */
	ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
	cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
	    (mac_addr[2] << 16) | (mac_addr[3] << 24);
	/* convert them to LE format. */
	ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
	ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
	cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
	vlan_id = rdma_get_vlan_id(&sgid);
	if (vlan_id && (vlan_id < 0x1000)) {
		cmd->params.vlan_dmac_b4_to_b5 |=
		    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
	}
}

/* Translate each attribute selected by attr_mask into the MODIFY_QP command,
 * setting the corresponding "valid" flag bit per field.  Returns 0 or
 * -EINVAL when a requested value exceeds a device limit.
 */
static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
				struct ocrdma_modify_qp *cmd,
				struct ib_qp_attr *attrs, int attr_mask,
				enum ib_qp_state old_qps)
{
	int status = 0;
	struct net_device *netdev = qp->dev->nic_info.netdev;
	int eth_mtu = iboe_get_mtu(netdev->mtu);

	if (attr_mask & IB_QP_PKEY_INDEX) {
		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
						   OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
		cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
	}
	if (attr_mask & IB_QP_QKEY) {
		qp->qkey = attrs->qkey;
		cmd->params.qkey = attrs->qkey;
		cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
	}
	if (attr_mask & IB_QP_AV)
		ocrdma_set_av_params(qp, cmd, attrs);
	else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
		/* set the default mac address for UD, GSI QPs */
		cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
		    (qp->dev->nic_info.mac_addr[1] << 8) |
		    (qp->dev->nic_info.mac_addr[2] << 16) |
		    (qp->dev->nic_info.mac_addr[3] << 24);
		cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
		    (qp->dev->nic_info.mac_addr[5] << 8);
	}
	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
	    attrs->en_sqd_async_notify) {
		cmd->params.max_sge_recv_flags |=
		    OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
		/* NOTE(review): DST_QPN_VALID is set here for the SQD async
		 * notify case — looks like it may have been intended to be a
		 * different flag; confirm against the SLI spec.
		 */
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_DEST_QPN) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
							OCRDMA_QP_PARAMS_DEST_QPN_MASK);
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_PATH_MTU) {
		/* requested path MTU must not exceed the netdev MTU */
		if (ib_mtu_enum_to_int(eth_mtu) <
		    ib_mtu_enum_to_int(attrs->path_mtu)) {
			status = -EINVAL;
			goto pmtu_err;
		}
		cmd->params.path_mtu_pkey_indx |=
		    (ib_mtu_enum_to_int(attrs->path_mtu) <<
		     OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
		    OCRDMA_QP_PARAMS_PATH_MTU_MASK;
		cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
		    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
					     OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
		    OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
	}
	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
					     OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
		    OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
							OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
		    & OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
	}
	if (attr_mask & IB_QP_SQ_PSN) {
		cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attrs->max_rd_atomic >
qp->dev->attr.max_ord_per_qp) { 2267 + status = -EINVAL; 2268 + goto pmtu_err; 2269 + } 2270 + qp->max_ord = attrs->max_rd_atomic; 2271 + cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID; 2272 + } 2273 + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 2274 + if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) { 2275 + status = -EINVAL; 2276 + goto pmtu_err; 2277 + } 2278 + qp->max_ird = attrs->max_dest_rd_atomic; 2279 + cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID; 2280 + } 2281 + cmd->params.max_ord_ird = (qp->max_ord << 2282 + OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) | 2283 + (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK); 2284 + pmtu_err: 2285 + return status; 2286 + } 2287 + 2288 + int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, 2289 + struct ib_qp_attr *attrs, int attr_mask, 2290 + enum ib_qp_state old_qps) 2291 + { 2292 + int status = -ENOMEM; 2293 + struct ocrdma_modify_qp *cmd; 2294 + struct ocrdma_modify_qp_rsp *rsp; 2295 + 2296 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd)); 2297 + if (!cmd) 2298 + return status; 2299 + 2300 + cmd->params.id = qp->id; 2301 + cmd->flags = 0; 2302 + if (attr_mask & IB_QP_STATE) { 2303 + cmd->params.max_sge_recv_flags |= 2304 + (get_ocrdma_qp_state(attrs->qp_state) << 2305 + OCRDMA_QP_PARAMS_STATE_SHIFT) & 2306 + OCRDMA_QP_PARAMS_STATE_MASK; 2307 + cmd->flags |= OCRDMA_QP_PARA_QPS_VALID; 2308 + } else 2309 + cmd->params.max_sge_recv_flags |= 2310 + (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) & 2311 + OCRDMA_QP_PARAMS_STATE_MASK; 2312 + status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps); 2313 + if (status) 2314 + goto mbx_err; 2315 + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 2316 + if (status) 2317 + goto mbx_err; 2318 + rsp = (struct ocrdma_modify_qp_rsp *)cmd; 2319 + mbx_err: 2320 + kfree(cmd); 2321 + return status; 2322 + } 2323 + 2324 + int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp) 2325 + { 2326 + int status = -ENOMEM; 2327 + struct 
ocrdma_destroy_qp *cmd; 2328 + struct ocrdma_destroy_qp_rsp *rsp; 2329 + struct pci_dev *pdev = dev->nic_info.pdev; 2330 + 2331 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd)); 2332 + if (!cmd) 2333 + return status; 2334 + cmd->qp_id = qp->id; 2335 + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 2336 + if (status) 2337 + goto mbx_err; 2338 + rsp = (struct ocrdma_destroy_qp_rsp *)cmd; 2339 + mbx_err: 2340 + kfree(cmd); 2341 + if (qp->sq.va) 2342 + dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); 2343 + if (!qp->srq && qp->rq.va) 2344 + dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa); 2345 + if (qp->dpp_enabled) 2346 + qp->pd->num_dpp_qp++; 2347 + return status; 2348 + } 2349 + 2350 + int ocrdma_mbx_create_srq(struct ocrdma_srq *srq, 2351 + struct ib_srq_init_attr *srq_attr, 2352 + struct ocrdma_pd *pd) 2353 + { 2354 + int status = -ENOMEM; 2355 + int hw_pages, hw_page_size; 2356 + int len; 2357 + struct ocrdma_create_srq_rsp *rsp; 2358 + struct ocrdma_create_srq *cmd; 2359 + dma_addr_t pa; 2360 + struct ocrdma_dev *dev = srq->dev; 2361 + struct pci_dev *pdev = dev->nic_info.pdev; 2362 + u32 max_rqe_allocated; 2363 + 2364 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd)); 2365 + if (!cmd) 2366 + return status; 2367 + 2368 + cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK; 2369 + max_rqe_allocated = srq_attr->attr.max_wr + 1; 2370 + status = ocrdma_build_q_conf(&max_rqe_allocated, 2371 + dev->attr.rqe_size, 2372 + &hw_pages, &hw_page_size); 2373 + if (status) { 2374 + ocrdma_err("%s() req. 
max_wr=0x%x\n", __func__, 2375 + srq_attr->attr.max_wr); 2376 + status = -EINVAL; 2377 + goto ret; 2378 + } 2379 + len = hw_pages * hw_page_size; 2380 + srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2381 + if (!srq->rq.va) { 2382 + status = -ENOMEM; 2383 + goto ret; 2384 + } 2385 + ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size); 2386 + 2387 + srq->rq.entry_size = dev->attr.rqe_size; 2388 + srq->rq.pa = pa; 2389 + srq->rq.len = len; 2390 + srq->rq.max_cnt = max_rqe_allocated; 2391 + 2392 + cmd->max_sge_rqe = ilog2(max_rqe_allocated); 2393 + cmd->max_sge_rqe |= srq_attr->attr.max_sge << 2394 + OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT; 2395 + 2396 + cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) 2397 + << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT); 2398 + cmd->pages_rqe_sz |= (dev->attr.rqe_size 2399 + << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT) 2400 + & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK; 2401 + cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT; 2402 + 2403 + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 2404 + if (status) 2405 + goto mbx_err; 2406 + rsp = (struct ocrdma_create_srq_rsp *)cmd; 2407 + srq->id = rsp->id; 2408 + srq->rq.dbid = rsp->id; 2409 + max_rqe_allocated = ((rsp->max_sge_rqe_allocated & 2410 + OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >> 2411 + OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT); 2412 + max_rqe_allocated = (1 << max_rqe_allocated); 2413 + srq->rq.max_cnt = max_rqe_allocated; 2414 + srq->rq.max_wqe_idx = max_rqe_allocated - 1; 2415 + srq->rq.max_sges = (rsp->max_sge_rqe_allocated & 2416 + OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >> 2417 + OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT; 2418 + goto ret; 2419 + mbx_err: 2420 + dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa); 2421 + ret: 2422 + kfree(cmd); 2423 + return status; 2424 + } 2425 + 2426 + int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr) 2427 + { 2428 + int 
status = -ENOMEM; 2429 + struct ocrdma_modify_srq *cmd; 2430 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd)); 2431 + if (!cmd) 2432 + return status; 2433 + cmd->id = srq->id; 2434 + cmd->limit_max_rqe |= srq_attr->srq_limit << 2435 + OCRDMA_MODIFY_SRQ_LIMIT_SHIFT; 2436 + status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd); 2437 + kfree(cmd); 2438 + return status; 2439 + } 2440 + 2441 + int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr) 2442 + { 2443 + int status = -ENOMEM; 2444 + struct ocrdma_query_srq *cmd; 2445 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd)); 2446 + if (!cmd) 2447 + return status; 2448 + cmd->id = srq->rq.dbid; 2449 + status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd); 2450 + if (status == 0) { 2451 + struct ocrdma_query_srq_rsp *rsp = 2452 + (struct ocrdma_query_srq_rsp *)cmd; 2453 + srq_attr->max_sge = 2454 + rsp->srq_lmt_max_sge & 2455 + OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK; 2456 + srq_attr->max_wr = 2457 + rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT; 2458 + srq_attr->srq_limit = rsp->srq_lmt_max_sge >> 2459 + OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT; 2460 + } 2461 + kfree(cmd); 2462 + return status; 2463 + } 2464 + 2465 + int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq) 2466 + { 2467 + int status = -ENOMEM; 2468 + struct ocrdma_destroy_srq *cmd; 2469 + struct pci_dev *pdev = dev->nic_info.pdev; 2470 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd)); 2471 + if (!cmd) 2472 + return status; 2473 + cmd->id = srq->id; 2474 + status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd); 2475 + if (srq->rq.va) 2476 + dma_free_coherent(&pdev->dev, srq->rq.len, 2477 + srq->rq.va, srq->rq.pa); 2478 + kfree(cmd); 2479 + return status; 2480 + } 2481 + 2482 + int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah) 2483 + { 2484 + int i; 2485 + int status = -EINVAL; 2486 + struct ocrdma_av *av; 2487 + unsigned long 
flags; 2488 + 2489 + av = dev->av_tbl.va; 2490 + spin_lock_irqsave(&dev->av_tbl.lock, flags); 2491 + for (i = 0; i < dev->av_tbl.num_ah; i++) { 2492 + if (av->valid == 0) { 2493 + av->valid = OCRDMA_AV_VALID; 2494 + ah->av = av; 2495 + ah->id = i; 2496 + status = 0; 2497 + break; 2498 + } 2499 + av++; 2500 + } 2501 + if (i == dev->av_tbl.num_ah) 2502 + status = -EAGAIN; 2503 + spin_unlock_irqrestore(&dev->av_tbl.lock, flags); 2504 + return status; 2505 + } 2506 + 2507 + int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah) 2508 + { 2509 + unsigned long flags; 2510 + spin_lock_irqsave(&dev->av_tbl.lock, flags); 2511 + ah->av->valid = 0; 2512 + spin_unlock_irqrestore(&dev->av_tbl.lock, flags); 2513 + return 0; 2514 + } 2515 + 2516 + static int ocrdma_create_mq_eq(struct ocrdma_dev *dev) 2517 + { 2518 + int status; 2519 + int irq; 2520 + unsigned long flags = 0; 2521 + int num_eq = 0; 2522 + 2523 + if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) 2524 + flags = IRQF_SHARED; 2525 + else { 2526 + num_eq = dev->nic_info.msix.num_vectors - 2527 + dev->nic_info.msix.start_vector; 2528 + /* minimum two vectors/eq are required for rdma to work. 2529 + * one for control path and one for data path. 
2530 + */ 2531 + if (num_eq < 2) 2532 + return -EBUSY; 2533 + } 2534 + 2535 + status = ocrdma_create_eq(dev, &dev->meq, OCRDMA_EQ_LEN); 2536 + if (status) 2537 + return status; 2538 + sprintf(dev->meq.irq_name, "ocrdma_mq%d", dev->id); 2539 + irq = ocrdma_get_irq(dev, &dev->meq); 2540 + status = request_irq(irq, ocrdma_irq_handler, flags, dev->meq.irq_name, 2541 + &dev->meq); 2542 + if (status) 2543 + _ocrdma_destroy_eq(dev, &dev->meq); 2544 + return status; 2545 + } 2546 + 2547 + static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev) 2548 + { 2549 + int num_eq, i, status; 2550 + int irq; 2551 + unsigned long flags = 0; 2552 + 2553 + num_eq = dev->nic_info.msix.num_vectors - 2554 + dev->nic_info.msix.start_vector; 2555 + if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) { 2556 + num_eq = 1; 2557 + flags = IRQF_SHARED; 2558 + } else 2559 + num_eq = min_t(u32, num_eq, num_online_cpus()); 2560 + dev->qp_eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL); 2561 + if (!dev->qp_eq_tbl) 2562 + return -ENOMEM; 2563 + 2564 + for (i = 0; i < num_eq; i++) { 2565 + status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i], 2566 + OCRDMA_EQ_LEN); 2567 + if (status) { 2568 + status = -EINVAL; 2569 + break; 2570 + } 2571 + sprintf(dev->qp_eq_tbl[i].irq_name, "ocrdma_qp%d-%d", 2572 + dev->id, i); 2573 + irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]); 2574 + status = request_irq(irq, ocrdma_irq_handler, flags, 2575 + dev->qp_eq_tbl[i].irq_name, 2576 + &dev->qp_eq_tbl[i]); 2577 + if (status) { 2578 + _ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]); 2579 + status = -EINVAL; 2580 + break; 2581 + } 2582 + dev->eq_cnt += 1; 2583 + } 2584 + /* one eq is sufficient for data path to work */ 2585 + if (dev->eq_cnt >= 1) 2586 + return 0; 2587 + if (status) 2588 + ocrdma_destroy_qp_eqs(dev); 2589 + return status; 2590 + } 2591 + 2592 + int ocrdma_init_hw(struct ocrdma_dev *dev) 2593 + { 2594 + int status; 2595 + /* set up control path eq */ 2596 + status = ocrdma_create_mq_eq(dev); 2597 
+ if (status) 2598 + return status; 2599 + /* set up data path eq */ 2600 + status = ocrdma_create_qp_eqs(dev); 2601 + if (status) 2602 + goto qpeq_err; 2603 + status = ocrdma_create_mq(dev); 2604 + if (status) 2605 + goto mq_err; 2606 + status = ocrdma_mbx_query_fw_config(dev); 2607 + if (status) 2608 + goto conf_err; 2609 + status = ocrdma_mbx_query_dev(dev); 2610 + if (status) 2611 + goto conf_err; 2612 + status = ocrdma_mbx_query_fw_ver(dev); 2613 + if (status) 2614 + goto conf_err; 2615 + status = ocrdma_mbx_create_ah_tbl(dev); 2616 + if (status) 2617 + goto conf_err; 2618 + return 0; 2619 + 2620 + conf_err: 2621 + ocrdma_destroy_mq(dev); 2622 + mq_err: 2623 + ocrdma_destroy_qp_eqs(dev); 2624 + qpeq_err: 2625 + ocrdma_destroy_eq(dev, &dev->meq); 2626 + ocrdma_err("%s() status=%d\n", __func__, status); 2627 + return status; 2628 + } 2629 + 2630 + void ocrdma_cleanup_hw(struct ocrdma_dev *dev) 2631 + { 2632 + ocrdma_mbx_delete_ah_tbl(dev); 2633 + 2634 + /* cleanup the data path eqs */ 2635 + ocrdma_destroy_qp_eqs(dev); 2636 + 2637 + /* cleanup the control path */ 2638 + ocrdma_destroy_mq(dev); 2639 + ocrdma_destroy_eq(dev, &dev->meq); 2640 + }
+132
drivers/infiniband/hw/ocrdma/ocrdma_hw.h
··· 1 + /******************************************************************* 2 + * This file is part of the Emulex RoCE Device Driver for * 3 + * RoCE (RDMA over Converged Ethernet) CNA Adapters. * 4 + * Copyright (C) 2008-2012 Emulex. All rights reserved. * 5 + * EMULEX and SLI are trademarks of Emulex. * 6 + * www.emulex.com * 7 + * * 8 + * This program is free software; you can redistribute it and/or * 9 + * modify it under the terms of version 2 of the GNU General * 10 + * Public License as published by the Free Software Foundation. * 11 + * This program is distributed in the hope that it will be useful. * 12 + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 + * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 + * more details, a copy of which can be found in the file COPYING * 18 + * included with this package. 
* 19 + * 20 + * Contact Information: 21 + * linux-drivers@emulex.com 22 + * 23 + * Emulex 24 + * 3333 Susan Street 25 + * Costa Mesa, CA 92626 26 + *******************************************************************/ 27 + 28 + #ifndef __OCRDMA_HW_H__ 29 + #define __OCRDMA_HW_H__ 30 + 31 + #include "ocrdma_sli.h" 32 + 33 + static inline void ocrdma_cpu_to_le32(void *dst, u32 len) 34 + { 35 + #ifdef __BIG_ENDIAN 36 + int i = 0; 37 + u32 *src_ptr = dst; 38 + u32 *dst_ptr = dst; 39 + for (; i < (len / 4); i++) 40 + *(dst_ptr + i) = cpu_to_le32p(src_ptr + i); 41 + #endif 42 + } 43 + 44 + static inline void ocrdma_le32_to_cpu(void *dst, u32 len) 45 + { 46 + #ifdef __BIG_ENDIAN 47 + int i = 0; 48 + u32 *src_ptr = dst; 49 + u32 *dst_ptr = dst; 50 + for (; i < (len / sizeof(u32)); i++) 51 + *(dst_ptr + i) = le32_to_cpu(*(src_ptr + i)); 52 + #endif 53 + } 54 + 55 + static inline void ocrdma_copy_cpu_to_le32(void *dst, void *src, u32 len) 56 + { 57 + #ifdef __BIG_ENDIAN 58 + int i = 0; 59 + u32 *src_ptr = src; 60 + u32 *dst_ptr = dst; 61 + for (; i < (len / sizeof(u32)); i++) 62 + *(dst_ptr + i) = cpu_to_le32p(src_ptr + i); 63 + #else 64 + memcpy(dst, src, len); 65 + #endif 66 + } 67 + 68 + static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len) 69 + { 70 + #ifdef __BIG_ENDIAN 71 + int i = 0; 72 + u32 *src_ptr = src; 73 + u32 *dst_ptr = dst; 74 + for (; i < len / sizeof(u32); i++) 75 + *(dst_ptr + i) = le32_to_cpu(*(src_ptr + i)); 76 + #else 77 + memcpy(dst, src, len); 78 + #endif 79 + } 80 + 81 + int ocrdma_init_hw(struct ocrdma_dev *); 82 + void ocrdma_cleanup_hw(struct ocrdma_dev *); 83 + 84 + enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps); 85 + void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed, 86 + bool solicited, u16 cqe_popped); 87 + 88 + /* verbs specific mailbox commands */ 89 + int ocrdma_query_config(struct ocrdma_dev *, 90 + struct ocrdma_mbx_query_config *config); 91 + int ocrdma_resolve_dgid(struct ocrdma_dev *, union 
ib_gid *dgid, u8 *mac_addr); 92 + 93 + int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *); 94 + int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *); 95 + 96 + int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr, 97 + u32 pd_id, int addr_check); 98 + int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey); 99 + 100 + int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr, 101 + u32 pd_id, int acc); 102 + int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *, 103 + int entries, int dpp_cq); 104 + int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *); 105 + 106 + int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs, 107 + u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset, 108 + u16 *dpp_credit_lmt); 109 + int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *, 110 + struct ib_qp_attr *attrs, int attr_mask, 111 + enum ib_qp_state old_qps); 112 + int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *, 113 + struct ocrdma_qp_params *param); 114 + int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *); 115 + 116 + int ocrdma_mbx_create_srq(struct ocrdma_srq *, 117 + struct ib_srq_init_attr *, 118 + struct ocrdma_pd *); 119 + int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *); 120 + int ocrdma_mbx_query_srq(struct ocrdma_srq *, struct ib_srq_attr *); 121 + int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *); 122 + 123 + int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *); 124 + int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *); 125 + 126 + int ocrdma_qp_state_machine(struct ocrdma_qp *, enum ib_qp_state new_state, 127 + enum ib_qp_state *old_ib_state); 128 + bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); 129 + bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); 130 + void ocrdma_flush_qp(struct ocrdma_qp *); 131 + 132 + 
#endif /* __OCRDMA_HW_H__ */
+558
drivers/infiniband/hw/ocrdma/ocrdma_main.c
··· 1 + /******************************************************************* 2 + * This file is part of the Emulex RoCE Device Driver for * 3 + * RoCE (RDMA over Converged Ethernet) adapters. * 4 + * Copyright (C) 2008-2012 Emulex. All rights reserved. * 5 + * EMULEX and SLI are trademarks of Emulex. * 6 + * www.emulex.com * 7 + * * 8 + * This program is free software; you can redistribute it and/or * 9 + * modify it under the terms of version 2 of the GNU General * 10 + * Public License as published by the Free Software Foundation. * 11 + * This program is distributed in the hope that it will be useful. * 12 + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 + * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 + * more details, a copy of which can be found in the file COPYING * 18 + * included with this package. 
* 19 + * 20 + * Contact Information: 21 + * linux-drivers@emulex.com 22 + * 23 + * Emulex 24 + * 3333 Susan Street 25 + * Costa Mesa, CA 92626 26 + *******************************************************************/ 27 + 28 + #include <linux/module.h> 29 + #include <linux/version.h> 30 + #include <linux/idr.h> 31 + #include <rdma/ib_verbs.h> 32 + #include <rdma/ib_user_verbs.h> 33 + #include <rdma/ib_addr.h> 34 + 35 + #include <linux/netdevice.h> 36 + #include <net/addrconf.h> 37 + 38 + #include "ocrdma.h" 39 + #include "ocrdma_verbs.h" 40 + #include "ocrdma_ah.h" 41 + #include "be_roce.h" 42 + #include "ocrdma_hw.h" 43 + 44 + MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION); 45 + MODULE_DESCRIPTION("Emulex RoCE HCA Driver"); 46 + MODULE_AUTHOR("Emulex Corporation"); 47 + MODULE_LICENSE("GPL"); 48 + 49 + static LIST_HEAD(ocrdma_dev_list); 50 + static DEFINE_MUTEX(ocrdma_devlist_lock); 51 + static DEFINE_IDR(ocrdma_dev_id); 52 + 53 + static union ib_gid ocrdma_zero_sgid; 54 + static int ocrdma_inet6addr_event(struct notifier_block *, 55 + unsigned long, void *); 56 + 57 + static struct notifier_block ocrdma_inet6addr_notifier = { 58 + .notifier_call = ocrdma_inet6addr_event 59 + }; 60 + 61 + int ocrdma_get_instance(void) 62 + { 63 + int instance = 0; 64 + 65 + /* Assign an unused number */ 66 + if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL)) 67 + return -1; 68 + if (idr_get_new(&ocrdma_dev_id, NULL, &instance)) 69 + return -1; 70 + return instance; 71 + } 72 + 73 + void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid) 74 + { 75 + u8 mac_addr[6]; 76 + 77 + memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN); 78 + guid[0] = mac_addr[0] ^ 2; 79 + guid[1] = mac_addr[1]; 80 + guid[2] = mac_addr[2]; 81 + guid[3] = 0xff; 82 + guid[4] = 0xfe; 83 + guid[5] = mac_addr[3]; 84 + guid[6] = mac_addr[4]; 85 + guid[7] = mac_addr[5]; 86 + } 87 + 88 + static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr, 89 + bool is_vlan, u16 vlan_id) 90 + { 91 + 
sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); 92 + sgid->raw[8] = mac_addr[0] ^ 2; 93 + sgid->raw[9] = mac_addr[1]; 94 + sgid->raw[10] = mac_addr[2]; 95 + if (is_vlan) { 96 + sgid->raw[11] = vlan_id >> 8; 97 + sgid->raw[12] = vlan_id & 0xff; 98 + } else { 99 + sgid->raw[11] = 0xff; 100 + sgid->raw[12] = 0xfe; 101 + } 102 + sgid->raw[13] = mac_addr[3]; 103 + sgid->raw[14] = mac_addr[4]; 104 + sgid->raw[15] = mac_addr[5]; 105 + } 106 + 107 + static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, 108 + bool is_vlan, u16 vlan_id) 109 + { 110 + int i; 111 + bool found = false; 112 + union ib_gid new_sgid; 113 + int free_idx = OCRDMA_MAX_SGID; 114 + unsigned long flags; 115 + 116 + memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid)); 117 + 118 + ocrdma_build_sgid_mac(&new_sgid, mac_addr, is_vlan, vlan_id); 119 + 120 + spin_lock_irqsave(&dev->sgid_lock, flags); 121 + for (i = 0; i < OCRDMA_MAX_SGID; i++) { 122 + if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid, 123 + sizeof(union ib_gid))) { 124 + /* found free entry */ 125 + if (!found) { 126 + free_idx = i; 127 + found = true; 128 + break; 129 + } 130 + } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid, 131 + sizeof(union ib_gid))) { 132 + /* entry already present, no addition is required. 
*/ 133 + spin_unlock_irqrestore(&dev->sgid_lock, flags); 134 + return; 135 + } 136 + } 137 + /* if entry doesn't exist and if table has some space, add entry */ 138 + if (found) 139 + memcpy(&dev->sgid_tbl[free_idx], &new_sgid, 140 + sizeof(union ib_gid)); 141 + spin_unlock_irqrestore(&dev->sgid_lock, flags); 142 + } 143 + 144 + static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, 145 + bool is_vlan, u16 vlan_id) 146 + { 147 + int found = false; 148 + int i; 149 + union ib_gid sgid; 150 + unsigned long flags; 151 + 152 + ocrdma_build_sgid_mac(&sgid, mac_addr, is_vlan, vlan_id); 153 + 154 + spin_lock_irqsave(&dev->sgid_lock, flags); 155 + /* first is default sgid, which cannot be deleted. */ 156 + for (i = 1; i < OCRDMA_MAX_SGID; i++) { 157 + if (!memcmp(&dev->sgid_tbl[i], &sgid, sizeof(union ib_gid))) { 158 + /* found matching entry */ 159 + memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid)); 160 + found = true; 161 + break; 162 + } 163 + } 164 + spin_unlock_irqrestore(&dev->sgid_lock, flags); 165 + return found; 166 + } 167 + 168 + static void ocrdma_add_default_sgid(struct ocrdma_dev *dev) 169 + { 170 + /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */ 171 + union ib_gid *sgid = &dev->sgid_tbl[0]; 172 + 173 + sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); 174 + ocrdma_get_guid(dev, &sgid->raw[8]); 175 + } 176 + 177 + static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) 178 + { 179 + struct net_device *netdev, *tmp; 180 + u16 vlan_id; 181 + bool is_vlan; 182 + 183 + netdev = dev->nic_info.netdev; 184 + 185 + ocrdma_add_default_sgid(dev); 186 + 187 + rcu_read_lock(); 188 + for_each_netdev_rcu(&init_net, tmp) { 189 + if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) { 190 + if (!netif_running(tmp) || !netif_oper_up(tmp)) 191 + continue; 192 + if (netdev != tmp) { 193 + vlan_id = vlan_dev_vlan_id(tmp); 194 + is_vlan = true; 195 + } else { 196 + is_vlan = false; 197 + vlan_id = 0; 198 + tmp = netdev; 199 + } 
200 + ocrdma_add_sgid(dev, tmp->dev_addr, is_vlan, vlan_id); 201 + } 202 + } 203 + rcu_read_unlock(); 204 + return 0; 205 + } 206 + 207 + static int ocrdma_inet6addr_event(struct notifier_block *notifier, 208 + unsigned long event, void *ptr) 209 + { 210 + struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; 211 + struct net_device *event_netdev = ifa->idev->dev; 212 + struct net_device *netdev = NULL; 213 + struct ib_event gid_event; 214 + struct ocrdma_dev *dev; 215 + bool found = false; 216 + bool is_vlan = false; 217 + u16 vid = 0; 218 + 219 + netdev = vlan_dev_real_dev(event_netdev); 220 + if (netdev != event_netdev) { 221 + is_vlan = true; 222 + vid = vlan_dev_vlan_id(event_netdev); 223 + } 224 + mutex_lock(&ocrdma_devlist_lock); 225 + list_for_each_entry(dev, &ocrdma_dev_list, entry) { 226 + if (dev->nic_info.netdev == netdev) { 227 + found = true; 228 + break; 229 + } 230 + } 231 + mutex_unlock(&ocrdma_devlist_lock); 232 + 233 + if (!found) 234 + return NOTIFY_DONE; 235 + if (!rdma_link_local_addr((struct in6_addr *)&ifa->addr)) 236 + return NOTIFY_DONE; 237 + 238 + mutex_lock(&dev->dev_lock); 239 + switch (event) { 240 + case NETDEV_UP: 241 + ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid); 242 + break; 243 + case NETDEV_DOWN: 244 + found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid); 245 + if (found) { 246 + /* found the matching entry, notify 247 + * the consumers about it 248 + */ 249 + gid_event.device = &dev->ibdev; 250 + gid_event.element.port_num = 1; 251 + gid_event.event = IB_EVENT_GID_CHANGE; 252 + ib_dispatch_event(&gid_event); 253 + } 254 + break; 255 + default: 256 + break; 257 + } 258 + mutex_unlock(&dev->dev_lock); 259 + return NOTIFY_OK; 260 + } 261 + 262 + static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, 263 + u8 port_num) 264 + { 265 + return IB_LINK_LAYER_ETHERNET; 266 + } 267 + 268 + int ocrdma_register_device(struct ocrdma_dev *dev) 269 + { 270 + strlcpy(dev->ibdev.name, "ocrdma%d", 
IB_DEVICE_NAME_MAX); 271 + ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid); 272 + memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC, 273 + sizeof(OCRDMA_NODE_DESC)); 274 + dev->ibdev.owner = THIS_MODULE; 275 + dev->ibdev.uverbs_cmd_mask = 276 + OCRDMA_UVERBS(GET_CONTEXT) | 277 + OCRDMA_UVERBS(QUERY_DEVICE) | 278 + OCRDMA_UVERBS(QUERY_PORT) | 279 + OCRDMA_UVERBS(ALLOC_PD) | 280 + OCRDMA_UVERBS(DEALLOC_PD) | 281 + OCRDMA_UVERBS(REG_MR) | 282 + OCRDMA_UVERBS(DEREG_MR) | 283 + OCRDMA_UVERBS(CREATE_COMP_CHANNEL) | 284 + OCRDMA_UVERBS(CREATE_CQ) | 285 + OCRDMA_UVERBS(RESIZE_CQ) | 286 + OCRDMA_UVERBS(DESTROY_CQ) | 287 + OCRDMA_UVERBS(REQ_NOTIFY_CQ) | 288 + OCRDMA_UVERBS(CREATE_QP) | 289 + OCRDMA_UVERBS(MODIFY_QP) | 290 + OCRDMA_UVERBS(QUERY_QP) | 291 + OCRDMA_UVERBS(DESTROY_QP) | 292 + OCRDMA_UVERBS(POLL_CQ) | 293 + OCRDMA_UVERBS(POST_SEND) | 294 + OCRDMA_UVERBS(POST_RECV); 295 + 296 + dev->ibdev.uverbs_cmd_mask |= 297 + OCRDMA_UVERBS(CREATE_AH) | 298 + OCRDMA_UVERBS(MODIFY_AH) | 299 + OCRDMA_UVERBS(QUERY_AH) | 300 + OCRDMA_UVERBS(DESTROY_AH); 301 + 302 + dev->ibdev.node_type = RDMA_NODE_IB_CA; 303 + dev->ibdev.phys_port_cnt = 1; 304 + dev->ibdev.num_comp_vectors = 1; 305 + 306 + /* mandatory verbs. 
*/ 307 + dev->ibdev.query_device = ocrdma_query_device; 308 + dev->ibdev.query_port = ocrdma_query_port; 309 + dev->ibdev.modify_port = ocrdma_modify_port; 310 + dev->ibdev.query_gid = ocrdma_query_gid; 311 + dev->ibdev.get_link_layer = ocrdma_link_layer; 312 + dev->ibdev.alloc_pd = ocrdma_alloc_pd; 313 + dev->ibdev.dealloc_pd = ocrdma_dealloc_pd; 314 + 315 + dev->ibdev.create_cq = ocrdma_create_cq; 316 + dev->ibdev.destroy_cq = ocrdma_destroy_cq; 317 + dev->ibdev.resize_cq = ocrdma_resize_cq; 318 + 319 + dev->ibdev.create_qp = ocrdma_create_qp; 320 + dev->ibdev.modify_qp = ocrdma_modify_qp; 321 + dev->ibdev.query_qp = ocrdma_query_qp; 322 + dev->ibdev.destroy_qp = ocrdma_destroy_qp; 323 + 324 + dev->ibdev.query_pkey = ocrdma_query_pkey; 325 + dev->ibdev.create_ah = ocrdma_create_ah; 326 + dev->ibdev.destroy_ah = ocrdma_destroy_ah; 327 + dev->ibdev.query_ah = ocrdma_query_ah; 328 + dev->ibdev.modify_ah = ocrdma_modify_ah; 329 + 330 + dev->ibdev.poll_cq = ocrdma_poll_cq; 331 + dev->ibdev.post_send = ocrdma_post_send; 332 + dev->ibdev.post_recv = ocrdma_post_recv; 333 + dev->ibdev.req_notify_cq = ocrdma_arm_cq; 334 + 335 + dev->ibdev.get_dma_mr = ocrdma_get_dma_mr; 336 + dev->ibdev.dereg_mr = ocrdma_dereg_mr; 337 + dev->ibdev.reg_user_mr = ocrdma_reg_user_mr; 338 + 339 + /* mandatory to support user space verbs consumer. 
*/ 340 + dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext; 341 + dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext; 342 + dev->ibdev.mmap = ocrdma_mmap; 343 + dev->ibdev.dma_device = &dev->nic_info.pdev->dev; 344 + 345 + dev->ibdev.process_mad = ocrdma_process_mad; 346 + 347 + if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 348 + dev->ibdev.uverbs_cmd_mask |= 349 + OCRDMA_UVERBS(CREATE_SRQ) | 350 + OCRDMA_UVERBS(MODIFY_SRQ) | 351 + OCRDMA_UVERBS(QUERY_SRQ) | 352 + OCRDMA_UVERBS(DESTROY_SRQ) | 353 + OCRDMA_UVERBS(POST_SRQ_RECV); 354 + 355 + dev->ibdev.create_srq = ocrdma_create_srq; 356 + dev->ibdev.modify_srq = ocrdma_modify_srq; 357 + dev->ibdev.query_srq = ocrdma_query_srq; 358 + dev->ibdev.destroy_srq = ocrdma_destroy_srq; 359 + dev->ibdev.post_srq_recv = ocrdma_post_srq_recv; 360 + } 361 + return ib_register_device(&dev->ibdev, NULL); 362 + } 363 + 364 + static int ocrdma_alloc_resources(struct ocrdma_dev *dev) 365 + { 366 + mutex_init(&dev->dev_lock); 367 + dev->sgid_tbl = kzalloc(sizeof(union ib_gid) * 368 + OCRDMA_MAX_SGID, GFP_KERNEL); 369 + if (!dev->sgid_tbl) 370 + goto alloc_err; 371 + spin_lock_init(&dev->sgid_lock); 372 + 373 + dev->cq_tbl = kzalloc(sizeof(struct ocrdma_cq *) * 374 + OCRDMA_MAX_CQ, GFP_KERNEL); 375 + if (!dev->cq_tbl) 376 + goto alloc_err; 377 + 378 + if (dev->attr.max_qp) { 379 + dev->qp_tbl = kzalloc(sizeof(struct ocrdma_qp *) * 380 + OCRDMA_MAX_QP, GFP_KERNEL); 381 + if (!dev->qp_tbl) 382 + goto alloc_err; 383 + } 384 + spin_lock_init(&dev->av_tbl.lock); 385 + spin_lock_init(&dev->flush_q_lock); 386 + return 0; 387 + alloc_err: 388 + ocrdma_err("%s(%d) error.\n", __func__, dev->id); 389 + return -ENOMEM; 390 + } 391 + 392 + static void ocrdma_free_resources(struct ocrdma_dev *dev) 393 + { 394 + kfree(dev->qp_tbl); 395 + kfree(dev->cq_tbl); 396 + kfree(dev->sgid_tbl); 397 + } 398 + 399 + static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) 400 + { 401 + int status = 0; 402 + struct ocrdma_dev *dev; 403 + 404 
+ dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev)); 405 + if (!dev) { 406 + ocrdma_err("Unable to allocate ib device\n"); 407 + return NULL; 408 + } 409 + dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL); 410 + if (!dev->mbx_cmd) 411 + goto idr_err; 412 + 413 + memcpy(&dev->nic_info, dev_info, sizeof(*dev_info)); 414 + dev->id = ocrdma_get_instance(); 415 + if (dev->id < 0) 416 + goto idr_err; 417 + 418 + status = ocrdma_init_hw(dev); 419 + if (status) 420 + goto init_err; 421 + 422 + status = ocrdma_alloc_resources(dev); 423 + if (status) 424 + goto alloc_err; 425 + 426 + status = ocrdma_build_sgid_tbl(dev); 427 + if (status) 428 + goto alloc_err; 429 + 430 + status = ocrdma_register_device(dev); 431 + if (status) 432 + goto alloc_err; 433 + 434 + mutex_lock(&ocrdma_devlist_lock); 435 + list_add_tail(&dev->entry, &ocrdma_dev_list); 436 + mutex_unlock(&ocrdma_devlist_lock); 437 + return dev; 438 + 439 + alloc_err: 440 + ocrdma_free_resources(dev); 441 + ocrdma_cleanup_hw(dev); 442 + init_err: 443 + idr_remove(&ocrdma_dev_id, dev->id); 444 + idr_err: 445 + kfree(dev->mbx_cmd); 446 + ib_dealloc_device(&dev->ibdev); 447 + ocrdma_err("%s() leaving. ret=%d\n", __func__, status); 448 + return NULL; 449 + } 450 + 451 + static void ocrdma_remove(struct ocrdma_dev *dev) 452 + { 453 + /* first unregister with stack to stop all the active traffic 454 + * of the registered clients. 
455 + */ 456 + ib_unregister_device(&dev->ibdev); 457 + 458 + mutex_lock(&ocrdma_devlist_lock); 459 + list_del(&dev->entry); 460 + mutex_unlock(&ocrdma_devlist_lock); 461 + 462 + ocrdma_free_resources(dev); 463 + ocrdma_cleanup_hw(dev); 464 + 465 + idr_remove(&ocrdma_dev_id, dev->id); 466 + kfree(dev->mbx_cmd); 467 + ib_dealloc_device(&dev->ibdev); 468 + } 469 + 470 + static int ocrdma_open(struct ocrdma_dev *dev) 471 + { 472 + struct ib_event port_event; 473 + 474 + port_event.event = IB_EVENT_PORT_ACTIVE; 475 + port_event.element.port_num = 1; 476 + port_event.device = &dev->ibdev; 477 + ib_dispatch_event(&port_event); 478 + return 0; 479 + } 480 + 481 + static int ocrdma_close(struct ocrdma_dev *dev) 482 + { 483 + int i; 484 + struct ocrdma_qp *qp, **cur_qp; 485 + struct ib_event err_event; 486 + struct ib_qp_attr attrs; 487 + int attr_mask = IB_QP_STATE; 488 + 489 + attrs.qp_state = IB_QPS_ERR; 490 + mutex_lock(&dev->dev_lock); 491 + if (dev->qp_tbl) { 492 + cur_qp = dev->qp_tbl; 493 + for (i = 0; i < OCRDMA_MAX_QP; i++) { 494 + qp = cur_qp[i]; 495 + if (qp) { 496 + /* change the QP state to ERROR */ 497 + _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask); 498 + 499 + err_event.event = IB_EVENT_QP_FATAL; 500 + err_event.element.qp = &qp->ibqp; 501 + err_event.device = &dev->ibdev; 502 + ib_dispatch_event(&err_event); 503 + } 504 + } 505 + } 506 + mutex_unlock(&dev->dev_lock); 507 + 508 + err_event.event = IB_EVENT_PORT_ERR; 509 + err_event.element.port_num = 1; 510 + err_event.device = &dev->ibdev; 511 + ib_dispatch_event(&err_event); 512 + return 0; 513 + } 514 + 515 + /* event handling via NIC driver ensures that all the NIC specific 516 + * initialization done before RoCE driver notifies 517 + * event to stack. 
518 + */ 519 + static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event) 520 + { 521 + switch (event) { 522 + case BE_DEV_UP: 523 + ocrdma_open(dev); 524 + break; 525 + case BE_DEV_DOWN: 526 + ocrdma_close(dev); 527 + break; 528 + }; 529 + } 530 + 531 + struct ocrdma_driver ocrdma_drv = { 532 + .name = "ocrdma_driver", 533 + .add = ocrdma_add, 534 + .remove = ocrdma_remove, 535 + .state_change_handler = ocrdma_event_handler, 536 + }; 537 + 538 + static int __init ocrdma_init_module(void) 539 + { 540 + int status; 541 + 542 + status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier); 543 + if (status) 544 + return status; 545 + status = be_roce_register_driver(&ocrdma_drv); 546 + if (status) 547 + unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier); 548 + return status; 549 + } 550 + 551 + static void __exit ocrdma_exit_module(void) 552 + { 553 + be_roce_unregister_driver(&ocrdma_drv); 554 + unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier); 555 + } 556 + 557 + module_init(ocrdma_init_module); 558 + module_exit(ocrdma_exit_module);
+1672
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
··· 1 + /******************************************************************* 2 + * This file is part of the Emulex RoCE Device Driver for * 3 + * RoCE (RDMA over Converged Ethernet) adapters. * 4 + * Copyright (C) 2008-2012 Emulex. All rights reserved. * 5 + * EMULEX and SLI are trademarks of Emulex. * 6 + * www.emulex.com * 7 + * * 8 + * This program is free software; you can redistribute it and/or * 9 + * modify it under the terms of version 2 of the GNU General * 10 + * Public License as published by the Free Software Foundation. * 11 + * This program is distributed in the hope that it will be useful. * 12 + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 + * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 + * more details, a copy of which can be found in the file COPYING * 18 + * included with this package. 
* 19 + * 20 + * Contact Information: 21 + * linux-drivers@emulex.com 22 + * 23 + * Emulex 24 + * 3333 Susan Street 25 + * Costa Mesa, CA 92626 26 + *******************************************************************/ 27 + 28 + #ifndef __OCRDMA_SLI_H__ 29 + #define __OCRDMA_SLI_H__ 30 + 31 + #define Bit(_b) (1 << (_b)) 32 + 33 + #define OCRDMA_GEN1_FAMILY 0xB 34 + #define OCRDMA_GEN2_FAMILY 0x2 35 + 36 + #define OCRDMA_SUBSYS_ROCE 10 37 + enum { 38 + OCRDMA_CMD_QUERY_CONFIG = 1, 39 + OCRDMA_CMD_ALLOC_PD, 40 + OCRDMA_CMD_DEALLOC_PD, 41 + 42 + OCRDMA_CMD_CREATE_AH_TBL, 43 + OCRDMA_CMD_DELETE_AH_TBL, 44 + 45 + OCRDMA_CMD_CREATE_QP, 46 + OCRDMA_CMD_QUERY_QP, 47 + OCRDMA_CMD_MODIFY_QP, 48 + OCRDMA_CMD_DELETE_QP, 49 + 50 + OCRDMA_CMD_RSVD1, 51 + OCRDMA_CMD_ALLOC_LKEY, 52 + OCRDMA_CMD_DEALLOC_LKEY, 53 + OCRDMA_CMD_REGISTER_NSMR, 54 + OCRDMA_CMD_REREGISTER_NSMR, 55 + OCRDMA_CMD_REGISTER_NSMR_CONT, 56 + OCRDMA_CMD_QUERY_NSMR, 57 + OCRDMA_CMD_ALLOC_MW, 58 + OCRDMA_CMD_QUERY_MW, 59 + 60 + OCRDMA_CMD_CREATE_SRQ, 61 + OCRDMA_CMD_QUERY_SRQ, 62 + OCRDMA_CMD_MODIFY_SRQ, 63 + OCRDMA_CMD_DELETE_SRQ, 64 + 65 + OCRDMA_CMD_ATTACH_MCAST, 66 + OCRDMA_CMD_DETACH_MCAST, 67 + 68 + OCRDMA_CMD_MAX 69 + }; 70 + 71 + #define OCRDMA_SUBSYS_COMMON 1 72 + enum { 73 + OCRDMA_CMD_CREATE_CQ = 12, 74 + OCRDMA_CMD_CREATE_EQ = 13, 75 + OCRDMA_CMD_CREATE_MQ = 21, 76 + OCRDMA_CMD_GET_FW_VER = 35, 77 + OCRDMA_CMD_DELETE_MQ = 53, 78 + OCRDMA_CMD_DELETE_CQ = 54, 79 + OCRDMA_CMD_DELETE_EQ = 55, 80 + OCRDMA_CMD_GET_FW_CONFIG = 58, 81 + OCRDMA_CMD_CREATE_MQ_EXT = 90 82 + }; 83 + 84 + enum { 85 + QTYPE_EQ = 1, 86 + QTYPE_CQ = 2, 87 + QTYPE_MCCQ = 3 88 + }; 89 + 90 + #define OCRDMA_MAX_SGID (8) 91 + 92 + #define OCRDMA_MAX_QP 2048 93 + #define OCRDMA_MAX_CQ 2048 94 + 95 + enum { 96 + OCRDMA_DB_RQ_OFFSET = 0xE0, 97 + OCRDMA_DB_GEN2_RQ1_OFFSET = 0x100, 98 + OCRDMA_DB_GEN2_RQ2_OFFSET = 0xC0, 99 + OCRDMA_DB_SQ_OFFSET = 0x60, 100 + OCRDMA_DB_GEN2_SQ_OFFSET = 0x1C0, 101 + OCRDMA_DB_SRQ_OFFSET = OCRDMA_DB_RQ_OFFSET, 102 
+ OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ1_OFFSET, 103 + OCRDMA_DB_CQ_OFFSET = 0x120, 104 + OCRDMA_DB_EQ_OFFSET = OCRDMA_DB_CQ_OFFSET, 105 + OCRDMA_DB_MQ_OFFSET = 0x140 106 + }; 107 + 108 + #define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ 109 + #define OCRDMA_DB_CQ_RING_ID_EXT_MASK 0x0C00 /* bits 10-11 of qid at 12-11 */ 110 + /* qid #2 msbits at 12-11 */ 111 + #define OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT 0x1 112 + #define OCRDMA_DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ 113 + /* Rearm bit */ 114 + #define OCRDMA_DB_CQ_REARM_SHIFT (29) /* bit 29 */ 115 + /* solicited bit */ 116 + #define OCRDMA_DB_CQ_SOLICIT_SHIFT (31) /* bit 31 */ 117 + 118 + #define OCRDMA_EQ_ID_MASK 0x1FF /* bits 0 - 8 */ 119 + #define OCRDMA_EQ_ID_EXT_MASK 0x3e00 /* bits 9-13 */ 120 + #define OCRDMA_EQ_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 at 11-15 */ 121 + 122 + /* Clear the interrupt for this eq */ 123 + #define OCRDMA_EQ_CLR_SHIFT (9) /* bit 9 */ 124 + /* Must be 1 */ 125 + #define OCRDMA_EQ_TYPE_SHIFT (10) /* bit 10 */ 126 + /* Number of event entries processed */ 127 + #define OCRDMA_NUM_EQE_SHIFT (16) /* bits 16 - 28 */ 128 + /* Rearm bit */ 129 + #define OCRDMA_REARM_SHIFT (29) /* bit 29 */ 130 + 131 + #define OCRDMA_MQ_ID_MASK 0x7FF /* bits 0 - 10 */ 132 + /* Number of entries posted */ 133 + #define OCRDMA_MQ_NUM_MQE_SHIFT (16) /* bits 16 - 29 */ 134 + 135 + #define OCRDMA_MIN_HPAGE_SIZE (4096) 136 + 137 + #define OCRDMA_MIN_Q_PAGE_SIZE (4096) 138 + #define OCRDMA_MAX_Q_PAGES (8) 139 + 140 + /* 141 + # 0: 4K Bytes 142 + # 1: 8K Bytes 143 + # 2: 16K Bytes 144 + # 3: 32K Bytes 145 + # 4: 64K Bytes 146 + */ 147 + #define OCRDMA_MAX_Q_PAGE_SIZE_CNT (5) 148 + #define OCRDMA_Q_PAGE_BASE_SIZE (OCRDMA_MIN_Q_PAGE_SIZE * OCRDMA_MAX_Q_PAGES) 149 + 150 + #define MAX_OCRDMA_QP_PAGES (8) 151 + #define OCRDMA_MAX_WQE_MEM_SIZE (MAX_OCRDMA_QP_PAGES * OCRDMA_MIN_HQ_PAGE_SIZE) 152 + 153 + #define OCRDMA_CREATE_CQ_MAX_PAGES (4) 154 + #define OCRDMA_DPP_CQE_SIZE (4) 155 + 156 + #define 
OCRDMA_GEN2_MAX_CQE 1024 157 + #define OCRDMA_GEN2_CQ_PAGE_SIZE 4096 158 + #define OCRDMA_GEN2_WQE_SIZE 256 159 + #define OCRDMA_MAX_CQE 4095 160 + #define OCRDMA_CQ_PAGE_SIZE 16384 161 + #define OCRDMA_WQE_SIZE 128 162 + #define OCRDMA_WQE_STRIDE 8 163 + #define OCRDMA_WQE_ALIGN_BYTES 16 164 + 165 + #define MAX_OCRDMA_SRQ_PAGES MAX_OCRDMA_QP_PAGES 166 + 167 + enum { 168 + OCRDMA_MCH_OPCODE_SHIFT = 0, 169 + OCRDMA_MCH_OPCODE_MASK = 0xFF, 170 + OCRDMA_MCH_SUBSYS_SHIFT = 8, 171 + OCRDMA_MCH_SUBSYS_MASK = 0xFF00 172 + }; 173 + 174 + /* mailbox cmd header */ 175 + struct ocrdma_mbx_hdr { 176 + u32 subsys_op; 177 + u32 timeout; /* in seconds */ 178 + u32 cmd_len; 179 + u32 rsvd_version; 180 + } __packed; 181 + 182 + enum { 183 + OCRDMA_MBX_RSP_OPCODE_SHIFT = 0, 184 + OCRDMA_MBX_RSP_OPCODE_MASK = 0xFF, 185 + OCRDMA_MBX_RSP_SUBSYS_SHIFT = 8, 186 + OCRDMA_MBX_RSP_SUBSYS_MASK = 0xFF << OCRDMA_MBX_RSP_SUBSYS_SHIFT, 187 + 188 + OCRDMA_MBX_RSP_STATUS_SHIFT = 0, 189 + OCRDMA_MBX_RSP_STATUS_MASK = 0xFF, 190 + OCRDMA_MBX_RSP_ASTATUS_SHIFT = 8, 191 + OCRDMA_MBX_RSP_ASTATUS_MASK = 0xFF << OCRDMA_MBX_RSP_ASTATUS_SHIFT 192 + }; 193 + 194 + /* mailbox cmd response */ 195 + struct ocrdma_mbx_rsp { 196 + u32 subsys_op; 197 + u32 status; 198 + u32 rsp_len; 199 + u32 add_rsp_len; 200 + } __packed; 201 + 202 + enum { 203 + OCRDMA_MQE_EMBEDDED = 1, 204 + OCRDMA_MQE_NONEMBEDDED = 0 205 + }; 206 + 207 + struct ocrdma_mqe_sge { 208 + u32 pa_lo; 209 + u32 pa_hi; 210 + u32 len; 211 + } __packed; 212 + 213 + enum { 214 + OCRDMA_MQE_HDR_EMB_SHIFT = 0, 215 + OCRDMA_MQE_HDR_EMB_MASK = Bit(0), 216 + OCRDMA_MQE_HDR_SGE_CNT_SHIFT = 3, 217 + OCRDMA_MQE_HDR_SGE_CNT_MASK = 0x1F << OCRDMA_MQE_HDR_SGE_CNT_SHIFT, 218 + OCRDMA_MQE_HDR_SPECIAL_SHIFT = 24, 219 + OCRDMA_MQE_HDR_SPECIAL_MASK = 0xFF << OCRDMA_MQE_HDR_SPECIAL_SHIFT 220 + }; 221 + 222 + struct ocrdma_mqe_hdr { 223 + u32 spcl_sge_cnt_emb; 224 + u32 pyld_len; 225 + u32 tag_lo; 226 + u32 tag_hi; 227 + u32 rsvd3; 228 + } __packed; 229 + 230 + struct 
ocrdma_mqe_emb_cmd { 231 + struct ocrdma_mbx_hdr mch; 232 + u8 pyld[220]; 233 + } __packed; 234 + 235 + struct ocrdma_mqe { 236 + struct ocrdma_mqe_hdr hdr; 237 + union { 238 + struct ocrdma_mqe_emb_cmd emb_req; 239 + struct { 240 + struct ocrdma_mqe_sge sge[19]; 241 + } nonemb_req; 242 + u8 cmd[236]; 243 + struct ocrdma_mbx_rsp rsp; 244 + } u; 245 + } __packed; 246 + 247 + #define OCRDMA_EQ_LEN 4096 248 + #define OCRDMA_MQ_CQ_LEN 256 249 + #define OCRDMA_MQ_LEN 128 250 + 251 + #define PAGE_SHIFT_4K 12 252 + #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) 253 + 254 + /* Returns number of pages spanned by the data starting at the given addr */ 255 + #define PAGES_4K_SPANNED(_address, size) \ 256 + ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \ 257 + (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K)) 258 + 259 + struct ocrdma_delete_q_req { 260 + struct ocrdma_mbx_hdr req; 261 + u32 id; 262 + } __packed; 263 + 264 + struct ocrdma_pa { 265 + u32 lo; 266 + u32 hi; 267 + } __packed; 268 + 269 + #define MAX_OCRDMA_EQ_PAGES (8) 270 + struct ocrdma_create_eq_req { 271 + struct ocrdma_mbx_hdr req; 272 + u32 num_pages; 273 + u32 valid; 274 + u32 cnt; 275 + u32 delay; 276 + u32 rsvd; 277 + struct ocrdma_pa pa[MAX_OCRDMA_EQ_PAGES]; 278 + } __packed; 279 + 280 + enum { 281 + OCRDMA_CREATE_EQ_VALID = Bit(29), 282 + OCRDMA_CREATE_EQ_CNT_SHIFT = 26, 283 + OCRDMA_CREATE_CQ_DELAY_SHIFT = 13, 284 + }; 285 + 286 + struct ocrdma_create_eq_rsp { 287 + struct ocrdma_mbx_rsp rsp; 288 + u32 vector_eqid; 289 + }; 290 + 291 + #define OCRDMA_EQ_MINOR_OTHER (0x1) 292 + 293 + enum { 294 + OCRDMA_MCQE_STATUS_SHIFT = 0, 295 + OCRDMA_MCQE_STATUS_MASK = 0xFFFF, 296 + OCRDMA_MCQE_ESTATUS_SHIFT = 16, 297 + OCRDMA_MCQE_ESTATUS_MASK = 0xFFFF << OCRDMA_MCQE_ESTATUS_SHIFT, 298 + OCRDMA_MCQE_CONS_SHIFT = 27, 299 + OCRDMA_MCQE_CONS_MASK = Bit(27), 300 + OCRDMA_MCQE_CMPL_SHIFT = 28, 301 + OCRDMA_MCQE_CMPL_MASK = Bit(28), 302 + OCRDMA_MCQE_AE_SHIFT = 30, 303 + OCRDMA_MCQE_AE_MASK = Bit(30), 304 + 
OCRDMA_MCQE_VALID_SHIFT = 31, 305 + OCRDMA_MCQE_VALID_MASK = Bit(31) 306 + }; 307 + 308 + struct ocrdma_mcqe { 309 + u32 status; 310 + u32 tag_lo; 311 + u32 tag_hi; 312 + u32 valid_ae_cmpl_cons; 313 + } __packed; 314 + 315 + enum { 316 + OCRDMA_AE_MCQE_QPVALID = Bit(31), 317 + OCRDMA_AE_MCQE_QPID_MASK = 0xFFFF, 318 + 319 + OCRDMA_AE_MCQE_CQVALID = Bit(31), 320 + OCRDMA_AE_MCQE_CQID_MASK = 0xFFFF, 321 + OCRDMA_AE_MCQE_VALID = Bit(31), 322 + OCRDMA_AE_MCQE_AE = Bit(30), 323 + OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT = 16, 324 + OCRDMA_AE_MCQE_EVENT_TYPE_MASK = 325 + 0xFF << OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT, 326 + OCRDMA_AE_MCQE_EVENT_CODE_SHIFT = 8, 327 + OCRDMA_AE_MCQE_EVENT_CODE_MASK = 328 + 0xFF << OCRDMA_AE_MCQE_EVENT_CODE_SHIFT 329 + }; 330 + struct ocrdma_ae_mcqe { 331 + u32 qpvalid_qpid; 332 + u32 cqvalid_cqid; 333 + u32 evt_tag; 334 + u32 valid_ae_event; 335 + } __packed; 336 + 337 + enum { 338 + OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT = 16, 339 + OCRDMA_AE_MPA_MCQE_REQ_ID_MASK = 0xFFFF << 340 + OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT, 341 + 342 + OCRDMA_AE_MPA_MCQE_EVENT_CODE_SHIFT = 8, 343 + OCRDMA_AE_MPA_MCQE_EVENT_CODE_MASK = 0xFF << 344 + OCRDMA_AE_MPA_MCQE_EVENT_CODE_SHIFT, 345 + OCRDMA_AE_MPA_MCQE_EVENT_TYPE_SHIFT = 16, 346 + OCRDMA_AE_MPA_MCQE_EVENT_TYPE_MASK = 0xFF << 347 + OCRDMA_AE_MPA_MCQE_EVENT_TYPE_SHIFT, 348 + OCRDMA_AE_MPA_MCQE_EVENT_AE_SHIFT = 30, 349 + OCRDMA_AE_MPA_MCQE_EVENT_AE_MASK = Bit(30), 350 + OCRDMA_AE_MPA_MCQE_EVENT_VALID_SHIFT = 31, 351 + OCRDMA_AE_MPA_MCQE_EVENT_VALID_MASK = Bit(31) 352 + }; 353 + 354 + struct ocrdma_ae_mpa_mcqe { 355 + u32 req_id; 356 + u32 w1; 357 + u32 w2; 358 + u32 valid_ae_event; 359 + } __packed; 360 + 361 + enum { 362 + OCRDMA_AE_QP_MCQE_NEW_QP_STATE_SHIFT = 0, 363 + OCRDMA_AE_QP_MCQE_NEW_QP_STATE_MASK = 0xFFFF, 364 + OCRDMA_AE_QP_MCQE_QP_ID_SHIFT = 16, 365 + OCRDMA_AE_QP_MCQE_QP_ID_MASK = 0xFFFF << 366 + OCRDMA_AE_QP_MCQE_QP_ID_SHIFT, 367 + 368 + OCRDMA_AE_QP_MCQE_EVENT_CODE_SHIFT = 8, 369 + OCRDMA_AE_QP_MCQE_EVENT_CODE_MASK = 
0xFF << 370 + OCRDMA_AE_QP_MCQE_EVENT_CODE_SHIFT, 371 + OCRDMA_AE_QP_MCQE_EVENT_TYPE_SHIFT = 16, 372 + OCRDMA_AE_QP_MCQE_EVENT_TYPE_MASK = 0xFF << 373 + OCRDMA_AE_QP_MCQE_EVENT_TYPE_SHIFT, 374 + OCRDMA_AE_QP_MCQE_EVENT_AE_SHIFT = 30, 375 + OCRDMA_AE_QP_MCQE_EVENT_AE_MASK = Bit(30), 376 + OCRDMA_AE_QP_MCQE_EVENT_VALID_SHIFT = 31, 377 + OCRDMA_AE_QP_MCQE_EVENT_VALID_MASK = Bit(31) 378 + }; 379 + 380 + struct ocrdma_ae_qp_mcqe { 381 + u32 qp_id_state; 382 + u32 w1; 383 + u32 w2; 384 + u32 valid_ae_event; 385 + } __packed; 386 + 387 + #define OCRDMA_ASYNC_EVE_CODE 0x14 388 + 389 + enum OCRDMA_ASYNC_EVENT_TYPE { 390 + OCRDMA_CQ_ERROR = 0x00, 391 + OCRDMA_CQ_OVERRUN_ERROR = 0x01, 392 + OCRDMA_CQ_QPCAT_ERROR = 0x02, 393 + OCRDMA_QP_ACCESS_ERROR = 0x03, 394 + OCRDMA_QP_COMM_EST_EVENT = 0x04, 395 + OCRDMA_SQ_DRAINED_EVENT = 0x05, 396 + OCRDMA_DEVICE_FATAL_EVENT = 0x08, 397 + OCRDMA_SRQCAT_ERROR = 0x0E, 398 + OCRDMA_SRQ_LIMIT_EVENT = 0x0F, 399 + OCRDMA_QP_LAST_WQE_EVENT = 0x10 400 + }; 401 + 402 + /* mailbox command request and responses */ 403 + enum { 404 + OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2, 405 + OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK = Bit(2), 406 + OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT = 3, 407 + OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK = Bit(3), 408 + OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT = 8, 409 + OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK = 0xFFFFFF << 410 + OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT, 411 + 412 + OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT = 16, 413 + OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK = 0xFFFF << 414 + OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT, 415 + OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT = 8, 416 + OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK = 0xFF << 417 + OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT, 418 + 419 + OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, 420 + OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, 421 + 422 + OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, 423 + OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, 424 + OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT = 16, 425 + 
OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK = 0xFFFF << 426 + OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT, 427 + 428 + OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET = 24, 429 + OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK = 0xFF << 430 + OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET, 431 + OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET = 16, 432 + OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK = 0xFF << 433 + OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET, 434 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_OFFSET = 0, 435 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_MASK = 0xFFFF << 436 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_OFFSET, 437 + 438 + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET = 16, 439 + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK = 0xFFFF << 440 + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET, 441 + OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_OFFSET = 0, 442 + OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_MASK = 0xFFFF << 443 + OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_OFFSET, 444 + 445 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET = 16, 446 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK = 0xFFFF << 447 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET, 448 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_OFFSET = 0, 449 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_MASK = 0xFFFF << 450 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_OFFSET, 451 + 452 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_OFFSET = 0, 453 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_MASK = 0xFFFF << 454 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_OFFSET, 455 + 456 + OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET = 16, 457 + OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_MASK = 0xFFFF << 458 + OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, 459 + OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0, 460 + OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF << 461 + OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, 462 + 463 + OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16, 464 + OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF << 465 + OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET, 466 + OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_OFFSET = 0, 467 + OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK = 0xFFFF << 468 + 
OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_OFFSET, 469 + 470 + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_OFFSET = 16, 471 + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_MASK = 0xFFFF << 472 + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_OFFSET, 473 + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0, 474 + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF << 475 + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET, 476 + }; 477 + 478 + struct ocrdma_mbx_query_config { 479 + struct ocrdma_mqe_hdr hdr; 480 + struct ocrdma_mbx_rsp rsp; 481 + u32 qp_srq_cq_ird_ord; 482 + u32 max_pd_ca_ack_delay; 483 + u32 max_write_send_sge; 484 + u32 max_ird_ord_per_qp; 485 + u32 max_shared_ird_ord; 486 + u32 max_mr; 487 + u64 max_mr_size; 488 + u32 max_num_mr_pbl; 489 + u32 max_mw; 490 + u32 max_fmr; 491 + u32 max_pages_per_frmr; 492 + u32 max_mcast_group; 493 + u32 max_mcast_qp_attach; 494 + u32 max_total_mcast_qp_attach; 495 + u32 wqe_rqe_stride_max_dpp_cqs; 496 + u32 max_srq_rpir_qps; 497 + u32 max_dpp_pds_credits; 498 + u32 max_dpp_credits_pds_per_pd; 499 + u32 max_wqes_rqes_per_q; 500 + u32 max_cq_cqes_per_cq; 501 + u32 max_srq_rqe_sge; 502 + } __packed; 503 + 504 + struct ocrdma_fw_ver_rsp { 505 + struct ocrdma_mqe_hdr hdr; 506 + struct ocrdma_mbx_rsp rsp; 507 + 508 + u8 running_ver[32]; 509 + } __packed; 510 + 511 + struct ocrdma_fw_conf_rsp { 512 + struct ocrdma_mqe_hdr hdr; 513 + struct ocrdma_mbx_rsp rsp; 514 + 515 + u32 config_num; 516 + u32 asic_revision; 517 + u32 phy_port; 518 + u32 fn_mode; 519 + struct { 520 + u32 mode; 521 + u32 nic_wqid_base; 522 + u32 nic_wq_tot; 523 + u32 prot_wqid_base; 524 + u32 prot_wq_tot; 525 + u32 prot_rqid_base; 526 + u32 prot_rqid_tot; 527 + u32 rsvd[6]; 528 + } ulp[2]; 529 + u32 fn_capabilities; 530 + u32 rsvd1; 531 + u32 rsvd2; 532 + u32 base_eqid; 533 + u32 max_eq; 534 + 535 + } __packed; 536 + 537 + enum { 538 + OCRDMA_FN_MODE_RDMA = 0x4 539 + }; 540 + 541 + enum { 542 + OCRDMA_CREATE_CQ_VER2 = 2, 543 + 544 + OCRDMA_CREATE_CQ_PAGE_CNT_MASK = 0xFFFF, 545 + OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT = 16, 
546 + OCRDMA_CREATE_CQ_PAGE_SIZE_MASK = 0xFF, 547 + 548 + OCRDMA_CREATE_CQ_COALESCWM_SHIFT = 12, 549 + OCRDMA_CREATE_CQ_COALESCWM_MASK = Bit(13) | Bit(12), 550 + OCRDMA_CREATE_CQ_FLAGS_NODELAY = Bit(14), 551 + OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID = Bit(15), 552 + 553 + OCRDMA_CREATE_CQ_EQ_ID_MASK = 0xFFFF, 554 + OCRDMA_CREATE_CQ_CQE_COUNT_MASK = 0xFFFF 555 + }; 556 + 557 + enum { 558 + OCRDMA_CREATE_CQ_VER0 = 0, 559 + OCRDMA_CREATE_CQ_DPP = 1, 560 + OCRDMA_CREATE_CQ_TYPE_SHIFT = 24, 561 + OCRDMA_CREATE_CQ_EQID_SHIFT = 22, 562 + 563 + OCRDMA_CREATE_CQ_CNT_SHIFT = 27, 564 + OCRDMA_CREATE_CQ_FLAGS_VALID = Bit(29), 565 + OCRDMA_CREATE_CQ_FLAGS_EVENTABLE = Bit(31), 566 + OCRDMA_CREATE_CQ_DEF_FLAGS = OCRDMA_CREATE_CQ_FLAGS_VALID | 567 + OCRDMA_CREATE_CQ_FLAGS_EVENTABLE | 568 + OCRDMA_CREATE_CQ_FLAGS_NODELAY 569 + }; 570 + 571 + struct ocrdma_create_cq_cmd { 572 + struct ocrdma_mbx_hdr req; 573 + u32 pgsz_pgcnt; 574 + u32 ev_cnt_flags; 575 + u32 eqn; 576 + u32 cqe_count; 577 + u32 rsvd6; 578 + struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES]; 579 + }; 580 + 581 + struct ocrdma_create_cq { 582 + struct ocrdma_mqe_hdr hdr; 583 + struct ocrdma_create_cq_cmd cmd; 584 + } __packed; 585 + 586 + enum { 587 + OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF 588 + }; 589 + 590 + struct ocrdma_create_cq_cmd_rsp { 591 + struct ocrdma_mbx_rsp rsp; 592 + u32 cq_id; 593 + } __packed; 594 + 595 + struct ocrdma_create_cq_rsp { 596 + struct ocrdma_mqe_hdr hdr; 597 + struct ocrdma_create_cq_cmd_rsp rsp; 598 + } __packed; 599 + 600 + enum { 601 + OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT = 22, 602 + OCRDMA_CREATE_MQ_CQ_ID_SHIFT = 16, 603 + OCRDMA_CREATE_MQ_RING_SIZE_SHIFT = 16, 604 + OCRDMA_CREATE_MQ_VALID = Bit(31), 605 + OCRDMA_CREATE_MQ_ASYNC_CQ_VALID = Bit(0) 606 + }; 607 + 608 + struct ocrdma_create_mq_v0 { 609 + u32 pages; 610 + u32 cqid_ringsize; 611 + u32 valid; 612 + u32 async_cqid_valid; 613 + u32 rsvd; 614 + struct ocrdma_pa pa[8]; 615 + } __packed; 616 + 617 + struct ocrdma_create_mq_v1 { 618 + u32 
cqid_pages; 619 + u32 async_event_bitmap; 620 + u32 async_cqid_ringsize; 621 + u32 valid; 622 + u32 async_cqid_valid; 623 + u32 rsvd; 624 + struct ocrdma_pa pa[8]; 625 + } __packed; 626 + 627 + struct ocrdma_create_mq_req { 628 + struct ocrdma_mbx_hdr req; 629 + union { 630 + struct ocrdma_create_mq_v0 v0; 631 + struct ocrdma_create_mq_v1 v1; 632 + }; 633 + } __packed; 634 + 635 + struct ocrdma_create_mq_rsp { 636 + struct ocrdma_mbx_rsp rsp; 637 + u32 id; 638 + } __packed; 639 + 640 + enum { 641 + OCRDMA_DESTROY_CQ_QID_SHIFT = 0, 642 + OCRDMA_DESTROY_CQ_QID_MASK = 0xFFFF, 643 + OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_SHIFT = 16, 644 + OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_MASK = 0xFFFF << 645 + OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_SHIFT 646 + }; 647 + 648 + struct ocrdma_destroy_cq { 649 + struct ocrdma_mqe_hdr hdr; 650 + struct ocrdma_mbx_hdr req; 651 + 652 + u32 bypass_flush_qid; 653 + } __packed; 654 + 655 + struct ocrdma_destroy_cq_rsp { 656 + struct ocrdma_mqe_hdr hdr; 657 + struct ocrdma_mbx_rsp rsp; 658 + } __packed; 659 + 660 + enum { 661 + OCRDMA_QPT_GSI = 1, 662 + OCRDMA_QPT_RC = 2, 663 + OCRDMA_QPT_UD = 4, 664 + }; 665 + 666 + enum { 667 + OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT = 0, 668 + OCRDMA_CREATE_QP_REQ_PD_ID_MASK = 0xFFFF, 669 + OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT = 16, 670 + OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT = 19, 671 + OCRDMA_CREATE_QP_REQ_QPT_SHIFT = 29, 672 + OCRDMA_CREATE_QP_REQ_QPT_MASK = Bit(31) | Bit(30) | Bit(29), 673 + 674 + OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT = 0, 675 + OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK = 0xFFFF, 676 + OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT = 16, 677 + OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK = 0xFFFF << 678 + OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT, 679 + 680 + OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT = 0, 681 + OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK = 0xFFFF, 682 + OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT = 16, 683 + OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK = 0xFFFF << 684 + OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT, 685 + 686 + 
OCRDMA_CREATE_QP_REQ_FMR_EN_SHIFT = 0, 687 + OCRDMA_CREATE_QP_REQ_FMR_EN_MASK = Bit(0), 688 + OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_SHIFT = 1, 689 + OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK = Bit(1), 690 + OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_SHIFT = 2, 691 + OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK = Bit(2), 692 + OCRDMA_CREATE_QP_REQ_INB_WREN_SHIFT = 3, 693 + OCRDMA_CREATE_QP_REQ_INB_WREN_MASK = Bit(3), 694 + OCRDMA_CREATE_QP_REQ_INB_RDEN_SHIFT = 4, 695 + OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK = Bit(4), 696 + OCRDMA_CREATE_QP_REQ_USE_SRQ_SHIFT = 5, 697 + OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK = Bit(5), 698 + OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_SHIFT = 6, 699 + OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_MASK = Bit(6), 700 + OCRDMA_CREATE_QP_REQ_ENABLE_DPP_SHIFT = 7, 701 + OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK = Bit(7), 702 + OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_SHIFT = 8, 703 + OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_MASK = Bit(8), 704 + OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT = 16, 705 + OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK = 0xFFFF << 706 + OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT, 707 + 708 + OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT = 0, 709 + OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK = 0xFFFF, 710 + OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT = 16, 711 + OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK = 0xFFFF << 712 + OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT, 713 + 714 + OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT = 0, 715 + OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK = 0xFFFF, 716 + OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT = 16, 717 + OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK = 0xFFFF << 718 + OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT, 719 + 720 + OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT = 0, 721 + OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK = 0xFFFF, 722 + OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT = 16, 723 + OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK = 0xFFFF << 724 + OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT, 725 + 726 + OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT = 0, 727 + OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK = 0xFFFF, 728 + OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT = 16, 729 + 
OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK = 0xFFFF << 730 + OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT, 731 + 732 + OCRDMA_CREATE_QP_REQ_DPP_CQPID_SHIFT = 0, 733 + OCRDMA_CREATE_QP_REQ_DPP_CQPID_MASK = 0xFFFF, 734 + OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT = 16, 735 + OCRDMA_CREATE_QP_REQ_DPP_CREDIT_MASK = 0xFFFF << 736 + OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT 737 + }; 738 + 739 + enum { 740 + OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT = 16, 741 + OCRDMA_CREATE_QP_RSP_DPP_PAGE_SHIFT = 1 742 + }; 743 + 744 + #define MAX_OCRDMA_IRD_PAGES 4 745 + 746 + enum ocrdma_qp_flags { 747 + OCRDMA_QP_MW_BIND = 1, 748 + OCRDMA_QP_LKEY0 = (1 << 1), 749 + OCRDMA_QP_FAST_REG = (1 << 2), 750 + OCRDMA_QP_INB_RD = (1 << 6), 751 + OCRDMA_QP_INB_WR = (1 << 7), 752 + }; 753 + 754 + enum ocrdma_qp_state { 755 + OCRDMA_QPS_RST = 0, 756 + OCRDMA_QPS_INIT = 1, 757 + OCRDMA_QPS_RTR = 2, 758 + OCRDMA_QPS_RTS = 3, 759 + OCRDMA_QPS_SQE = 4, 760 + OCRDMA_QPS_SQ_DRAINING = 5, 761 + OCRDMA_QPS_ERR = 6, 762 + OCRDMA_QPS_SQD = 7 763 + }; 764 + 765 + struct ocrdma_create_qp_req { 766 + struct ocrdma_mqe_hdr hdr; 767 + struct ocrdma_mbx_hdr req; 768 + 769 + u32 type_pgsz_pdn; 770 + u32 max_wqe_rqe; 771 + u32 max_sge_send_write; 772 + u32 max_sge_recv_flags; 773 + u32 max_ord_ird; 774 + u32 num_wq_rq_pages; 775 + u32 wqe_rqe_size; 776 + u32 wq_rq_cqid; 777 + struct ocrdma_pa wq_addr[MAX_OCRDMA_QP_PAGES]; 778 + struct ocrdma_pa rq_addr[MAX_OCRDMA_QP_PAGES]; 779 + u32 dpp_credits_cqid; 780 + u32 rpir_lkey; 781 + struct ocrdma_pa ird_addr[MAX_OCRDMA_IRD_PAGES]; 782 + } __packed; 783 + 784 + enum { 785 + OCRDMA_CREATE_QP_RSP_QP_ID_SHIFT = 0, 786 + OCRDMA_CREATE_QP_RSP_QP_ID_MASK = 0xFFFF, 787 + 788 + OCRDMA_CREATE_QP_RSP_MAX_RQE_SHIFT = 0, 789 + OCRDMA_CREATE_QP_RSP_MAX_RQE_MASK = 0xFFFF, 790 + OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT = 16, 791 + OCRDMA_CREATE_QP_RSP_MAX_WQE_MASK = 0xFFFF << 792 + OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT, 793 + 794 + OCRDMA_CREATE_QP_RSP_MAX_SGE_WRITE_SHIFT = 0, 795 + OCRDMA_CREATE_QP_RSP_MAX_SGE_WRITE_MASK = 
0xFFFF, 796 + OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_SHIFT = 16, 797 + OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_MASK = 0xFFFF << 798 + OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_SHIFT, 799 + 800 + OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_SHIFT = 16, 801 + OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_MASK = 0xFFFF << 802 + OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_SHIFT, 803 + 804 + OCRDMA_CREATE_QP_RSP_MAX_IRD_SHIFT = 0, 805 + OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK = 0xFFFF, 806 + OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT = 16, 807 + OCRDMA_CREATE_QP_RSP_MAX_ORD_MASK = 0xFFFF << 808 + OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT, 809 + 810 + OCRDMA_CREATE_QP_RSP_RQ_ID_SHIFT = 0, 811 + OCRDMA_CREATE_QP_RSP_RQ_ID_MASK = 0xFFFF, 812 + OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT = 16, 813 + OCRDMA_CREATE_QP_RSP_SQ_ID_MASK = 0xFFFF << 814 + OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT, 815 + 816 + OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK = Bit(0), 817 + OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT = 1, 818 + OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK = 0x7FFF << 819 + OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT, 820 + OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT = 16, 821 + OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK = 0xFFFF << 822 + OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT, 823 + }; 824 + 825 + struct ocrdma_create_qp_rsp { 826 + struct ocrdma_mqe_hdr hdr; 827 + struct ocrdma_mbx_rsp rsp; 828 + 829 + u32 qp_id; 830 + u32 max_wqe_rqe; 831 + u32 max_sge_send_write; 832 + u32 max_sge_recv; 833 + u32 max_ord_ird; 834 + u32 sq_rq_id; 835 + u32 dpp_response; 836 + } __packed; 837 + 838 + struct ocrdma_destroy_qp { 839 + struct ocrdma_mqe_hdr hdr; 840 + struct ocrdma_mbx_hdr req; 841 + u32 qp_id; 842 + } __packed; 843 + 844 + struct ocrdma_destroy_qp_rsp { 845 + struct ocrdma_mqe_hdr hdr; 846 + struct ocrdma_mbx_rsp rsp; 847 + } __packed; 848 + 849 + enum { 850 + OCRDMA_MODIFY_QP_ID_SHIFT = 0, 851 + OCRDMA_MODIFY_QP_ID_MASK = 0xFFFF, 852 + 853 + OCRDMA_QP_PARA_QPS_VALID = Bit(0), 854 + OCRDMA_QP_PARA_SQD_ASYNC_VALID = Bit(1), 855 + OCRDMA_QP_PARA_PKEY_VALID = Bit(2), 856 + 
OCRDMA_QP_PARA_QKEY_VALID = Bit(3), 857 + OCRDMA_QP_PARA_PMTU_VALID = Bit(4), 858 + OCRDMA_QP_PARA_ACK_TO_VALID = Bit(5), 859 + OCRDMA_QP_PARA_RETRY_CNT_VALID = Bit(6), 860 + OCRDMA_QP_PARA_RRC_VALID = Bit(7), 861 + OCRDMA_QP_PARA_RQPSN_VALID = Bit(8), 862 + OCRDMA_QP_PARA_MAX_IRD_VALID = Bit(9), 863 + OCRDMA_QP_PARA_MAX_ORD_VALID = Bit(10), 864 + OCRDMA_QP_PARA_RNT_VALID = Bit(11), 865 + OCRDMA_QP_PARA_SQPSN_VALID = Bit(12), 866 + OCRDMA_QP_PARA_DST_QPN_VALID = Bit(13), 867 + OCRDMA_QP_PARA_MAX_WQE_VALID = Bit(14), 868 + OCRDMA_QP_PARA_MAX_RQE_VALID = Bit(15), 869 + OCRDMA_QP_PARA_SGE_SEND_VALID = Bit(16), 870 + OCRDMA_QP_PARA_SGE_RECV_VALID = Bit(17), 871 + OCRDMA_QP_PARA_SGE_WR_VALID = Bit(18), 872 + OCRDMA_QP_PARA_INB_RDEN_VALID = Bit(19), 873 + OCRDMA_QP_PARA_INB_WREN_VALID = Bit(20), 874 + OCRDMA_QP_PARA_FLOW_LBL_VALID = Bit(21), 875 + OCRDMA_QP_PARA_BIND_EN_VALID = Bit(22), 876 + OCRDMA_QP_PARA_ZLKEY_EN_VALID = Bit(23), 877 + OCRDMA_QP_PARA_FMR_EN_VALID = Bit(24), 878 + OCRDMA_QP_PARA_INBAT_EN_VALID = Bit(25), 879 + OCRDMA_QP_PARA_VLAN_EN_VALID = Bit(26), 880 + 881 + OCRDMA_MODIFY_QP_FLAGS_RD = Bit(0), 882 + OCRDMA_MODIFY_QP_FLAGS_WR = Bit(1), 883 + OCRDMA_MODIFY_QP_FLAGS_SEND = Bit(2), 884 + OCRDMA_MODIFY_QP_FLAGS_ATOMIC = Bit(3) 885 + }; 886 + 887 + enum { 888 + OCRDMA_QP_PARAMS_SRQ_ID_SHIFT = 0, 889 + OCRDMA_QP_PARAMS_SRQ_ID_MASK = 0xFFFF, 890 + 891 + OCRDMA_QP_PARAMS_MAX_RQE_SHIFT = 0, 892 + OCRDMA_QP_PARAMS_MAX_RQE_MASK = 0xFFFF, 893 + OCRDMA_QP_PARAMS_MAX_WQE_SHIFT = 16, 894 + OCRDMA_QP_PARAMS_MAX_WQE_MASK = 0xFFFF << 895 + OCRDMA_QP_PARAMS_MAX_WQE_SHIFT, 896 + 897 + OCRDMA_QP_PARAMS_MAX_SGE_WRITE_SHIFT = 0, 898 + OCRDMA_QP_PARAMS_MAX_SGE_WRITE_MASK = 0xFFFF, 899 + OCRDMA_QP_PARAMS_MAX_SGE_SEND_SHIFT = 16, 900 + OCRDMA_QP_PARAMS_MAX_SGE_SEND_MASK = 0xFFFF << 901 + OCRDMA_QP_PARAMS_MAX_SGE_SEND_SHIFT, 902 + 903 + OCRDMA_QP_PARAMS_FLAGS_FMR_EN = Bit(0), 904 + OCRDMA_QP_PARAMS_FLAGS_LKEY_0_EN = Bit(1), 905 + OCRDMA_QP_PARAMS_FLAGS_BIND_MW_EN = Bit(2), 906 
+ OCRDMA_QP_PARAMS_FLAGS_INBWR_EN = Bit(3), 907 + OCRDMA_QP_PARAMS_FLAGS_INBRD_EN = Bit(4), 908 + OCRDMA_QP_PARAMS_STATE_SHIFT = 5, 909 + OCRDMA_QP_PARAMS_STATE_MASK = Bit(5) | Bit(6) | Bit(7), 910 + OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC = Bit(8), 911 + OCRDMA_QP_PARAMS_FLAGS_INB_ATEN = Bit(9), 912 + OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT = 16, 913 + OCRDMA_QP_PARAMS_MAX_SGE_RECV_MASK = 0xFFFF << 914 + OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT, 915 + 916 + OCRDMA_QP_PARAMS_MAX_IRD_SHIFT = 0, 917 + OCRDMA_QP_PARAMS_MAX_IRD_MASK = 0xFFFF, 918 + OCRDMA_QP_PARAMS_MAX_ORD_SHIFT = 16, 919 + OCRDMA_QP_PARAMS_MAX_ORD_MASK = 0xFFFF << 920 + OCRDMA_QP_PARAMS_MAX_ORD_SHIFT, 921 + 922 + OCRDMA_QP_PARAMS_RQ_CQID_SHIFT = 0, 923 + OCRDMA_QP_PARAMS_RQ_CQID_MASK = 0xFFFF, 924 + OCRDMA_QP_PARAMS_WQ_CQID_SHIFT = 16, 925 + OCRDMA_QP_PARAMS_WQ_CQID_MASK = 0xFFFF << 926 + OCRDMA_QP_PARAMS_WQ_CQID_SHIFT, 927 + 928 + OCRDMA_QP_PARAMS_RQ_PSN_SHIFT = 0, 929 + OCRDMA_QP_PARAMS_RQ_PSN_MASK = 0xFFFFFF, 930 + OCRDMA_QP_PARAMS_HOP_LMT_SHIFT = 24, 931 + OCRDMA_QP_PARAMS_HOP_LMT_MASK = 0xFF << 932 + OCRDMA_QP_PARAMS_HOP_LMT_SHIFT, 933 + 934 + OCRDMA_QP_PARAMS_SQ_PSN_SHIFT = 0, 935 + OCRDMA_QP_PARAMS_SQ_PSN_MASK = 0xFFFFFF, 936 + OCRDMA_QP_PARAMS_TCLASS_SHIFT = 24, 937 + OCRDMA_QP_PARAMS_TCLASS_MASK = 0xFF << 938 + OCRDMA_QP_PARAMS_TCLASS_SHIFT, 939 + 940 + OCRDMA_QP_PARAMS_DEST_QPN_SHIFT = 0, 941 + OCRDMA_QP_PARAMS_DEST_QPN_MASK = 0xFFFFFF, 942 + OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT = 24, 943 + OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK = 0x7 << 944 + OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT, 945 + OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT = 27, 946 + OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK = 0x1F << 947 + OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT, 948 + 949 + OCRDMA_QP_PARAMS_PKEY_IDNEX_SHIFT = 0, 950 + OCRDMA_QP_PARAMS_PKEY_INDEX_MASK = 0xFFFF, 951 + OCRDMA_QP_PARAMS_PATH_MTU_SHIFT = 18, 952 + OCRDMA_QP_PARAMS_PATH_MTU_MASK = 0x3FFF << 953 + OCRDMA_QP_PARAMS_PATH_MTU_SHIFT, 954 + 955 + OCRDMA_QP_PARAMS_FLOW_LABEL_SHIFT = 0, 956 + 
OCRDMA_QP_PARAMS_FLOW_LABEL_MASK = 0xFFFFF, 957 + OCRDMA_QP_PARAMS_SL_SHIFT = 20, 958 + OCRDMA_QP_PARAMS_SL_MASK = 0xF << 959 + OCRDMA_QP_PARAMS_SL_SHIFT, 960 + OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT = 24, 961 + OCRDMA_QP_PARAMS_RETRY_CNT_MASK = 0x7 << 962 + OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT, 963 + OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT = 27, 964 + OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK = 0x1F << 965 + OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT, 966 + 967 + OCRDMA_QP_PARAMS_DMAC_B4_TO_B5_SHIFT = 0, 968 + OCRDMA_QP_PARAMS_DMAC_B4_TO_B5_MASK = 0xFFFF, 969 + OCRDMA_QP_PARAMS_VLAN_SHIFT = 16, 970 + OCRDMA_QP_PARAMS_VLAN_MASK = 0xFFFF << 971 + OCRDMA_QP_PARAMS_VLAN_SHIFT 972 + }; 973 + 974 + struct ocrdma_qp_params { 975 + u32 id; 976 + u32 max_wqe_rqe; 977 + u32 max_sge_send_write; 978 + u32 max_sge_recv_flags; 979 + u32 max_ord_ird; 980 + u32 wq_rq_cqid; 981 + u32 hop_lmt_rq_psn; 982 + u32 tclass_sq_psn; 983 + u32 ack_to_rnr_rtc_dest_qpn; 984 + u32 path_mtu_pkey_indx; 985 + u32 rnt_rc_sl_fl; 986 + u8 sgid[16]; 987 + u8 dgid[16]; 988 + u32 dmac_b0_to_b3; 989 + u32 vlan_dmac_b4_to_b5; 990 + u32 qkey; 991 + } __packed; 992 + 993 + 994 + struct ocrdma_modify_qp { 995 + struct ocrdma_mqe_hdr hdr; 996 + struct ocrdma_mbx_hdr req; 997 + 998 + struct ocrdma_qp_params params; 999 + u32 flags; 1000 + u32 rdma_flags; 1001 + u32 num_outstanding_atomic_rd; 1002 + } __packed; 1003 + 1004 + enum { 1005 + OCRDMA_MODIFY_QP_RSP_MAX_RQE_SHIFT = 0, 1006 + OCRDMA_MODIFY_QP_RSP_MAX_RQE_MASK = 0xFFFF, 1007 + OCRDMA_MODIFY_QP_RSP_MAX_WQE_SHIFT = 16, 1008 + OCRDMA_MODIFY_QP_RSP_MAX_WQE_MASK = 0xFFFF << 1009 + OCRDMA_MODIFY_QP_RSP_MAX_WQE_SHIFT, 1010 + 1011 + OCRDMA_MODIFY_QP_RSP_MAX_IRD_SHIFT = 0, 1012 + OCRDMA_MODIFY_QP_RSP_MAX_IRD_MASK = 0xFFFF, 1013 + OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT = 16, 1014 + OCRDMA_MODIFY_QP_RSP_MAX_ORD_MASK = 0xFFFF << 1015 + OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT 1016 + }; 1017 + struct ocrdma_modify_qp_rsp { 1018 + struct ocrdma_mqe_hdr hdr; 1019 + struct ocrdma_mbx_rsp rsp; 1020 + 
1021 + u32 max_wqe_rqe; 1022 + u32 max_ord_ird; 1023 + } __packed; 1024 + 1025 + struct ocrdma_query_qp { 1026 + struct ocrdma_mqe_hdr hdr; 1027 + struct ocrdma_mbx_hdr req; 1028 + 1029 + #define OCRDMA_QUERY_UP_QP_ID_SHIFT 0 1030 + #define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF 1031 + u32 qp_id; 1032 + } __packed; 1033 + 1034 + struct ocrdma_query_qp_rsp { 1035 + struct ocrdma_mqe_hdr hdr; 1036 + struct ocrdma_mbx_rsp rsp; 1037 + struct ocrdma_qp_params params; 1038 + } __packed; 1039 + 1040 + enum { 1041 + OCRDMA_CREATE_SRQ_PD_ID_SHIFT = 0, 1042 + OCRDMA_CREATE_SRQ_PD_ID_MASK = 0xFFFF, 1043 + OCRDMA_CREATE_SRQ_PG_SZ_SHIFT = 16, 1044 + OCRDMA_CREATE_SRQ_PG_SZ_MASK = 0x3 << 1045 + OCRDMA_CREATE_SRQ_PG_SZ_SHIFT, 1046 + 1047 + OCRDMA_CREATE_SRQ_MAX_RQE_SHIFT = 0, 1048 + OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT = 16, 1049 + OCRDMA_CREATE_SRQ_MAX_SGE_RECV_MASK = 0xFFFF << 1050 + OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT, 1051 + 1052 + OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT = 0, 1053 + OCRDMA_CREATE_SRQ_RQE_SIZE_MASK = 0xFFFF, 1054 + OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT = 16, 1055 + OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_MASK = 0xFFFF << 1056 + OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT 1057 + }; 1058 + 1059 + struct ocrdma_create_srq { 1060 + struct ocrdma_mqe_hdr hdr; 1061 + struct ocrdma_mbx_hdr req; 1062 + 1063 + u32 pgsz_pdid; 1064 + u32 max_sge_rqe; 1065 + u32 pages_rqe_sz; 1066 + struct ocrdma_pa rq_addr[MAX_OCRDMA_SRQ_PAGES]; 1067 + } __packed; 1068 + 1069 + enum { 1070 + OCRDMA_CREATE_SRQ_RSP_SRQ_ID_SHIFT = 0, 1071 + OCRDMA_CREATE_SRQ_RSP_SRQ_ID_MASK = 0xFFFFFF, 1072 + 1073 + OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT = 0, 1074 + OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK = 0xFFFF, 1075 + OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT = 16, 1076 + OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK = 0xFFFF << 1077 + OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT 1078 + }; 1079 + 1080 + struct ocrdma_create_srq_rsp { 1081 + struct ocrdma_mqe_hdr hdr; 1082 + struct ocrdma_mbx_rsp 
rsp; 1083 + 1084 + u32 id; 1085 + u32 max_sge_rqe_allocated; 1086 + } __packed; 1087 + 1088 + enum { 1089 + OCRDMA_MODIFY_SRQ_ID_SHIFT = 0, 1090 + OCRDMA_MODIFY_SRQ_ID_MASK = 0xFFFFFF, 1091 + 1092 + OCRDMA_MODIFY_SRQ_MAX_RQE_SHIFT = 0, 1093 + OCRDMA_MODIFY_SRQ_MAX_RQE_MASK = 0xFFFF, 1094 + OCRDMA_MODIFY_SRQ_LIMIT_SHIFT = 16, 1095 + OCRDMA_MODIFY_SRQ__LIMIT_MASK = 0xFFFF << 1096 + OCRDMA_MODIFY_SRQ_LIMIT_SHIFT 1097 + }; 1098 + 1099 + struct ocrdma_modify_srq { 1100 + struct ocrdma_mqe_hdr hdr; 1101 + struct ocrdma_mbx_rsp rep; 1102 + 1103 + u32 id; 1104 + u32 limit_max_rqe; 1105 + } __packed; 1106 + 1107 + enum { 1108 + OCRDMA_QUERY_SRQ_ID_SHIFT = 0, 1109 + OCRDMA_QUERY_SRQ_ID_MASK = 0xFFFFFF 1110 + }; 1111 + 1112 + struct ocrdma_query_srq { 1113 + struct ocrdma_mqe_hdr hdr; 1114 + struct ocrdma_mbx_rsp req; 1115 + 1116 + u32 id; 1117 + } __packed; 1118 + 1119 + enum { 1120 + OCRDMA_QUERY_SRQ_RSP_PD_ID_SHIFT = 0, 1121 + OCRDMA_QUERY_SRQ_RSP_PD_ID_MASK = 0xFFFF, 1122 + OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT = 16, 1123 + OCRDMA_QUERY_SRQ_RSP_MAX_RQE_MASK = 0xFFFF << 1124 + OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT, 1125 + 1126 + OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_SHIFT = 0, 1127 + OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK = 0xFFFF, 1128 + OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT = 16, 1129 + OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_MASK = 0xFFFF << 1130 + OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT 1131 + }; 1132 + 1133 + struct ocrdma_query_srq_rsp { 1134 + struct ocrdma_mqe_hdr hdr; 1135 + struct ocrdma_mbx_rsp req; 1136 + 1137 + u32 max_rqe_pdid; 1138 + u32 srq_lmt_max_sge; 1139 + } __packed; 1140 + 1141 + enum { 1142 + OCRDMA_DESTROY_SRQ_ID_SHIFT = 0, 1143 + OCRDMA_DESTROY_SRQ_ID_MASK = 0xFFFFFF 1144 + }; 1145 + 1146 + struct ocrdma_destroy_srq { 1147 + struct ocrdma_mqe_hdr hdr; 1148 + struct ocrdma_mbx_rsp req; 1149 + 1150 + u32 id; 1151 + } __packed; 1152 + 1153 + enum { 1154 + OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16), 1155 + OCRDMA_PD_MAX_DPP_ENABLED_QP = 8, 1156 + OCRDMA_DPP_PAGE_SIZE = 4096 1157 + 
}; 1158 + 1159 + struct ocrdma_alloc_pd { 1160 + struct ocrdma_mqe_hdr hdr; 1161 + struct ocrdma_mbx_hdr req; 1162 + u32 enable_dpp_rsvd; 1163 + } __packed; 1164 + 1165 + enum { 1166 + OCRDMA_ALLOC_PD_RSP_DPP = Bit(16), 1167 + OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT = 20, 1168 + OCRDMA_ALLOC_PD_RSP_PDID_MASK = 0xFFFF, 1169 + }; 1170 + 1171 + struct ocrdma_alloc_pd_rsp { 1172 + struct ocrdma_mqe_hdr hdr; 1173 + struct ocrdma_mbx_rsp rsp; 1174 + u32 dpp_page_pdid; 1175 + } __packed; 1176 + 1177 + struct ocrdma_dealloc_pd { 1178 + struct ocrdma_mqe_hdr hdr; 1179 + struct ocrdma_mbx_hdr req; 1180 + u32 id; 1181 + } __packed; 1182 + 1183 + struct ocrdma_dealloc_pd_rsp { 1184 + struct ocrdma_mqe_hdr hdr; 1185 + struct ocrdma_mbx_rsp rsp; 1186 + } __packed; 1187 + 1188 + enum { 1189 + OCRDMA_ADDR_CHECK_ENABLE = 1, 1190 + OCRDMA_ADDR_CHECK_DISABLE = 0 1191 + }; 1192 + 1193 + enum { 1194 + OCRDMA_ALLOC_LKEY_PD_ID_SHIFT = 0, 1195 + OCRDMA_ALLOC_LKEY_PD_ID_MASK = 0xFFFF, 1196 + 1197 + OCRDMA_ALLOC_LKEY_ADDR_CHECK_SHIFT = 0, 1198 + OCRDMA_ALLOC_LKEY_ADDR_CHECK_MASK = Bit(0), 1199 + OCRDMA_ALLOC_LKEY_FMR_SHIFT = 1, 1200 + OCRDMA_ALLOC_LKEY_FMR_MASK = Bit(1), 1201 + OCRDMA_ALLOC_LKEY_REMOTE_INV_SHIFT = 2, 1202 + OCRDMA_ALLOC_LKEY_REMOTE_INV_MASK = Bit(2), 1203 + OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT = 3, 1204 + OCRDMA_ALLOC_LKEY_REMOTE_WR_MASK = Bit(3), 1205 + OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT = 4, 1206 + OCRDMA_ALLOC_LKEY_REMOTE_RD_MASK = Bit(4), 1207 + OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT = 5, 1208 + OCRDMA_ALLOC_LKEY_LOCAL_WR_MASK = Bit(5), 1209 + OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_MASK = Bit(6), 1210 + OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT = 6, 1211 + OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT = 16, 1212 + OCRDMA_ALLOC_LKEY_PBL_SIZE_MASK = 0xFFFF << 1213 + OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT 1214 + }; 1215 + 1216 + struct ocrdma_alloc_lkey { 1217 + struct ocrdma_mqe_hdr hdr; 1218 + struct ocrdma_mbx_hdr req; 1219 + 1220 + u32 pdid; 1221 + u32 pbl_sz_flags; 1222 + } __packed; 1223 + 1224 + struct 
ocrdma_alloc_lkey_rsp { 1225 + struct ocrdma_mqe_hdr hdr; 1226 + struct ocrdma_mbx_rsp rsp; 1227 + 1228 + u32 lrkey; 1229 + u32 num_pbl_rsvd; 1230 + } __packed; 1231 + 1232 + struct ocrdma_dealloc_lkey { 1233 + struct ocrdma_mqe_hdr hdr; 1234 + struct ocrdma_mbx_hdr req; 1235 + 1236 + u32 lkey; 1237 + u32 rsvd_frmr; 1238 + } __packed; 1239 + 1240 + struct ocrdma_dealloc_lkey_rsp { 1241 + struct ocrdma_mqe_hdr hdr; 1242 + struct ocrdma_mbx_rsp rsp; 1243 + } __packed; 1244 + 1245 + #define MAX_OCRDMA_NSMR_PBL (u32)22 1246 + #define MAX_OCRDMA_PBL_SIZE 65536 1247 + #define MAX_OCRDMA_PBL_PER_LKEY 32767 1248 + 1249 + enum { 1250 + OCRDMA_REG_NSMR_LRKEY_INDEX_SHIFT = 0, 1251 + OCRDMA_REG_NSMR_LRKEY_INDEX_MASK = 0xFFFFFF, 1252 + OCRDMA_REG_NSMR_LRKEY_SHIFT = 24, 1253 + OCRDMA_REG_NSMR_LRKEY_MASK = 0xFF << 1254 + OCRDMA_REG_NSMR_LRKEY_SHIFT, 1255 + 1256 + OCRDMA_REG_NSMR_PD_ID_SHIFT = 0, 1257 + OCRDMA_REG_NSMR_PD_ID_MASK = 0xFFFF, 1258 + OCRDMA_REG_NSMR_NUM_PBL_SHIFT = 16, 1259 + OCRDMA_REG_NSMR_NUM_PBL_MASK = 0xFFFF << 1260 + OCRDMA_REG_NSMR_NUM_PBL_SHIFT, 1261 + 1262 + OCRDMA_REG_NSMR_PBE_SIZE_SHIFT = 0, 1263 + OCRDMA_REG_NSMR_PBE_SIZE_MASK = 0xFFFF, 1264 + OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT = 16, 1265 + OCRDMA_REG_NSMR_HPAGE_SIZE_MASK = 0xFF << 1266 + OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT, 1267 + OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT = 24, 1268 + OCRDMA_REG_NSMR_BIND_MEMWIN_MASK = Bit(24), 1269 + OCRDMA_REG_NSMR_ZB_SHIFT = 25, 1270 + OCRDMA_REG_NSMR_ZB_SHIFT_MASK = Bit(25), 1271 + OCRDMA_REG_NSMR_REMOTE_INV_SHIFT = 26, 1272 + OCRDMA_REG_NSMR_REMOTE_INV_MASK = Bit(26), 1273 + OCRDMA_REG_NSMR_REMOTE_WR_SHIFT = 27, 1274 + OCRDMA_REG_NSMR_REMOTE_WR_MASK = Bit(27), 1275 + OCRDMA_REG_NSMR_REMOTE_RD_SHIFT = 28, 1276 + OCRDMA_REG_NSMR_REMOTE_RD_MASK = Bit(28), 1277 + OCRDMA_REG_NSMR_LOCAL_WR_SHIFT = 29, 1278 + OCRDMA_REG_NSMR_LOCAL_WR_MASK = Bit(29), 1279 + OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT = 30, 1280 + OCRDMA_REG_NSMR_REMOTE_ATOMIC_MASK = Bit(30), 1281 + OCRDMA_REG_NSMR_LAST_SHIFT = 
31, 1282 + OCRDMA_REG_NSMR_LAST_MASK = Bit(31) 1283 + }; 1284 + 1285 + struct ocrdma_reg_nsmr { 1286 + struct ocrdma_mqe_hdr hdr; 1287 + struct ocrdma_mbx_hdr cmd; 1288 + 1289 + u32 lrkey_key_index; 1290 + u32 num_pbl_pdid; 1291 + u32 flags_hpage_pbe_sz; 1292 + u32 totlen_low; 1293 + u32 totlen_high; 1294 + u32 fbo_low; 1295 + u32 fbo_high; 1296 + u32 va_loaddr; 1297 + u32 va_hiaddr; 1298 + struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL]; 1299 + } __packed; 1300 + 1301 + enum { 1302 + OCRDMA_REG_NSMR_CONT_PBL_SHIFT = 0, 1303 + OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK = 0xFFFF, 1304 + OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT = 16, 1305 + OCRDMA_REG_NSMR_CONT_NUM_PBL_MASK = 0xFFFF << 1306 + OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT, 1307 + 1308 + OCRDMA_REG_NSMR_CONT_LAST_SHIFT = 31, 1309 + OCRDMA_REG_NSMR_CONT_LAST_MASK = Bit(31) 1310 + }; 1311 + 1312 + struct ocrdma_reg_nsmr_cont { 1313 + struct ocrdma_mqe_hdr hdr; 1314 + struct ocrdma_mbx_hdr cmd; 1315 + 1316 + u32 lrkey; 1317 + u32 num_pbl_offset; 1318 + u32 last; 1319 + 1320 + struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL]; 1321 + } __packed; 1322 + 1323 + struct ocrdma_pbe { 1324 + u32 pa_hi; 1325 + u32 pa_lo; 1326 + } __packed; 1327 + 1328 + enum { 1329 + OCRDMA_REG_NSMR_RSP_NUM_PBL_SHIFT = 16, 1330 + OCRDMA_REG_NSMR_RSP_NUM_PBL_MASK = 0xFFFF0000 1331 + }; 1332 + struct ocrdma_reg_nsmr_rsp { 1333 + struct ocrdma_mqe_hdr hdr; 1334 + struct ocrdma_mbx_rsp rsp; 1335 + 1336 + u32 lrkey; 1337 + u32 num_pbl; 1338 + } __packed; 1339 + 1340 + enum { 1341 + OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_SHIFT = 0, 1342 + OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_MASK = 0xFFFFFF, 1343 + OCRDMA_REG_NSMR_CONT_RSP_LRKEY_SHIFT = 24, 1344 + OCRDMA_REG_NSMR_CONT_RSP_LRKEY_MASK = 0xFF << 1345 + OCRDMA_REG_NSMR_CONT_RSP_LRKEY_SHIFT, 1346 + 1347 + OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_SHIFT = 16, 1348 + OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_MASK = 0xFFFF << 1349 + OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_SHIFT 1350 + }; 1351 + 1352 + struct ocrdma_reg_nsmr_cont_rsp { 1353 + struct 
ocrdma_mqe_hdr hdr; 1354 + struct ocrdma_mbx_rsp rsp; 1355 + 1356 + u32 lrkey_key_index; 1357 + u32 num_pbl; 1358 + } __packed; 1359 + 1360 + enum { 1361 + OCRDMA_ALLOC_MW_PD_ID_SHIFT = 0, 1362 + OCRDMA_ALLOC_MW_PD_ID_MASK = 0xFFFF 1363 + }; 1364 + 1365 + struct ocrdma_alloc_mw { 1366 + struct ocrdma_mqe_hdr hdr; 1367 + struct ocrdma_mbx_hdr req; 1368 + 1369 + u32 pdid; 1370 + } __packed; 1371 + 1372 + enum { 1373 + OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_SHIFT = 0, 1374 + OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_MASK = 0xFFFFFF 1375 + }; 1376 + 1377 + struct ocrdma_alloc_mw_rsp { 1378 + struct ocrdma_mqe_hdr hdr; 1379 + struct ocrdma_mbx_rsp rsp; 1380 + 1381 + u32 lrkey_index; 1382 + } __packed; 1383 + 1384 + struct ocrdma_attach_mcast { 1385 + struct ocrdma_mqe_hdr hdr; 1386 + struct ocrdma_mbx_hdr req; 1387 + u32 qp_id; 1388 + u8 mgid[16]; 1389 + u32 mac_b0_to_b3; 1390 + u32 vlan_mac_b4_to_b5; 1391 + } __packed; 1392 + 1393 + struct ocrdma_attach_mcast_rsp { 1394 + struct ocrdma_mqe_hdr hdr; 1395 + struct ocrdma_mbx_rsp rsp; 1396 + } __packed; 1397 + 1398 + struct ocrdma_detach_mcast { 1399 + struct ocrdma_mqe_hdr hdr; 1400 + struct ocrdma_mbx_hdr req; 1401 + u32 qp_id; 1402 + u8 mgid[16]; 1403 + u32 mac_b0_to_b3; 1404 + u32 vlan_mac_b4_to_b5; 1405 + } __packed; 1406 + 1407 + struct ocrdma_detach_mcast_rsp { 1408 + struct ocrdma_mqe_hdr hdr; 1409 + struct ocrdma_mbx_rsp rsp; 1410 + } __packed; 1411 + 1412 + enum { 1413 + OCRDMA_CREATE_AH_NUM_PAGES_SHIFT = 19, 1414 + OCRDMA_CREATE_AH_NUM_PAGES_MASK = 0xF << 1415 + OCRDMA_CREATE_AH_NUM_PAGES_SHIFT, 1416 + 1417 + OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT = 16, 1418 + OCRDMA_CREATE_AH_PAGE_SIZE_MASK = 0x7 << 1419 + OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT, 1420 + 1421 + OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT = 23, 1422 + OCRDMA_CREATE_AH_ENTRY_SIZE_MASK = 0x1FF << 1423 + OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT, 1424 + }; 1425 + 1426 + #define OCRDMA_AH_TBL_PAGES 8 1427 + 1428 + struct ocrdma_create_ah_tbl { 1429 + struct ocrdma_mqe_hdr hdr; 1430 + struct 
ocrdma_mbx_hdr req; 1431 + 1432 + u32 ah_conf; 1433 + struct ocrdma_pa tbl_addr[8]; 1434 + } __packed; 1435 + 1436 + struct ocrdma_create_ah_tbl_rsp { 1437 + struct ocrdma_mqe_hdr hdr; 1438 + struct ocrdma_mbx_rsp rsp; 1439 + u32 ahid; 1440 + } __packed; 1441 + 1442 + struct ocrdma_delete_ah_tbl { 1443 + struct ocrdma_mqe_hdr hdr; 1444 + struct ocrdma_mbx_hdr req; 1445 + u32 ahid; 1446 + } __packed; 1447 + 1448 + struct ocrdma_delete_ah_tbl_rsp { 1449 + struct ocrdma_mqe_hdr hdr; 1450 + struct ocrdma_mbx_rsp rsp; 1451 + } __packed; 1452 + 1453 + enum { 1454 + OCRDMA_EQE_VALID_SHIFT = 0, 1455 + OCRDMA_EQE_VALID_MASK = Bit(0), 1456 + OCRDMA_EQE_FOR_CQE_MASK = 0xFFFE, 1457 + OCRDMA_EQE_RESOURCE_ID_SHIFT = 16, 1458 + OCRDMA_EQE_RESOURCE_ID_MASK = 0xFFFF << 1459 + OCRDMA_EQE_RESOURCE_ID_SHIFT, 1460 + }; 1461 + 1462 + struct ocrdma_eqe { 1463 + u32 id_valid; 1464 + } __packed; 1465 + 1466 + enum OCRDMA_CQE_STATUS { 1467 + OCRDMA_CQE_SUCCESS = 0, 1468 + OCRDMA_CQE_LOC_LEN_ERR, 1469 + OCRDMA_CQE_LOC_QP_OP_ERR, 1470 + OCRDMA_CQE_LOC_EEC_OP_ERR, 1471 + OCRDMA_CQE_LOC_PROT_ERR, 1472 + OCRDMA_CQE_WR_FLUSH_ERR, 1473 + OCRDMA_CQE_MW_BIND_ERR, 1474 + OCRDMA_CQE_BAD_RESP_ERR, 1475 + OCRDMA_CQE_LOC_ACCESS_ERR, 1476 + OCRDMA_CQE_REM_INV_REQ_ERR, 1477 + OCRDMA_CQE_REM_ACCESS_ERR, 1478 + OCRDMA_CQE_REM_OP_ERR, 1479 + OCRDMA_CQE_RETRY_EXC_ERR, 1480 + OCRDMA_CQE_RNR_RETRY_EXC_ERR, 1481 + OCRDMA_CQE_LOC_RDD_VIOL_ERR, 1482 + OCRDMA_CQE_REM_INV_RD_REQ_ERR, 1483 + OCRDMA_CQE_REM_ABORT_ERR, 1484 + OCRDMA_CQE_INV_EECN_ERR, 1485 + OCRDMA_CQE_INV_EEC_STATE_ERR, 1486 + OCRDMA_CQE_FATAL_ERR, 1487 + OCRDMA_CQE_RESP_TIMEOUT_ERR, 1488 + OCRDMA_CQE_GENERAL_ERR 1489 + }; 1490 + 1491 + enum { 1492 + /* w0 */ 1493 + OCRDMA_CQE_WQEIDX_SHIFT = 0, 1494 + OCRDMA_CQE_WQEIDX_MASK = 0xFFFF, 1495 + 1496 + /* w1 */ 1497 + OCRDMA_CQE_UD_XFER_LEN_SHIFT = 16, 1498 + OCRDMA_CQE_PKEY_SHIFT = 0, 1499 + OCRDMA_CQE_PKEY_MASK = 0xFFFF, 1500 + 1501 + /* w2 */ 1502 + OCRDMA_CQE_QPN_SHIFT = 0, 1503 + OCRDMA_CQE_QPN_MASK = 
0x0000FFFF, 1504 + 1505 + OCRDMA_CQE_BUFTAG_SHIFT = 16, 1506 + OCRDMA_CQE_BUFTAG_MASK = 0xFFFF << OCRDMA_CQE_BUFTAG_SHIFT, 1507 + 1508 + /* w3 */ 1509 + OCRDMA_CQE_UD_STATUS_SHIFT = 24, 1510 + OCRDMA_CQE_UD_STATUS_MASK = 0x7 << OCRDMA_CQE_UD_STATUS_SHIFT, 1511 + OCRDMA_CQE_STATUS_SHIFT = 16, 1512 + OCRDMA_CQE_STATUS_MASK = 0xFF << OCRDMA_CQE_STATUS_SHIFT, 1513 + OCRDMA_CQE_VALID = Bit(31), 1514 + OCRDMA_CQE_INVALIDATE = Bit(30), 1515 + OCRDMA_CQE_QTYPE = Bit(29), 1516 + OCRDMA_CQE_IMM = Bit(28), 1517 + OCRDMA_CQE_WRITE_IMM = Bit(27), 1518 + OCRDMA_CQE_QTYPE_SQ = 0, 1519 + OCRDMA_CQE_QTYPE_RQ = 1, 1520 + OCRDMA_CQE_SRCQP_MASK = 0xFFFFFF 1521 + }; 1522 + 1523 + struct ocrdma_cqe { 1524 + union { 1525 + /* w0 to w2 */ 1526 + struct { 1527 + u32 wqeidx; 1528 + u32 bytes_xfered; 1529 + u32 qpn; 1530 + } wq; 1531 + struct { 1532 + u32 lkey_immdt; 1533 + u32 rxlen; 1534 + u32 buftag_qpn; 1535 + } rq; 1536 + struct { 1537 + u32 lkey_immdt; 1538 + u32 rxlen_pkey; 1539 + u32 buftag_qpn; 1540 + } ud; 1541 + struct { 1542 + u32 word_0; 1543 + u32 word_1; 1544 + u32 qpn; 1545 + } cmn; 1546 + }; 1547 + u32 flags_status_srcqpn; /* w3 */ 1548 + } __packed; 1549 + 1550 + #define is_cqe_valid(cq, cqe) \ 1551 + (((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID)\ 1552 + == cq->phase) ? 1 : 0) 1553 + #define is_cqe_for_sq(cqe) \ 1554 + ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_QTYPE) ? 0 : 1) 1555 + #define is_cqe_for_rq(cqe) \ 1556 + ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_QTYPE) ? 1 : 0) 1557 + #define is_cqe_invalidated(cqe) \ 1558 + ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_INVALIDATE) ? \ 1559 + 1 : 0) 1560 + #define is_cqe_imm(cqe) \ 1561 + ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_IMM) ? 1 : 0) 1562 + #define is_cqe_wr_imm(cqe) \ 1563 + ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_WRITE_IMM) ? 
1 : 0) 1564 + 1565 + struct ocrdma_sge { 1566 + u32 addr_hi; 1567 + u32 addr_lo; 1568 + u32 lrkey; 1569 + u32 len; 1570 + } __packed; 1571 + 1572 + enum { 1573 + OCRDMA_FLAG_SIG = 0x1, 1574 + OCRDMA_FLAG_INV = 0x2, 1575 + OCRDMA_FLAG_FENCE_L = 0x4, 1576 + OCRDMA_FLAG_FENCE_R = 0x8, 1577 + OCRDMA_FLAG_SOLICIT = 0x10, 1578 + OCRDMA_FLAG_IMM = 0x20, 1579 + 1580 + /* Stag flags */ 1581 + OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1, 1582 + OCRDMA_LKEY_FLAG_REMOTE_RD = 0x2, 1583 + OCRDMA_LKEY_FLAG_REMOTE_WR = 0x4, 1584 + OCRDMA_LKEY_FLAG_VATO = 0x8, 1585 + }; 1586 + 1587 + enum OCRDMA_WQE_OPCODE { 1588 + OCRDMA_WRITE = 0x06, 1589 + OCRDMA_READ = 0x0C, 1590 + OCRDMA_RESV0 = 0x02, 1591 + OCRDMA_SEND = 0x00, 1592 + OCRDMA_CMP_SWP = 0x14, 1593 + OCRDMA_BIND_MW = 0x10, 1594 + OCRDMA_RESV1 = 0x0A, 1595 + OCRDMA_LKEY_INV = 0x15, 1596 + OCRDMA_FETCH_ADD = 0x13, 1597 + OCRDMA_POST_RQ = 0x12 1598 + }; 1599 + 1600 + enum { 1601 + OCRDMA_TYPE_INLINE = 0x0, 1602 + OCRDMA_TYPE_LKEY = 0x1, 1603 + }; 1604 + 1605 + enum { 1606 + OCRDMA_WQE_OPCODE_SHIFT = 0, 1607 + OCRDMA_WQE_OPCODE_MASK = 0x0000001F, 1608 + OCRDMA_WQE_FLAGS_SHIFT = 5, 1609 + OCRDMA_WQE_TYPE_SHIFT = 16, 1610 + OCRDMA_WQE_TYPE_MASK = 0x00030000, 1611 + OCRDMA_WQE_SIZE_SHIFT = 18, 1612 + OCRDMA_WQE_SIZE_MASK = 0xFF, 1613 + OCRDMA_WQE_NXT_WQE_SIZE_SHIFT = 25, 1614 + 1615 + OCRDMA_WQE_LKEY_FLAGS_SHIFT = 0, 1616 + OCRDMA_WQE_LKEY_FLAGS_MASK = 0xF 1617 + }; 1618 + 1619 + /* header WQE for all the SQ and RQ operations */ 1620 + struct ocrdma_hdr_wqe { 1621 + u32 cw; 1622 + union { 1623 + u32 rsvd_tag; 1624 + u32 rsvd_lkey_flags; 1625 + }; 1626 + union { 1627 + u32 immdt; 1628 + u32 lkey; 1629 + }; 1630 + u32 total_len; 1631 + } __packed; 1632 + 1633 + struct ocrdma_ewqe_ud_hdr { 1634 + u32 rsvd_dest_qpn; 1635 + u32 qkey; 1636 + u32 rsvd_ahid; 1637 + u32 rsvd; 1638 + } __packed; 1639 + 1640 + struct ocrdma_eth_basic { 1641 + u8 dmac[6]; 1642 + u8 smac[6]; 1643 + __be16 eth_type; 1644 + } __packed; 1645 + 1646 + struct ocrdma_eth_vlan { 1647 
+ u8 dmac[6]; 1648 + u8 smac[6]; 1649 + __be16 eth_type; 1650 + __be16 vlan_tag; 1651 + #define OCRDMA_ROCE_ETH_TYPE 0x8915 1652 + __be16 roce_eth_type; 1653 + } __packed; 1654 + 1655 + struct ocrdma_grh { 1656 + __be32 tclass_flow; 1657 + __be32 pdid_hoplimit; 1658 + u8 sgid[16]; 1659 + u8 dgid[16]; 1660 + u16 rsvd; 1661 + } __packed; 1662 + 1663 + #define OCRDMA_AV_VALID Bit(0) 1664 + #define OCRDMA_AV_VLAN_VALID Bit(1) 1665 + 1666 + struct ocrdma_av { 1667 + struct ocrdma_eth_vlan eth_hdr; 1668 + struct ocrdma_grh grh; 1669 + u32 valid; 1670 + } __packed; 1671 + 1672 + #endif /* __OCRDMA_SLI_H__ */
+2542
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 1 + /******************************************************************* 2 + * This file is part of the Emulex RoCE Device Driver for * 3 + * RoCE (RDMA over Converged Ethernet) adapters. * 4 + * Copyright (C) 2008-2012 Emulex. All rights reserved. * 5 + * EMULEX and SLI are trademarks of Emulex. * 6 + * www.emulex.com * 7 + * * 8 + * This program is free software; you can redistribute it and/or * 9 + * modify it under the terms of version 2 of the GNU General * 10 + * Public License as published by the Free Software Foundation. * 11 + * This program is distributed in the hope that it will be useful. * 12 + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 + * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 + * more details, a copy of which can be found in the file COPYING * 18 + * included with this package. 
* 19 + * 20 + * Contact Information: 21 + * linux-drivers@emulex.com 22 + * 23 + * Emulex 24 + * 3333 Susan Street 25 + * Costa Mesa, CA 92626 26 + *******************************************************************/ 27 + 28 + #include <linux/dma-mapping.h> 29 + #include <rdma/ib_verbs.h> 30 + #include <rdma/ib_user_verbs.h> 31 + #include <rdma/iw_cm.h> 32 + #include <rdma/ib_umem.h> 33 + #include <rdma/ib_addr.h> 34 + 35 + #include "ocrdma.h" 36 + #include "ocrdma_hw.h" 37 + #include "ocrdma_verbs.h" 38 + #include "ocrdma_abi.h" 39 + 40 + int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) 41 + { 42 + if (index > 1) 43 + return -EINVAL; 44 + 45 + *pkey = 0xffff; 46 + return 0; 47 + } 48 + 49 + int ocrdma_query_gid(struct ib_device *ibdev, u8 port, 50 + int index, union ib_gid *sgid) 51 + { 52 + struct ocrdma_dev *dev; 53 + 54 + dev = get_ocrdma_dev(ibdev); 55 + memset(sgid, 0, sizeof(*sgid)); 56 + if (index > OCRDMA_MAX_SGID) 57 + return -EINVAL; 58 + 59 + memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); 60 + 61 + return 0; 62 + } 63 + 64 + int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) 65 + { 66 + struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); 67 + 68 + memset(attr, 0, sizeof *attr); 69 + memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], 70 + min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); 71 + ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); 72 + attr->max_mr_size = ~0ull; 73 + attr->page_size_cap = 0xffff000; 74 + attr->vendor_id = dev->nic_info.pdev->vendor; 75 + attr->vendor_part_id = dev->nic_info.pdev->device; 76 + attr->hw_ver = 0; 77 + attr->max_qp = dev->attr.max_qp; 78 + attr->max_ah = dev->attr.max_qp; 79 + attr->max_qp_wr = dev->attr.max_wqe; 80 + 81 + attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD | 82 + IB_DEVICE_RC_RNR_NAK_GEN | 83 + IB_DEVICE_SHUTDOWN_PORT | 84 + IB_DEVICE_SYS_IMAGE_GUID | 85 + IB_DEVICE_LOCAL_DMA_LKEY; 86 + attr->max_sge = dev->attr.max_send_sge; 87 + 
attr->max_sge_rd = dev->attr.max_send_sge; 88 + attr->max_cq = dev->attr.max_cq; 89 + attr->max_cqe = dev->attr.max_cqe; 90 + attr->max_mr = dev->attr.max_mr; 91 + attr->max_mw = 0; 92 + attr->max_pd = dev->attr.max_pd; 93 + attr->atomic_cap = 0; 94 + attr->max_fmr = 0; 95 + attr->max_map_per_fmr = 0; 96 + attr->max_qp_rd_atom = 97 + min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); 98 + attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; 99 + attr->max_srq = (dev->attr.max_qp - 1); 100 + attr->max_srq_sge = attr->max_sge; 101 + attr->max_srq_wr = dev->attr.max_rqe; 102 + attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; 103 + attr->max_fast_reg_page_list_len = 0; 104 + attr->max_pkeys = 1; 105 + return 0; 106 + } 107 + 108 + int ocrdma_query_port(struct ib_device *ibdev, 109 + u8 port, struct ib_port_attr *props) 110 + { 111 + enum ib_port_state port_state; 112 + struct ocrdma_dev *dev; 113 + struct net_device *netdev; 114 + 115 + dev = get_ocrdma_dev(ibdev); 116 + if (port > 1) { 117 + ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__, 118 + dev->id, port); 119 + return -EINVAL; 120 + } 121 + netdev = dev->nic_info.netdev; 122 + if (netif_running(netdev) && netif_oper_up(netdev)) { 123 + port_state = IB_PORT_ACTIVE; 124 + props->phys_state = 5; 125 + } else { 126 + port_state = IB_PORT_DOWN; 127 + props->phys_state = 3; 128 + } 129 + props->max_mtu = IB_MTU_4096; 130 + props->active_mtu = iboe_get_mtu(netdev->mtu); 131 + props->lid = 0; 132 + props->lmc = 0; 133 + props->sm_lid = 0; 134 + props->sm_sl = 0; 135 + props->state = port_state; 136 + props->port_cap_flags = 137 + IB_PORT_CM_SUP | 138 + IB_PORT_REINIT_SUP | 139 + IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP; 140 + props->gid_tbl_len = OCRDMA_MAX_SGID; 141 + props->pkey_tbl_len = 1; 142 + props->bad_pkey_cntr = 0; 143 + props->qkey_viol_cntr = 0; 144 + props->active_width = IB_WIDTH_1X; 145 + props->active_speed = 4; 146 + props->max_msg_sz = 0x80000000; 147 + props->max_vl_num = 4; 
148 + return 0; 149 + } 150 + 151 + int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask, 152 + struct ib_port_modify *props) 153 + { 154 + struct ocrdma_dev *dev; 155 + 156 + dev = get_ocrdma_dev(ibdev); 157 + if (port > 1) { 158 + ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__, 159 + dev->id, port); 160 + return -EINVAL; 161 + } 162 + return 0; 163 + } 164 + 165 + static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, 166 + unsigned long len) 167 + { 168 + struct ocrdma_mm *mm; 169 + 170 + mm = kzalloc(sizeof(*mm), GFP_KERNEL); 171 + if (mm == NULL) 172 + return -ENOMEM; 173 + mm->key.phy_addr = phy_addr; 174 + mm->key.len = len; 175 + INIT_LIST_HEAD(&mm->entry); 176 + 177 + mutex_lock(&uctx->mm_list_lock); 178 + list_add_tail(&mm->entry, &uctx->mm_head); 179 + mutex_unlock(&uctx->mm_list_lock); 180 + return 0; 181 + } 182 + 183 + static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, 184 + unsigned long len) 185 + { 186 + struct ocrdma_mm *mm, *tmp; 187 + 188 + mutex_lock(&uctx->mm_list_lock); 189 + list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { 190 + if (len != mm->key.len || phy_addr != mm->key.phy_addr) 191 + continue; 192 + 193 + list_del(&mm->entry); 194 + kfree(mm); 195 + break; 196 + } 197 + mutex_unlock(&uctx->mm_list_lock); 198 + } 199 + 200 + static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, 201 + unsigned long len) 202 + { 203 + bool found = false; 204 + struct ocrdma_mm *mm; 205 + 206 + mutex_lock(&uctx->mm_list_lock); 207 + list_for_each_entry(mm, &uctx->mm_head, entry) { 208 + if (len != mm->key.len || phy_addr != mm->key.phy_addr) 209 + continue; 210 + 211 + found = true; 212 + break; 213 + } 214 + mutex_unlock(&uctx->mm_list_lock); 215 + return found; 216 + } 217 + 218 + struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev, 219 + struct ib_udata *udata) 220 + { 221 + int status; 222 + struct ocrdma_ucontext *ctx; 223 + struct 
ocrdma_alloc_ucontext_resp resp; 224 + struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); 225 + struct pci_dev *pdev = dev->nic_info.pdev; 226 + u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE); 227 + 228 + if (!udata) 229 + return ERR_PTR(-EFAULT); 230 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 231 + if (!ctx) 232 + return ERR_PTR(-ENOMEM); 233 + ctx->dev = dev; 234 + INIT_LIST_HEAD(&ctx->mm_head); 235 + mutex_init(&ctx->mm_list_lock); 236 + 237 + ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len, 238 + &ctx->ah_tbl.pa, GFP_KERNEL); 239 + if (!ctx->ah_tbl.va) { 240 + kfree(ctx); 241 + return ERR_PTR(-ENOMEM); 242 + } 243 + memset(ctx->ah_tbl.va, 0, map_len); 244 + ctx->ah_tbl.len = map_len; 245 + 246 + resp.ah_tbl_len = ctx->ah_tbl.len; 247 + resp.ah_tbl_page = ctx->ah_tbl.pa; 248 + 249 + status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len); 250 + if (status) 251 + goto map_err; 252 + resp.dev_id = dev->id; 253 + resp.max_inline_data = dev->attr.max_inline_data; 254 + resp.wqe_size = dev->attr.wqe_size; 255 + resp.rqe_size = dev->attr.rqe_size; 256 + resp.dpp_wqe_size = dev->attr.wqe_size; 257 + resp.rsvd = 0; 258 + 259 + memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver)); 260 + status = ib_copy_to_udata(udata, &resp, sizeof(resp)); 261 + if (status) 262 + goto cpy_err; 263 + return &ctx->ibucontext; 264 + 265 + cpy_err: 266 + ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len); 267 + map_err: 268 + dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va, 269 + ctx->ah_tbl.pa); 270 + kfree(ctx); 271 + return ERR_PTR(status); 272 + } 273 + 274 + int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx) 275 + { 276 + struct ocrdma_mm *mm, *tmp; 277 + struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx); 278 + struct pci_dev *pdev = uctx->dev->nic_info.pdev; 279 + 280 + ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len); 281 + dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va, 282 + uctx->ah_tbl.pa); 283 + 284 
+ list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { 285 + list_del(&mm->entry); 286 + kfree(mm); 287 + } 288 + kfree(uctx); 289 + return 0; 290 + } 291 + 292 + int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 293 + { 294 + struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context); 295 + struct ocrdma_dev *dev = ucontext->dev; 296 + unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT; 297 + u64 unmapped_db = (u64) dev->nic_info.unmapped_db; 298 + unsigned long len = (vma->vm_end - vma->vm_start); 299 + int status = 0; 300 + bool found; 301 + 302 + if (vma->vm_start & (PAGE_SIZE - 1)) 303 + return -EINVAL; 304 + found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len); 305 + if (!found) 306 + return -EINVAL; 307 + 308 + if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db + 309 + dev->nic_info.db_total_size)) && 310 + (len <= dev->nic_info.db_page_size)) { 311 + /* doorbell mapping */ 312 + status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 313 + len, vma->vm_page_prot); 314 + } else if (dev->nic_info.dpp_unmapped_len && 315 + (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) && 316 + (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr + 317 + dev->nic_info.dpp_unmapped_len)) && 318 + (len <= dev->nic_info.dpp_unmapped_len)) { 319 + /* dpp area mapping */ 320 + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 321 + status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 322 + len, vma->vm_page_prot); 323 + } else { 324 + /* queue memory mapping */ 325 + status = remap_pfn_range(vma, vma->vm_start, 326 + vma->vm_pgoff, len, vma->vm_page_prot); 327 + } 328 + return status; 329 + } 330 + 331 + static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd, 332 + struct ib_ucontext *ib_ctx, 333 + struct ib_udata *udata) 334 + { 335 + int status; 336 + u64 db_page_addr; 337 + u64 dpp_page_addr; 338 + u32 db_page_size; 339 + struct ocrdma_alloc_pd_uresp rsp; 340 + struct ocrdma_ucontext *uctx = 
get_ocrdma_ucontext(ib_ctx); 341 + 342 + rsp.id = pd->id; 343 + rsp.dpp_enabled = pd->dpp_enabled; 344 + db_page_addr = pd->dev->nic_info.unmapped_db + 345 + (pd->id * pd->dev->nic_info.db_page_size); 346 + db_page_size = pd->dev->nic_info.db_page_size; 347 + 348 + status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size); 349 + if (status) 350 + return status; 351 + 352 + if (pd->dpp_enabled) { 353 + dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr + 354 + (pd->id * OCRDMA_DPP_PAGE_SIZE); 355 + status = ocrdma_add_mmap(uctx, dpp_page_addr, 356 + OCRDMA_DPP_PAGE_SIZE); 357 + if (status) 358 + goto dpp_map_err; 359 + rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr); 360 + rsp.dpp_page_addr_lo = dpp_page_addr; 361 + } 362 + 363 + status = ib_copy_to_udata(udata, &rsp, sizeof(rsp)); 364 + if (status) 365 + goto ucopy_err; 366 + 367 + pd->uctx = uctx; 368 + return 0; 369 + 370 + ucopy_err: 371 + ocrdma_del_mmap(pd->uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE); 372 + dpp_map_err: 373 + ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size); 374 + return status; 375 + } 376 + 377 + struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev, 378 + struct ib_ucontext *context, 379 + struct ib_udata *udata) 380 + { 381 + struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); 382 + struct ocrdma_pd *pd; 383 + int status; 384 + 385 + pd = kzalloc(sizeof(*pd), GFP_KERNEL); 386 + if (!pd) 387 + return ERR_PTR(-ENOMEM); 388 + pd->dev = dev; 389 + if (udata && context) { 390 + pd->dpp_enabled = (dev->nic_info.dev_family == 391 + OCRDMA_GEN2_FAMILY) ? true : false; 392 + pd->num_dpp_qp = 393 + pd->dpp_enabled ? 
OCRDMA_PD_MAX_DPP_ENABLED_QP : 0; 394 + } 395 + status = ocrdma_mbx_alloc_pd(dev, pd); 396 + if (status) { 397 + kfree(pd); 398 + return ERR_PTR(status); 399 + } 400 + atomic_set(&pd->use_cnt, 0); 401 + 402 + if (udata && context) { 403 + status = ocrdma_copy_pd_uresp(pd, context, udata); 404 + if (status) 405 + goto err; 406 + } 407 + return &pd->ibpd; 408 + 409 + err: 410 + ocrdma_dealloc_pd(&pd->ibpd); 411 + return ERR_PTR(status); 412 + } 413 + 414 + int ocrdma_dealloc_pd(struct ib_pd *ibpd) 415 + { 416 + struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 417 + struct ocrdma_dev *dev = pd->dev; 418 + int status; 419 + u64 usr_db; 420 + 421 + if (atomic_read(&pd->use_cnt)) { 422 + ocrdma_err("%s(%d) pd=0x%x is in use.\n", 423 + __func__, dev->id, pd->id); 424 + status = -EFAULT; 425 + goto dealloc_err; 426 + } 427 + status = ocrdma_mbx_dealloc_pd(dev, pd); 428 + if (pd->uctx) { 429 + u64 dpp_db = dev->nic_info.dpp_unmapped_addr + 430 + (pd->id * OCRDMA_DPP_PAGE_SIZE); 431 + if (pd->dpp_enabled) 432 + ocrdma_del_mmap(pd->uctx, dpp_db, OCRDMA_DPP_PAGE_SIZE); 433 + usr_db = dev->nic_info.unmapped_db + 434 + (pd->id * dev->nic_info.db_page_size); 435 + ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size); 436 + } 437 + kfree(pd); 438 + dealloc_err: 439 + return status; 440 + } 441 + 442 + static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd, 443 + int acc, u32 num_pbls, 444 + u32 addr_check) 445 + { 446 + int status; 447 + struct ocrdma_mr *mr; 448 + struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 449 + struct ocrdma_dev *dev = pd->dev; 450 + 451 + if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) { 452 + ocrdma_err("%s(%d) leaving err, invalid access rights\n", 453 + __func__, dev->id); 454 + return ERR_PTR(-EINVAL); 455 + } 456 + 457 + mr = kzalloc(sizeof(*mr), GFP_KERNEL); 458 + if (!mr) 459 + return ERR_PTR(-ENOMEM); 460 + mr->hwmr.dev = dev; 461 + mr->hwmr.fr_mr = 0; 462 + mr->hwmr.local_rd = 1; 463 + mr->hwmr.remote_rd = (acc & 
IB_ACCESS_REMOTE_READ) ? 1 : 0; 464 + mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; 465 + mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; 466 + mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0; 467 + mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; 468 + mr->hwmr.num_pbls = num_pbls; 469 + 470 + status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pd->id, addr_check); 471 + if (status) { 472 + kfree(mr); 473 + return ERR_PTR(-ENOMEM); 474 + } 475 + mr->pd = pd; 476 + atomic_inc(&pd->use_cnt); 477 + mr->ibmr.lkey = mr->hwmr.lkey; 478 + if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) 479 + mr->ibmr.rkey = mr->hwmr.lkey; 480 + return mr; 481 + } 482 + 483 + struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc) 484 + { 485 + struct ocrdma_mr *mr; 486 + 487 + mr = ocrdma_alloc_lkey(ibpd, acc, 0, OCRDMA_ADDR_CHECK_DISABLE); 488 + if (!mr) 489 + return ERR_PTR(-ENOMEM); 490 + 491 + return &mr->ibmr; 492 + } 493 + 494 + static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev, 495 + struct ocrdma_hw_mr *mr) 496 + { 497 + struct pci_dev *pdev = dev->nic_info.pdev; 498 + int i = 0; 499 + 500 + if (mr->pbl_table) { 501 + for (i = 0; i < mr->num_pbls; i++) { 502 + if (!mr->pbl_table[i].va) 503 + continue; 504 + dma_free_coherent(&pdev->dev, mr->pbl_size, 505 + mr->pbl_table[i].va, 506 + mr->pbl_table[i].pa); 507 + } 508 + kfree(mr->pbl_table); 509 + mr->pbl_table = NULL; 510 + } 511 + } 512 + 513 + static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes) 514 + { 515 + u32 num_pbls = 0; 516 + u32 idx = 0; 517 + int status = 0; 518 + u32 pbl_size; 519 + 520 + do { 521 + pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx); 522 + if (pbl_size > MAX_OCRDMA_PBL_SIZE) { 523 + status = -EFAULT; 524 + break; 525 + } 526 + num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64))); 527 + num_pbls = num_pbls / (pbl_size / sizeof(u64)); 528 + idx++; 529 + } while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl); 530 + 531 + mr->hwmr.num_pbes = 
num_pbes; 532 + mr->hwmr.num_pbls = num_pbls; 533 + mr->hwmr.pbl_size = pbl_size; 534 + return status; 535 + } 536 + 537 + static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr) 538 + { 539 + int status = 0; 540 + int i; 541 + u32 dma_len = mr->pbl_size; 542 + struct pci_dev *pdev = dev->nic_info.pdev; 543 + void *va; 544 + dma_addr_t pa; 545 + 546 + mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) * 547 + mr->num_pbls, GFP_KERNEL); 548 + 549 + if (!mr->pbl_table) 550 + return -ENOMEM; 551 + 552 + for (i = 0; i < mr->num_pbls; i++) { 553 + va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); 554 + if (!va) { 555 + ocrdma_free_mr_pbl_tbl(dev, mr); 556 + status = -ENOMEM; 557 + break; 558 + } 559 + memset(va, 0, dma_len); 560 + mr->pbl_table[i].va = va; 561 + mr->pbl_table[i].pa = pa; 562 + } 563 + return status; 564 + } 565 + 566 + static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr, 567 + u32 num_pbes) 568 + { 569 + struct ocrdma_pbe *pbe; 570 + struct ib_umem_chunk *chunk; 571 + struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table; 572 + struct ib_umem *umem = mr->umem; 573 + int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; 574 + 575 + if (!mr->hwmr.num_pbes) 576 + return; 577 + 578 + pbe = (struct ocrdma_pbe *)pbl_tbl->va; 579 + pbe_cnt = 0; 580 + 581 + shift = ilog2(umem->page_size); 582 + 583 + list_for_each_entry(chunk, &umem->chunk_list, list) { 584 + /* get all the dma regions from the chunk. 
*/ 585 + for (i = 0; i < chunk->nmap; i++) { 586 + pages = sg_dma_len(&chunk->page_list[i]) >> shift; 587 + for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { 588 + /* store the page address in pbe */ 589 + pbe->pa_lo = 590 + cpu_to_le32(sg_dma_address 591 + (&chunk->page_list[i]) + 592 + (umem->page_size * pg_cnt)); 593 + pbe->pa_hi = 594 + cpu_to_le32(upper_32_bits 595 + ((sg_dma_address 596 + (&chunk->page_list[i]) + 597 + umem->page_size * pg_cnt))); 598 + pbe_cnt += 1; 599 + total_num_pbes += 1; 600 + pbe++; 601 + 602 + /* if done building pbes, issue the mbx cmd. */ 603 + if (total_num_pbes == num_pbes) 604 + return; 605 + 606 + /* if the given pbl is full storing the pbes, 607 + * move to next pbl. 608 + */ 609 + if (pbe_cnt == 610 + (mr->hwmr.pbl_size / sizeof(u64))) { 611 + pbl_tbl++; 612 + pbe = (struct ocrdma_pbe *)pbl_tbl->va; 613 + pbe_cnt = 0; 614 + } 615 + } 616 + } 617 + } 618 + } 619 + 620 + struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, 621 + u64 usr_addr, int acc, struct ib_udata *udata) 622 + { 623 + int status = -ENOMEM; 624 + struct ocrdma_dev *dev; 625 + struct ocrdma_mr *mr; 626 + struct ocrdma_pd *pd; 627 + struct pci_dev *pdev; 628 + u32 num_pbes; 629 + 630 + pd = get_ocrdma_pd(ibpd); 631 + dev = pd->dev; 632 + pdev = dev->nic_info.pdev; 633 + 634 + if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) 635 + return ERR_PTR(-EINVAL); 636 + 637 + mr = kzalloc(sizeof(*mr), GFP_KERNEL); 638 + if (!mr) 639 + return ERR_PTR(status); 640 + mr->hwmr.dev = dev; 641 + mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0); 642 + if (IS_ERR(mr->umem)) { 643 + status = -EFAULT; 644 + goto umem_err; 645 + } 646 + num_pbes = ib_umem_page_count(mr->umem); 647 + status = ocrdma_get_pbl_info(mr, num_pbes); 648 + if (status) 649 + goto umem_err; 650 + 651 + mr->hwmr.pbe_size = mr->umem->page_size; 652 + mr->hwmr.fbo = mr->umem->offset; 653 + mr->hwmr.va = usr_addr; 654 + mr->hwmr.len = len; 655 + 
mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; 656 + mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; 657 + mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; 658 + mr->hwmr.local_rd = 1; 659 + mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; 660 + status = ocrdma_build_pbl_tbl(dev, &mr->hwmr); 661 + if (status) 662 + goto umem_err; 663 + build_user_pbes(dev, mr, num_pbes); 664 + status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc); 665 + if (status) 666 + goto mbx_err; 667 + mr->pd = pd; 668 + atomic_inc(&pd->use_cnt); 669 + mr->ibmr.lkey = mr->hwmr.lkey; 670 + if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) 671 + mr->ibmr.rkey = mr->hwmr.lkey; 672 + 673 + return &mr->ibmr; 674 + 675 + mbx_err: 676 + ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 677 + umem_err: 678 + kfree(mr); 679 + return ERR_PTR(status); 680 + } 681 + 682 + int ocrdma_dereg_mr(struct ib_mr *ib_mr) 683 + { 684 + struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); 685 + struct ocrdma_dev *dev = mr->hwmr.dev; 686 + int status; 687 + 688 + status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); 689 + 690 + if (mr->hwmr.fr_mr == 0) 691 + ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 692 + 693 + atomic_dec(&mr->pd->use_cnt); 694 + /* it could be user registered memory. */ 695 + if (mr->umem) 696 + ib_umem_release(mr->umem); 697 + kfree(mr); 698 + return status; 699 + } 700 + 701 + static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata, 702 + struct ib_ucontext *ib_ctx) 703 + { 704 + int status; 705 + struct ocrdma_ucontext *uctx; 706 + struct ocrdma_create_cq_uresp uresp; 707 + 708 + uresp.cq_id = cq->id; 709 + uresp.page_size = cq->len; 710 + uresp.num_pages = 1; 711 + uresp.max_hw_cqe = cq->max_hw_cqe; 712 + uresp.page_addr[0] = cq->pa; 713 + uresp.db_page_addr = cq->dev->nic_info.unmapped_db; 714 + uresp.db_page_size = cq->dev->nic_info.db_page_size; 715 + uresp.phase_change = cq->phase_change ? 
1 : 0; 716 + status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 717 + if (status) { 718 + ocrdma_err("%s(%d) copy error cqid=0x%x.\n", 719 + __func__, cq->dev->id, cq->id); 720 + goto err; 721 + } 722 + uctx = get_ocrdma_ucontext(ib_ctx); 723 + status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size); 724 + if (status) 725 + goto err; 726 + status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size); 727 + if (status) { 728 + ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size); 729 + goto err; 730 + } 731 + cq->ucontext = uctx; 732 + err: 733 + return status; 734 + } 735 + 736 + struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector, 737 + struct ib_ucontext *ib_ctx, 738 + struct ib_udata *udata) 739 + { 740 + struct ocrdma_cq *cq; 741 + struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); 742 + int status; 743 + struct ocrdma_create_cq_ureq ureq; 744 + 745 + if (udata) { 746 + if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) 747 + return ERR_PTR(-EFAULT); 748 + } else 749 + ureq.dpp_cq = 0; 750 + cq = kzalloc(sizeof(*cq), GFP_KERNEL); 751 + if (!cq) 752 + return ERR_PTR(-ENOMEM); 753 + 754 + spin_lock_init(&cq->cq_lock); 755 + spin_lock_init(&cq->comp_handler_lock); 756 + atomic_set(&cq->use_cnt, 0); 757 + INIT_LIST_HEAD(&cq->sq_head); 758 + INIT_LIST_HEAD(&cq->rq_head); 759 + cq->dev = dev; 760 + 761 + status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq); 762 + if (status) { 763 + kfree(cq); 764 + return ERR_PTR(status); 765 + } 766 + if (ib_ctx) { 767 + status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx); 768 + if (status) 769 + goto ctx_err; 770 + } 771 + cq->phase = OCRDMA_CQE_VALID; 772 + cq->arm_needed = true; 773 + dev->cq_tbl[cq->id] = cq; 774 + 775 + return &cq->ibcq; 776 + 777 + ctx_err: 778 + ocrdma_mbx_destroy_cq(dev, cq); 779 + kfree(cq); 780 + return ERR_PTR(status); 781 + } 782 + 783 + int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt, 784 + struct ib_udata *udata) 785 + { 786 + 
int status = 0; 787 + struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 788 + 789 + if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) { 790 + status = -EINVAL; 791 + return status; 792 + } 793 + ibcq->cqe = new_cnt; 794 + return status; 795 + } 796 + 797 + int ocrdma_destroy_cq(struct ib_cq *ibcq) 798 + { 799 + int status; 800 + struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 801 + struct ocrdma_dev *dev = cq->dev; 802 + 803 + if (atomic_read(&cq->use_cnt)) 804 + return -EINVAL; 805 + 806 + status = ocrdma_mbx_destroy_cq(dev, cq); 807 + 808 + if (cq->ucontext) { 809 + ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq->len); 810 + ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db, 811 + dev->nic_info.db_page_size); 812 + } 813 + dev->cq_tbl[cq->id] = NULL; 814 + 815 + kfree(cq); 816 + return status; 817 + } 818 + 819 + static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) 820 + { 821 + int status = -EINVAL; 822 + 823 + if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) { 824 + dev->qp_tbl[qp->id] = qp; 825 + status = 0; 826 + } 827 + return status; 828 + } 829 + 830 + static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) 831 + { 832 + dev->qp_tbl[qp->id] = NULL; 833 + } 834 + 835 + static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev, 836 + struct ib_qp_init_attr *attrs) 837 + { 838 + if (attrs->qp_type != IB_QPT_GSI && 839 + attrs->qp_type != IB_QPT_RC && 840 + attrs->qp_type != IB_QPT_UD) { 841 + ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n", 842 + __func__, dev->id, attrs->qp_type); 843 + return -EINVAL; 844 + } 845 + if (attrs->cap.max_send_wr > dev->attr.max_wqe) { 846 + ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n", 847 + __func__, dev->id, attrs->cap.max_send_wr); 848 + ocrdma_err("%s(%d) supported send_wr=0x%x\n", 849 + __func__, dev->id, dev->attr.max_wqe); 850 + return -EINVAL; 851 + } 852 + if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) { 853 + 
ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n", 854 + __func__, dev->id, attrs->cap.max_recv_wr); 855 + ocrdma_err("%s(%d) supported recv_wr=0x%x\n", 856 + __func__, dev->id, dev->attr.max_rqe); 857 + return -EINVAL; 858 + } 859 + if (attrs->cap.max_inline_data > dev->attr.max_inline_data) { 860 + ocrdma_err("%s(%d) unsupported inline data size=0x%x" 861 + " requested\n", __func__, dev->id, 862 + attrs->cap.max_inline_data); 863 + ocrdma_err("%s(%d) supported inline data size=0x%x\n", 864 + __func__, dev->id, dev->attr.max_inline_data); 865 + return -EINVAL; 866 + } 867 + if (attrs->cap.max_send_sge > dev->attr.max_send_sge) { 868 + ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n", 869 + __func__, dev->id, attrs->cap.max_send_sge); 870 + ocrdma_err("%s(%d) supported send_sge=0x%x\n", 871 + __func__, dev->id, dev->attr.max_send_sge); 872 + return -EINVAL; 873 + } 874 + if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) { 875 + ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n", 876 + __func__, dev->id, attrs->cap.max_recv_sge); 877 + ocrdma_err("%s(%d) supported recv_sge=0x%x\n", 878 + __func__, dev->id, dev->attr.max_recv_sge); 879 + return -EINVAL; 880 + } 881 + /* unprivileged user space cannot create special QP */ 882 + if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { 883 + ocrdma_err 884 + ("%s(%d) Userspace can't create special QPs of type=0x%x\n", 885 + __func__, dev->id, attrs->qp_type); 886 + return -EINVAL; 887 + } 888 + /* allow creating only one GSI type of QP */ 889 + if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) { 890 + ocrdma_err("%s(%d) GSI special QPs already created.\n", 891 + __func__, dev->id); 892 + return -EINVAL; 893 + } 894 + /* verify consumer QPs are not trying to use GSI QP's CQ */ 895 + if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) { 896 + if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) || 897 + (dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq))) { 898 + ocrdma_err("%s(%d) 
Consumer QP cannot use GSI CQs.\n", 899 + __func__, dev->id); 900 + return -EINVAL; 901 + } 902 + } 903 + return 0; 904 + } 905 + 906 + static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, 907 + struct ib_udata *udata, int dpp_offset, 908 + int dpp_credit_lmt, int srq) 909 + { 910 + int status = 0; 911 + u64 usr_db; 912 + struct ocrdma_create_qp_uresp uresp; 913 + struct ocrdma_dev *dev = qp->dev; 914 + struct ocrdma_pd *pd = qp->pd; 915 + 916 + memset(&uresp, 0, sizeof(uresp)); 917 + usr_db = dev->nic_info.unmapped_db + 918 + (pd->id * dev->nic_info.db_page_size); 919 + uresp.qp_id = qp->id; 920 + uresp.sq_dbid = qp->sq.dbid; 921 + uresp.num_sq_pages = 1; 922 + uresp.sq_page_size = qp->sq.len; 923 + uresp.sq_page_addr[0] = qp->sq.pa; 924 + uresp.num_wqe_allocated = qp->sq.max_cnt; 925 + if (!srq) { 926 + uresp.rq_dbid = qp->rq.dbid; 927 + uresp.num_rq_pages = 1; 928 + uresp.rq_page_size = qp->rq.len; 929 + uresp.rq_page_addr[0] = qp->rq.pa; 930 + uresp.num_rqe_allocated = qp->rq.max_cnt; 931 + } 932 + uresp.db_page_addr = usr_db; 933 + uresp.db_page_size = dev->nic_info.db_page_size; 934 + if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 935 + uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET; 936 + uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ? 937 + OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET; 938 + uresp.db_shift = (qp->id < 128) ? 
/* Compute the kernel virtual doorbell addresses for a QP's SQ and RQ.
 * GEN2 devices use dedicated offsets and select between two RQ doorbell
 * regions based on the QP id.
 */
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			((qp->id < 128) ?
			 OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET);
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

/* Allocate the per-WQE and per-RQE wr_id tables used to report
 * completions for kernel QPs (user QPs track wr_ids in the library).
 * NOTE(review): if the second allocation fails the first table stays
 * allocated; the caller's error path kfrees both, so this is not a leak.
 */
static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

/* Seed the software QP state from the create-QP attributes before the
 * firmware is asked to create the hardware QP.
 */
static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	/* inbound read/write always allowed; kernel-only flags added later */
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
}

/* Take references on every object the new QP depends on (PD, CQs, SRQ)
 * and publish the QP number to the IB core.
 */
static void ocrdma_set_qp_use_cnt(struct ocrdma_qp *qp, struct ocrdma_pd *pd)
{
	atomic_inc(&pd->use_cnt);
	atomic_inc(&qp->sq_cq->use_cnt);
	atomic_inc(&qp->rq_cq->use_cnt);
	if (qp->srq)
		atomic_inc(&qp->srq->use_cnt);
	qp->ibqp.qp_num = qp->id;
}

/* Remember the CQs used by the (single) GSI QP so that later create-QP
 * requests can be prevented from sharing them.
 */
static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

/* Create a QP: validate the request, create the hardware QP via the
 * mailbox, then (kernel QPs) allocate wr_id tables or (user QPs) copy
 * the queue/doorbell layout to user space.  dev_lock serializes QP
 * creation against destruction and modify.  Returns &qp->ibqp or
 * ERR_PTR().
 */
struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = pd->dev;
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	qp->dev = dev;
	ocrdma_set_qp_init_params(qp, pd, attrs);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
				      ureq.dpp_cq_id,
				      &dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user space QP's wr_id table are managed in library */
	if (udata == NULL) {
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
				  OCRDMA_QP_FAST_REG);
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	ocrdma_set_qp_use_cnt(qp, pd);
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	/* kfree(NULL) is a no-op, so partially allocated tables are safe */
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}
1116 + ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status); 1117 + gen_err: 1118 + return ERR_PTR(status); 1119 + } 1120 + 1121 + int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1122 + int attr_mask) 1123 + { 1124 + int status = 0; 1125 + struct ocrdma_qp *qp; 1126 + struct ocrdma_dev *dev; 1127 + enum ib_qp_state old_qps; 1128 + 1129 + qp = get_ocrdma_qp(ibqp); 1130 + dev = qp->dev; 1131 + if (attr_mask & IB_QP_STATE) 1132 + status = ocrdma_qp_state_machine(qp, attr->qp_state, &old_qps); 1133 + /* if new and previous states are same hw doesn't need to 1134 + * know about it. 1135 + */ 1136 + if (status < 0) 1137 + return status; 1138 + status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps); 1139 + return status; 1140 + } 1141 + 1142 + int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1143 + int attr_mask, struct ib_udata *udata) 1144 + { 1145 + unsigned long flags; 1146 + int status = -EINVAL; 1147 + struct ocrdma_qp *qp; 1148 + struct ocrdma_dev *dev; 1149 + enum ib_qp_state old_qps, new_qps; 1150 + 1151 + qp = get_ocrdma_qp(ibqp); 1152 + dev = qp->dev; 1153 + 1154 + /* syncronize with multiple context trying to change, retrive qps */ 1155 + mutex_lock(&dev->dev_lock); 1156 + /* syncronize with wqe, rqe posting and cqe processing contexts */ 1157 + spin_lock_irqsave(&qp->q_lock, flags); 1158 + old_qps = get_ibqp_state(qp->state); 1159 + if (attr_mask & IB_QP_STATE) 1160 + new_qps = attr->qp_state; 1161 + else 1162 + new_qps = old_qps; 1163 + spin_unlock_irqrestore(&qp->q_lock, flags); 1164 + 1165 + if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) { 1166 + ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for " 1167 + "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n", 1168 + __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, 1169 + old_qps, new_qps); 1170 + goto param_err; 1171 + } 1172 + 1173 + status = _ocrdma_modify_qp(ibqp, attr, attr_mask); 1174 + if (status > 0) 1175 + status 
= 0; 1176 + param_err: 1177 + mutex_unlock(&dev->dev_lock); 1178 + return status; 1179 + } 1180 + 1181 + static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu) 1182 + { 1183 + switch (mtu) { 1184 + case 256: 1185 + return IB_MTU_256; 1186 + case 512: 1187 + return IB_MTU_512; 1188 + case 1024: 1189 + return IB_MTU_1024; 1190 + case 2048: 1191 + return IB_MTU_2048; 1192 + case 4096: 1193 + return IB_MTU_4096; 1194 + default: 1195 + return IB_MTU_1024; 1196 + } 1197 + } 1198 + 1199 + static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags) 1200 + { 1201 + int ib_qp_acc_flags = 0; 1202 + 1203 + if (qp_cap_flags & OCRDMA_QP_INB_WR) 1204 + ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE; 1205 + if (qp_cap_flags & OCRDMA_QP_INB_RD) 1206 + ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE; 1207 + return ib_qp_acc_flags; 1208 + } 1209 + 1210 + int ocrdma_query_qp(struct ib_qp *ibqp, 1211 + struct ib_qp_attr *qp_attr, 1212 + int attr_mask, struct ib_qp_init_attr *qp_init_attr) 1213 + { 1214 + int status; 1215 + u32 qp_state; 1216 + struct ocrdma_qp_params params; 1217 + struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); 1218 + struct ocrdma_dev *dev = qp->dev; 1219 + 1220 + memset(&params, 0, sizeof(params)); 1221 + mutex_lock(&dev->dev_lock); 1222 + status = ocrdma_mbx_query_qp(dev, qp, &params); 1223 + mutex_unlock(&dev->dev_lock); 1224 + if (status) 1225 + goto mbx_err; 1226 + qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT); 1227 + qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT); 1228 + qp_attr->path_mtu = 1229 + ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx & 1230 + OCRDMA_QP_PARAMS_PATH_MTU_MASK) >> 1231 + OCRDMA_QP_PARAMS_PATH_MTU_SHIFT; 1232 + qp_attr->path_mig_state = IB_MIG_MIGRATED; 1233 + qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK; 1234 + qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK; 1235 + qp_attr->dest_qp_num = 1236 + params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK; 1237 + 1238 + qp_attr->qp_access_flags = 
ocrdma_to_ib_qp_acc_flags(qp->cap_flags); 1239 + qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1; 1240 + qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1; 1241 + qp_attr->cap.max_send_sge = qp->sq.max_sges; 1242 + qp_attr->cap.max_recv_sge = qp->rq.max_sges; 1243 + qp_attr->cap.max_inline_data = dev->attr.max_inline_data; 1244 + qp_init_attr->cap = qp_attr->cap; 1245 + memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0], 1246 + sizeof(params.dgid)); 1247 + qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl & 1248 + OCRDMA_QP_PARAMS_FLOW_LABEL_MASK; 1249 + qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx; 1250 + qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn & 1251 + OCRDMA_QP_PARAMS_HOP_LMT_MASK) >> 1252 + OCRDMA_QP_PARAMS_HOP_LMT_SHIFT; 1253 + qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn & 1254 + OCRDMA_QP_PARAMS_SQ_PSN_MASK) >> 1255 + OCRDMA_QP_PARAMS_TCLASS_SHIFT; 1256 + 1257 + qp_attr->ah_attr.ah_flags = IB_AH_GRH; 1258 + qp_attr->ah_attr.port_num = 1; 1259 + qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl & 1260 + OCRDMA_QP_PARAMS_SL_MASK) >> 1261 + OCRDMA_QP_PARAMS_SL_SHIFT; 1262 + qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn & 1263 + OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >> 1264 + OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT; 1265 + qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn & 1266 + OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >> 1267 + OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT; 1268 + qp_attr->retry_cnt = 1269 + (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >> 1270 + OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT; 1271 + qp_attr->min_rnr_timer = 0; 1272 + qp_attr->pkey_index = 0; 1273 + qp_attr->port_num = 1; 1274 + qp_attr->ah_attr.src_path_bits = 0; 1275 + qp_attr->ah_attr.static_rate = 0; 1276 + qp_attr->alt_pkey_index = 0; 1277 + qp_attr->alt_port_num = 0; 1278 + qp_attr->alt_timeout = 0; 1279 + memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); 1280 + qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >> 1281 + 
OCRDMA_QP_PARAMS_STATE_SHIFT; 1282 + qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0; 1283 + qp_attr->max_dest_rd_atomic = 1284 + params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT; 1285 + qp_attr->max_rd_atomic = 1286 + params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK; 1287 + qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags & 1288 + OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0; 1289 + mbx_err: 1290 + return status; 1291 + } 1292 + 1293 + static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx) 1294 + { 1295 + int i = idx / 32; 1296 + unsigned int mask = (1 << (idx % 32)); 1297 + 1298 + if (srq->idx_bit_fields[i] & mask) 1299 + srq->idx_bit_fields[i] &= ~mask; 1300 + else 1301 + srq->idx_bit_fields[i] |= mask; 1302 + } 1303 + 1304 + static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) 1305 + { 1306 + int free_cnt; 1307 + if (q->head >= q->tail) 1308 + free_cnt = (q->max_cnt - q->head) + q->tail; 1309 + else 1310 + free_cnt = q->tail - q->head; 1311 + if (q->free_delta) 1312 + free_cnt -= q->free_delta; 1313 + return free_cnt; 1314 + } 1315 + 1316 + static int is_hw_sq_empty(struct ocrdma_qp *qp) 1317 + { 1318 + return (qp->sq.tail == qp->sq.head && 1319 + ocrdma_hwq_free_cnt(&qp->sq) ? 1 : 0); 1320 + } 1321 + 1322 + static int is_hw_rq_empty(struct ocrdma_qp *qp) 1323 + { 1324 + return (qp->rq.tail == qp->rq.head) ? 
/* Address of the entry at the queue's head (next slot to produce). */
static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
	return q->va + (q->head * q->entry_size);
}

/* Address of the entry at an arbitrary index @idx. */
static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
				      u32 idx)
{
	return q->va + (idx * q->entry_size);
}

/* Advance the producer index, wrapping at max_wqe_idx (power-of-2 mask). */
static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
	q->head = (q->head + 1) & q->max_wqe_idx;
}

/* Advance the consumer index, wrapping at max_wqe_idx (power-of-2 mask). */
static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
	q->tail = (q->tail + 1) & q->max_wqe_idx;
}

/* discard the cqe for a given QP */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
	unsigned long cq_flags;
	unsigned long flags;
	int discard_cnt = 0;
	u32 cur_getp, stop_getp;
	struct ocrdma_cqe *cqe;
	u32 qpn = 0;

	spin_lock_irqsave(&cq->cq_lock, cq_flags);

	/* traverse through the CQEs in the hw CQ,
	 * find the matching CQE for a given qp,
	 * mark the matching one discarded by clearing qpn.
	 * ring the doorbell in the poll_cq() as
	 * we don't complete out of order cqe.
	 */

	cur_getp = cq->getp;
	/* find upto when do we reap the cq. */
	stop_getp = cur_getp;
	do {
		/* nothing left to discard once both queues are drained */
		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
			break;

		cqe = cq->va + cur_getp;
		/* if (a) done reaping whole hw cq, or
		 * (b) qp_xq becomes empty.
		 * then exit
		 */
		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
		/* if previously discarded cqe found, skip that too. */
		/* check for matching qp */
		if (qpn == 0 || qpn != qp->id)
			goto skip_cqe;

		/* mark cqe discarded so that it is not picked up later
		 * in the poll_cq().
		 */
		discard_cnt += 1;
		/* qpn == 0 marks the entry as discarded for poll_cq() */
		cqe->cmn.qpn = 0;
		if (is_cqe_for_sq(cqe))
			ocrdma_hwq_inc_tail(&qp->sq);
		else {
			if (qp->srq) {
				spin_lock_irqsave(&qp->srq->q_lock, flags);
				ocrdma_hwq_inc_tail(&qp->srq->rq);
				/* NOTE(review): toggles the bit at the CQ
				 * getp index, not an SRQ slot index derived
				 * from the CQE — verify against the poll
				 * path before relying on this.
				 */
				ocrdma_srq_toggle_bit(qp->srq, cur_getp);
				spin_unlock_irqrestore(&qp->srq->q_lock, flags);

			} else
				ocrdma_hwq_inc_tail(&qp->rq);
		}
skip_cqe:
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
	} while (cur_getp != stop_getp);
	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}

/* Remove @qp from its CQs' flush lists, if queued there, under the
 * device flush lock (synchronizes with active CQ polling).
 */
static void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
	int found = false;
	unsigned long flags;
	struct ocrdma_dev *dev = qp->dev;
	/* sync with any active CQ poll */

	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (found)
		list_del(&qp->sq_entry);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (found)
			list_del(&qp->rq_entry);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

/* Destroy a QP: move it to ERROR, destroy the hardware QP, unmap it from
 * the qpn table under the CQ locks, discard its outstanding CQEs (kernel
 * QPs), drop all references and free the software state.
 */
int ocrdma_destroy_qp(struct ib_qp *ibqp)
{
	int status;
	struct ocrdma_pd *pd;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	struct ib_qp_attr attrs;
	int attr_mask = IB_QP_STATE;
	unsigned long wq_flags = 0, rq_flags = 0;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	attrs.qp_state = IB_QPS_ERR;
	pd = qp->pd;

	/* change the QP state to ERROR */
	_ocrdma_modify_qp(ibqp, &attrs, attr_mask);

	/* ensure that CQEs for newly created QP (whose id may be same with
	 * one which just getting destroyed are same), dont get
	 * discarded until the old CQEs are discarded.
	 */
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_destroy_qp(dev, qp);

	/*
	 * acquire CQ lock while destroy is in progress, in order to
	 * protect against proessing in-flight CQEs for this QP.
	 */
	spin_lock_irqsave(&qp->sq_cq->cq_lock, wq_flags);
	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
		spin_lock_irqsave(&qp->rq_cq->cq_lock, rq_flags);

	ocrdma_del_qpn_map(dev, qp);

	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
		spin_unlock_irqrestore(&qp->rq_cq->cq_lock, rq_flags);
	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, wq_flags);

	/* user QPs never have their CQEs completed by the kernel */
	if (!pd->uctx) {
		ocrdma_discard_cqes(qp, qp->sq_cq);
		ocrdma_discard_cqes(qp, qp->rq_cq);
	}
	mutex_unlock(&dev->dev_lock);

	if (pd->uctx) {
		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len);
		if (!qp->srq)
			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len);
	}

	ocrdma_del_flush_qp(qp);

	atomic_dec(&qp->pd->use_cnt);
	atomic_dec(&qp->sq_cq->use_cnt);
	atomic_dec(&qp->rq_cq->use_cnt);
	if (qp->srq)
		atomic_dec(&qp->srq->use_cnt);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	return status;
}
uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET; 1508 + uresp.db_shift = 24; 1509 + } else { 1510 + uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; 1511 + uresp.db_shift = 16; 1512 + } 1513 + 1514 + status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 1515 + if (status) 1516 + return status; 1517 + status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0], 1518 + uresp.rq_page_size); 1519 + if (status) 1520 + return status; 1521 + return status; 1522 + } 1523 + 1524 + struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd, 1525 + struct ib_srq_init_attr *init_attr, 1526 + struct ib_udata *udata) 1527 + { 1528 + int status = -ENOMEM; 1529 + struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 1530 + struct ocrdma_dev *dev = pd->dev; 1531 + struct ocrdma_srq *srq; 1532 + 1533 + if (init_attr->attr.max_sge > dev->attr.max_recv_sge) 1534 + return ERR_PTR(-EINVAL); 1535 + if (init_attr->attr.max_wr > dev->attr.max_rqe) 1536 + return ERR_PTR(-EINVAL); 1537 + 1538 + srq = kzalloc(sizeof(*srq), GFP_KERNEL); 1539 + if (!srq) 1540 + return ERR_PTR(status); 1541 + 1542 + spin_lock_init(&srq->q_lock); 1543 + srq->dev = dev; 1544 + srq->pd = pd; 1545 + srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size); 1546 + status = ocrdma_mbx_create_srq(srq, init_attr, pd); 1547 + if (status) 1548 + goto err; 1549 + 1550 + if (udata == NULL) { 1551 + srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt, 1552 + GFP_KERNEL); 1553 + if (srq->rqe_wr_id_tbl == NULL) 1554 + goto arm_err; 1555 + 1556 + srq->bit_fields_len = (srq->rq.max_cnt / 32) + 1557 + (srq->rq.max_cnt % 32 ? 
1 : 0); 1558 + srq->idx_bit_fields = 1559 + kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL); 1560 + if (srq->idx_bit_fields == NULL) 1561 + goto arm_err; 1562 + memset(srq->idx_bit_fields, 0xff, 1563 + srq->bit_fields_len * sizeof(u32)); 1564 + } 1565 + 1566 + if (init_attr->attr.srq_limit) { 1567 + status = ocrdma_mbx_modify_srq(srq, &init_attr->attr); 1568 + if (status) 1569 + goto arm_err; 1570 + } 1571 + 1572 + atomic_set(&srq->use_cnt, 0); 1573 + if (udata) { 1574 + status = ocrdma_copy_srq_uresp(srq, udata); 1575 + if (status) 1576 + goto arm_err; 1577 + } 1578 + 1579 + atomic_inc(&pd->use_cnt); 1580 + return &srq->ibsrq; 1581 + 1582 + arm_err: 1583 + ocrdma_mbx_destroy_srq(dev, srq); 1584 + err: 1585 + kfree(srq->rqe_wr_id_tbl); 1586 + kfree(srq->idx_bit_fields); 1587 + kfree(srq); 1588 + return ERR_PTR(status); 1589 + } 1590 + 1591 + int ocrdma_modify_srq(struct ib_srq *ibsrq, 1592 + struct ib_srq_attr *srq_attr, 1593 + enum ib_srq_attr_mask srq_attr_mask, 1594 + struct ib_udata *udata) 1595 + { 1596 + int status = 0; 1597 + struct ocrdma_srq *srq; 1598 + struct ocrdma_dev *dev; 1599 + 1600 + srq = get_ocrdma_srq(ibsrq); 1601 + dev = srq->dev; 1602 + if (srq_attr_mask & IB_SRQ_MAX_WR) 1603 + status = -EINVAL; 1604 + else 1605 + status = ocrdma_mbx_modify_srq(srq, srq_attr); 1606 + return status; 1607 + } 1608 + 1609 + int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) 1610 + { 1611 + int status; 1612 + struct ocrdma_srq *srq; 1613 + struct ocrdma_dev *dev; 1614 + 1615 + srq = get_ocrdma_srq(ibsrq); 1616 + dev = srq->dev; 1617 + status = ocrdma_mbx_query_srq(srq, srq_attr); 1618 + return status; 1619 + } 1620 + 1621 + int ocrdma_destroy_srq(struct ib_srq *ibsrq) 1622 + { 1623 + int status; 1624 + struct ocrdma_srq *srq; 1625 + struct ocrdma_dev *dev; 1626 + 1627 + srq = get_ocrdma_srq(ibsrq); 1628 + dev = srq->dev; 1629 + if (atomic_read(&srq->use_cnt)) { 1630 + ocrdma_err("%s(%d) err, srq=0x%x in use\n", 1631 + __func__, 
dev->id, srq->id); 1632 + return -EAGAIN; 1633 + } 1634 + 1635 + status = ocrdma_mbx_destroy_srq(dev, srq); 1636 + 1637 + if (srq->pd->uctx) 1638 + ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len); 1639 + 1640 + atomic_dec(&srq->pd->use_cnt); 1641 + kfree(srq->idx_bit_fields); 1642 + kfree(srq->rqe_wr_id_tbl); 1643 + kfree(srq); 1644 + return status; 1645 + } 1646 + 1647 + /* unprivileged verbs and their support functions. */ 1648 + static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp, 1649 + struct ocrdma_hdr_wqe *hdr, 1650 + struct ib_send_wr *wr) 1651 + { 1652 + struct ocrdma_ewqe_ud_hdr *ud_hdr = 1653 + (struct ocrdma_ewqe_ud_hdr *)(hdr + 1); 1654 + struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah); 1655 + 1656 + ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn; 1657 + if (qp->qp_type == IB_QPT_GSI) 1658 + ud_hdr->qkey = qp->qkey; 1659 + else 1660 + ud_hdr->qkey = wr->wr.ud.remote_qkey; 1661 + ud_hdr->rsvd_ahid = ah->id; 1662 + } 1663 + 1664 + static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr, 1665 + struct ocrdma_sge *sge, int num_sge, 1666 + struct ib_sge *sg_list) 1667 + { 1668 + int i; 1669 + 1670 + for (i = 0; i < num_sge; i++) { 1671 + sge[i].lrkey = sg_list[i].lkey; 1672 + sge[i].addr_lo = sg_list[i].addr; 1673 + sge[i].addr_hi = upper_32_bits(sg_list[i].addr); 1674 + sge[i].len = sg_list[i].length; 1675 + hdr->total_len += sg_list[i].length; 1676 + } 1677 + if (num_sge == 0) 1678 + memset(sge, 0, sizeof(*sge)); 1679 + } 1680 + 1681 + static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, 1682 + struct ocrdma_hdr_wqe *hdr, 1683 + struct ocrdma_sge *sge, 1684 + struct ib_send_wr *wr, u32 wqe_size) 1685 + { 1686 + if (wr->send_flags & IB_SEND_INLINE) { 1687 + if (wr->sg_list[0].length > qp->max_inline_data) { 1688 + ocrdma_err("%s() supported_len=0x%x," 1689 + " unspported len req=0x%x\n", __func__, 1690 + qp->max_inline_data, wr->sg_list[0].length); 1691 + return -EINVAL; 1692 + } 1693 + memcpy(sge, 1694 + (void *)(unsigned 
long)wr->sg_list[0].addr, 1695 + wr->sg_list[0].length); 1696 + hdr->total_len = wr->sg_list[0].length; 1697 + wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES); 1698 + hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT); 1699 + } else { 1700 + ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list); 1701 + if (wr->num_sge) 1702 + wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge)); 1703 + else 1704 + wqe_size += sizeof(struct ocrdma_sge); 1705 + hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); 1706 + } 1707 + hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); 1708 + return 0; 1709 + } 1710 + 1711 + static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, 1712 + struct ib_send_wr *wr) 1713 + { 1714 + int status; 1715 + struct ocrdma_sge *sge; 1716 + u32 wqe_size = sizeof(*hdr); 1717 + 1718 + if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { 1719 + ocrdma_build_ud_hdr(qp, hdr, wr); 1720 + sge = (struct ocrdma_sge *)(hdr + 2); 1721 + wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr); 1722 + } else 1723 + sge = (struct ocrdma_sge *)(hdr + 1); 1724 + 1725 + status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); 1726 + return status; 1727 + } 1728 + 1729 + static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, 1730 + struct ib_send_wr *wr) 1731 + { 1732 + int status; 1733 + struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1); 1734 + struct ocrdma_sge *sge = ext_rw + 1; 1735 + u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw); 1736 + 1737 + status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); 1738 + if (status) 1739 + return status; 1740 + ext_rw->addr_lo = wr->wr.rdma.remote_addr; 1741 + ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); 1742 + ext_rw->lrkey = wr->wr.rdma.rkey; 1743 + ext_rw->len = hdr->total_len; 1744 + return 0; 1745 + } 1746 + 1747 + static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, 1748 + struct 
ib_send_wr *wr) 1749 + { 1750 + struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1); 1751 + struct ocrdma_sge *sge = ext_rw + 1; 1752 + u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) + 1753 + sizeof(struct ocrdma_hdr_wqe); 1754 + 1755 + ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list); 1756 + hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); 1757 + hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT); 1758 + hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); 1759 + 1760 + ext_rw->addr_lo = wr->wr.rdma.remote_addr; 1761 + ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); 1762 + ext_rw->lrkey = wr->wr.rdma.rkey; 1763 + ext_rw->len = hdr->total_len; 1764 + } 1765 + 1766 + static void ocrdma_ring_sq_db(struct ocrdma_qp *qp) 1767 + { 1768 + u32 val = qp->sq.dbid | (1 << 16); 1769 + 1770 + iowrite32(val, qp->sq_db); 1771 + } 1772 + 1773 + int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 1774 + struct ib_send_wr **bad_wr) 1775 + { 1776 + int status = 0; 1777 + struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); 1778 + struct ocrdma_hdr_wqe *hdr; 1779 + unsigned long flags; 1780 + 1781 + spin_lock_irqsave(&qp->q_lock, flags); 1782 + if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) { 1783 + spin_unlock_irqrestore(&qp->q_lock, flags); 1784 + return -EINVAL; 1785 + } 1786 + 1787 + while (wr) { 1788 + if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || 1789 + wr->num_sge > qp->sq.max_sges) { 1790 + status = -ENOMEM; 1791 + break; 1792 + } 1793 + hdr = ocrdma_hwq_head(&qp->sq); 1794 + hdr->cw = 0; 1795 + if (wr->send_flags & IB_SEND_SIGNALED) 1796 + hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT); 1797 + if (wr->send_flags & IB_SEND_FENCE) 1798 + hdr->cw |= 1799 + (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT); 1800 + if (wr->send_flags & IB_SEND_SOLICITED) 1801 + hdr->cw |= 1802 + (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT); 1803 + hdr->total_len = 0; 1804 + switch (wr->opcode) { 1805 + case 
IB_WR_SEND_WITH_IMM: 1806 + hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); 1807 + hdr->immdt = ntohl(wr->ex.imm_data); 1808 + case IB_WR_SEND: 1809 + hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); 1810 + ocrdma_build_send(qp, hdr, wr); 1811 + break; 1812 + case IB_WR_SEND_WITH_INV: 1813 + hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT); 1814 + hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); 1815 + hdr->lkey = wr->ex.invalidate_rkey; 1816 + status = ocrdma_build_send(qp, hdr, wr); 1817 + break; 1818 + case IB_WR_RDMA_WRITE_WITH_IMM: 1819 + hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); 1820 + hdr->immdt = ntohl(wr->ex.imm_data); 1821 + case IB_WR_RDMA_WRITE: 1822 + hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); 1823 + status = ocrdma_build_write(qp, hdr, wr); 1824 + break; 1825 + case IB_WR_RDMA_READ_WITH_INV: 1826 + hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT); 1827 + case IB_WR_RDMA_READ: 1828 + ocrdma_build_read(qp, hdr, wr); 1829 + break; 1830 + case IB_WR_LOCAL_INV: 1831 + hdr->cw |= 1832 + (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT); 1833 + hdr->cw |= (sizeof(struct ocrdma_hdr_wqe) / 1834 + OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT; 1835 + hdr->lkey = wr->ex.invalidate_rkey; 1836 + break; 1837 + default: 1838 + status = -EINVAL; 1839 + break; 1840 + } 1841 + if (status) { 1842 + *bad_wr = wr; 1843 + break; 1844 + } 1845 + if (wr->send_flags & IB_SEND_SIGNALED) 1846 + qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1; 1847 + else 1848 + qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0; 1849 + qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id; 1850 + ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) & 1851 + OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE); 1852 + /* make sure wqe is written before adapter can access it */ 1853 + wmb(); 1854 + /* inform hw to start processing it */ 1855 + ocrdma_ring_sq_db(qp); 1856 + 1857 + /* update pointer, counter for next wr */ 1858 + ocrdma_hwq_inc_head(&qp->sq); 1859 + wr = 
wr->next; 1860 + } 1861 + spin_unlock_irqrestore(&qp->q_lock, flags); 1862 + return status; 1863 + } 1864 + 1865 + static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) 1866 + { 1867 + u32 val = qp->rq.dbid | (1 << OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp)); 1868 + 1869 + iowrite32(val, qp->rq_db); 1870 + } 1871 + 1872 + static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr, 1873 + u16 tag) 1874 + { 1875 + u32 wqe_size = 0; 1876 + struct ocrdma_sge *sge; 1877 + if (wr->num_sge) 1878 + wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe); 1879 + else 1880 + wqe_size = sizeof(*sge) + sizeof(*rqe); 1881 + 1882 + rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) << 1883 + OCRDMA_WQE_SIZE_SHIFT); 1884 + rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT); 1885 + rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); 1886 + rqe->total_len = 0; 1887 + rqe->rsvd_tag = tag; 1888 + sge = (struct ocrdma_sge *)(rqe + 1); 1889 + ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list); 1890 + ocrdma_cpu_to_le32(rqe, wqe_size); 1891 + } 1892 + 1893 + int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, 1894 + struct ib_recv_wr **bad_wr) 1895 + { 1896 + int status = 0; 1897 + unsigned long flags; 1898 + struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); 1899 + struct ocrdma_hdr_wqe *rqe; 1900 + 1901 + spin_lock_irqsave(&qp->q_lock, flags); 1902 + if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) { 1903 + spin_unlock_irqrestore(&qp->q_lock, flags); 1904 + *bad_wr = wr; 1905 + return -EINVAL; 1906 + } 1907 + while (wr) { 1908 + if (ocrdma_hwq_free_cnt(&qp->rq) == 0 || 1909 + wr->num_sge > qp->rq.max_sges) { 1910 + *bad_wr = wr; 1911 + status = -ENOMEM; 1912 + break; 1913 + } 1914 + rqe = ocrdma_hwq_head(&qp->rq); 1915 + ocrdma_build_rqe(rqe, wr, 0); 1916 + 1917 + qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id; 1918 + /* make sure rqe is written before adapter can access it */ 1919 + wmb(); 1920 + 1921 + /* inform hw to start processing it */ 1922 + 
ocrdma_ring_rq_db(qp); 1923 + 1924 + /* update pointer, counter for next wr */ 1925 + ocrdma_hwq_inc_head(&qp->rq); 1926 + wr = wr->next; 1927 + } 1928 + spin_unlock_irqrestore(&qp->q_lock, flags); 1929 + return status; 1930 + } 1931 + 1932 + /* cqe for srq's rqe can potentially arrive out of order. 1933 + * index gives the entry in the shadow table where to store 1934 + * the wr_id. tag/index is returned in cqe to reference back 1935 + * for a given rqe. 1936 + */ 1937 + static int ocrdma_srq_get_idx(struct ocrdma_srq *srq) 1938 + { 1939 + int row = 0; 1940 + int indx = 0; 1941 + 1942 + for (row = 0; row < srq->bit_fields_len; row++) { 1943 + if (srq->idx_bit_fields[row]) { 1944 + indx = ffs(srq->idx_bit_fields[row]); 1945 + indx = (row * 32) + (indx - 1); 1946 + if (indx >= srq->rq.max_cnt) 1947 + BUG(); 1948 + ocrdma_srq_toggle_bit(srq, indx); 1949 + break; 1950 + } 1951 + } 1952 + 1953 + if (row == srq->bit_fields_len) 1954 + BUG(); 1955 + return indx; 1956 + } 1957 + 1958 + static void ocrdma_ring_srq_db(struct ocrdma_srq *srq) 1959 + { 1960 + u32 val = srq->rq.dbid | (1 << 16); 1961 + 1962 + iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET); 1963 + } 1964 + 1965 + int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, 1966 + struct ib_recv_wr **bad_wr) 1967 + { 1968 + int status = 0; 1969 + unsigned long flags; 1970 + struct ocrdma_srq *srq; 1971 + struct ocrdma_hdr_wqe *rqe; 1972 + u16 tag; 1973 + 1974 + srq = get_ocrdma_srq(ibsrq); 1975 + 1976 + spin_lock_irqsave(&srq->q_lock, flags); 1977 + while (wr) { 1978 + if (ocrdma_hwq_free_cnt(&srq->rq) == 0 || 1979 + wr->num_sge > srq->rq.max_sges) { 1980 + status = -ENOMEM; 1981 + *bad_wr = wr; 1982 + break; 1983 + } 1984 + tag = ocrdma_srq_get_idx(srq); 1985 + rqe = ocrdma_hwq_head(&srq->rq); 1986 + ocrdma_build_rqe(rqe, wr, tag); 1987 + 1988 + srq->rqe_wr_id_tbl[tag] = wr->wr_id; 1989 + /* make sure rqe is written before adapter can perform DMA */ 1990 + wmb(); 1991 + /* inform hw to start 
processing it */ 1992 + ocrdma_ring_srq_db(srq); 1993 + /* update pointer, counter for next wr */ 1994 + ocrdma_hwq_inc_head(&srq->rq); 1995 + wr = wr->next; 1996 + } 1997 + spin_unlock_irqrestore(&srq->q_lock, flags); 1998 + return status; 1999 + } 2000 + 2001 + static enum ib_wc_status ocrdma_to_ibwc_err(u16 status) 2002 + { 2003 + enum ib_wc_status ibwc_status = IB_WC_GENERAL_ERR; 2004 + 2005 + switch (status) { 2006 + case OCRDMA_CQE_GENERAL_ERR: 2007 + ibwc_status = IB_WC_GENERAL_ERR; 2008 + break; 2009 + case OCRDMA_CQE_LOC_LEN_ERR: 2010 + ibwc_status = IB_WC_LOC_LEN_ERR; 2011 + break; 2012 + case OCRDMA_CQE_LOC_QP_OP_ERR: 2013 + ibwc_status = IB_WC_LOC_QP_OP_ERR; 2014 + break; 2015 + case OCRDMA_CQE_LOC_EEC_OP_ERR: 2016 + ibwc_status = IB_WC_LOC_EEC_OP_ERR; 2017 + break; 2018 + case OCRDMA_CQE_LOC_PROT_ERR: 2019 + ibwc_status = IB_WC_LOC_PROT_ERR; 2020 + break; 2021 + case OCRDMA_CQE_WR_FLUSH_ERR: 2022 + ibwc_status = IB_WC_WR_FLUSH_ERR; 2023 + break; 2024 + case OCRDMA_CQE_MW_BIND_ERR: 2025 + ibwc_status = IB_WC_MW_BIND_ERR; 2026 + break; 2027 + case OCRDMA_CQE_BAD_RESP_ERR: 2028 + ibwc_status = IB_WC_BAD_RESP_ERR; 2029 + break; 2030 + case OCRDMA_CQE_LOC_ACCESS_ERR: 2031 + ibwc_status = IB_WC_LOC_ACCESS_ERR; 2032 + break; 2033 + case OCRDMA_CQE_REM_INV_REQ_ERR: 2034 + ibwc_status = IB_WC_REM_INV_REQ_ERR; 2035 + break; 2036 + case OCRDMA_CQE_REM_ACCESS_ERR: 2037 + ibwc_status = IB_WC_REM_ACCESS_ERR; 2038 + break; 2039 + case OCRDMA_CQE_REM_OP_ERR: 2040 + ibwc_status = IB_WC_REM_OP_ERR; 2041 + break; 2042 + case OCRDMA_CQE_RETRY_EXC_ERR: 2043 + ibwc_status = IB_WC_RETRY_EXC_ERR; 2044 + break; 2045 + case OCRDMA_CQE_RNR_RETRY_EXC_ERR: 2046 + ibwc_status = IB_WC_RNR_RETRY_EXC_ERR; 2047 + break; 2048 + case OCRDMA_CQE_LOC_RDD_VIOL_ERR: 2049 + ibwc_status = IB_WC_LOC_RDD_VIOL_ERR; 2050 + break; 2051 + case OCRDMA_CQE_REM_INV_RD_REQ_ERR: 2052 + ibwc_status = IB_WC_REM_INV_RD_REQ_ERR; 2053 + break; 2054 + case OCRDMA_CQE_REM_ABORT_ERR: 2055 + ibwc_status = 
IB_WC_REM_ABORT_ERR; 2056 + break; 2057 + case OCRDMA_CQE_INV_EECN_ERR: 2058 + ibwc_status = IB_WC_INV_EECN_ERR; 2059 + break; 2060 + case OCRDMA_CQE_INV_EEC_STATE_ERR: 2061 + ibwc_status = IB_WC_INV_EEC_STATE_ERR; 2062 + break; 2063 + case OCRDMA_CQE_FATAL_ERR: 2064 + ibwc_status = IB_WC_FATAL_ERR; 2065 + break; 2066 + case OCRDMA_CQE_RESP_TIMEOUT_ERR: 2067 + ibwc_status = IB_WC_RESP_TIMEOUT_ERR; 2068 + break; 2069 + default: 2070 + ibwc_status = IB_WC_GENERAL_ERR; 2071 + break; 2072 + }; 2073 + return ibwc_status; 2074 + } 2075 + 2076 + static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc, 2077 + u32 wqe_idx) 2078 + { 2079 + struct ocrdma_hdr_wqe *hdr; 2080 + struct ocrdma_sge *rw; 2081 + int opcode; 2082 + 2083 + hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx); 2084 + 2085 + ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid; 2086 + /* Undo the hdr->cw swap */ 2087 + opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK; 2088 + switch (opcode) { 2089 + case OCRDMA_WRITE: 2090 + ibwc->opcode = IB_WC_RDMA_WRITE; 2091 + break; 2092 + case OCRDMA_READ: 2093 + rw = (struct ocrdma_sge *)(hdr + 1); 2094 + ibwc->opcode = IB_WC_RDMA_READ; 2095 + ibwc->byte_len = rw->len; 2096 + break; 2097 + case OCRDMA_SEND: 2098 + ibwc->opcode = IB_WC_SEND; 2099 + break; 2100 + case OCRDMA_LKEY_INV: 2101 + ibwc->opcode = IB_WC_LOCAL_INV; 2102 + break; 2103 + default: 2104 + ibwc->status = IB_WC_GENERAL_ERR; 2105 + ocrdma_err("%s() invalid opcode received = 0x%x\n", 2106 + __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); 2107 + break; 2108 + }; 2109 + } 2110 + 2111 + static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, 2112 + struct ocrdma_cqe *cqe) 2113 + { 2114 + if (is_cqe_for_sq(cqe)) { 2115 + cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( 2116 + cqe->flags_status_srcqpn) & 2117 + ~OCRDMA_CQE_STATUS_MASK); 2118 + cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( 2119 + cqe->flags_status_srcqpn) | 2120 + (OCRDMA_CQE_WR_FLUSH_ERR << 2121 + 
OCRDMA_CQE_STATUS_SHIFT)); 2122 + } else { 2123 + if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { 2124 + cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( 2125 + cqe->flags_status_srcqpn) & 2126 + ~OCRDMA_CQE_UD_STATUS_MASK); 2127 + cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( 2128 + cqe->flags_status_srcqpn) | 2129 + (OCRDMA_CQE_WR_FLUSH_ERR << 2130 + OCRDMA_CQE_UD_STATUS_SHIFT)); 2131 + } else { 2132 + cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( 2133 + cqe->flags_status_srcqpn) & 2134 + ~OCRDMA_CQE_STATUS_MASK); 2135 + cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( 2136 + cqe->flags_status_srcqpn) | 2137 + (OCRDMA_CQE_WR_FLUSH_ERR << 2138 + OCRDMA_CQE_STATUS_SHIFT)); 2139 + } 2140 + } 2141 + } 2142 + 2143 + static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, 2144 + struct ocrdma_qp *qp, int status) 2145 + { 2146 + bool expand = false; 2147 + 2148 + ibwc->byte_len = 0; 2149 + ibwc->qp = &qp->ibqp; 2150 + ibwc->status = ocrdma_to_ibwc_err(status); 2151 + 2152 + ocrdma_flush_qp(qp); 2153 + ocrdma_qp_state_machine(qp, IB_QPS_ERR, NULL); 2154 + 2155 + /* if wqe/rqe pending for which cqe needs to be returned, 2156 + * trigger inflating it. 
2157 + */ 2158 + if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) { 2159 + expand = true; 2160 + ocrdma_set_cqe_status_flushed(qp, cqe); 2161 + } 2162 + return expand; 2163 + } 2164 + 2165 + static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, 2166 + struct ocrdma_qp *qp, int status) 2167 + { 2168 + ibwc->opcode = IB_WC_RECV; 2169 + ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; 2170 + ocrdma_hwq_inc_tail(&qp->rq); 2171 + 2172 + return ocrdma_update_err_cqe(ibwc, cqe, qp, status); 2173 + } 2174 + 2175 + static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, 2176 + struct ocrdma_qp *qp, int status) 2177 + { 2178 + ocrdma_update_wc(qp, ibwc, qp->sq.tail); 2179 + ocrdma_hwq_inc_tail(&qp->sq); 2180 + 2181 + return ocrdma_update_err_cqe(ibwc, cqe, qp, status); 2182 + } 2183 + 2184 + 2185 + static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, 2186 + struct ocrdma_cqe *cqe, struct ib_wc *ibwc, 2187 + bool *polled, bool *stop) 2188 + { 2189 + bool expand; 2190 + int status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2191 + OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2192 + 2193 + /* when hw sq is empty, but rq is not empty, so we continue 2194 + * to keep the cqe in order to get the cq event again. 2195 + */ 2196 + if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) { 2197 + /* when cq for rq and sq is same, it is safe to return 2198 + * flush cqe for RQEs. 2199 + */ 2200 + if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { 2201 + *polled = true; 2202 + status = OCRDMA_CQE_WR_FLUSH_ERR; 2203 + expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); 2204 + } else { 2205 + /* stop processing further cqe as this cqe is used for 2206 + * triggering cq event on buddy cq of RQ. 2207 + * When QP is destroyed, this cqe will be removed 2208 + * from the cq's hardware q. 
2209 + */ 2210 + *polled = false; 2211 + *stop = true; 2212 + expand = false; 2213 + } 2214 + } else { 2215 + *polled = true; 2216 + expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); 2217 + } 2218 + return expand; 2219 + } 2220 + 2221 + static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp, 2222 + struct ocrdma_cqe *cqe, 2223 + struct ib_wc *ibwc, bool *polled) 2224 + { 2225 + bool expand = false; 2226 + int tail = qp->sq.tail; 2227 + u32 wqe_idx; 2228 + 2229 + if (!qp->wqe_wr_id_tbl[tail].signaled) { 2230 + expand = true; /* CQE cannot be consumed yet */ 2231 + *polled = false; /* WC cannot be consumed yet */ 2232 + } else { 2233 + ibwc->status = IB_WC_SUCCESS; 2234 + ibwc->wc_flags = 0; 2235 + ibwc->qp = &qp->ibqp; 2236 + ocrdma_update_wc(qp, ibwc, tail); 2237 + *polled = true; 2238 + wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK; 2239 + if (tail != wqe_idx) 2240 + expand = true; /* Coalesced CQE can't be consumed yet */ 2241 + } 2242 + ocrdma_hwq_inc_tail(&qp->sq); 2243 + return expand; 2244 + } 2245 + 2246 + static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, 2247 + struct ib_wc *ibwc, bool *polled, bool *stop) 2248 + { 2249 + int status; 2250 + bool expand; 2251 + 2252 + status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2253 + OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2254 + 2255 + if (status == OCRDMA_CQE_SUCCESS) 2256 + expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled); 2257 + else 2258 + expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop); 2259 + return expand; 2260 + } 2261 + 2262 + static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe) 2263 + { 2264 + int status; 2265 + 2266 + status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2267 + OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT; 2268 + ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) & 2269 + OCRDMA_CQE_SRCQP_MASK; 2270 + ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) & 2271 + 
OCRDMA_CQE_PKEY_MASK; 2272 + ibwc->wc_flags = IB_WC_GRH; 2273 + ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >> 2274 + OCRDMA_CQE_UD_XFER_LEN_SHIFT); 2275 + return status; 2276 + } 2277 + 2278 + static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc, 2279 + struct ocrdma_cqe *cqe, 2280 + struct ocrdma_qp *qp) 2281 + { 2282 + unsigned long flags; 2283 + struct ocrdma_srq *srq; 2284 + u32 wqe_idx; 2285 + 2286 + srq = get_ocrdma_srq(qp->ibqp.srq); 2287 + wqe_idx = le32_to_cpu(cqe->rq.buftag_qpn) >> OCRDMA_CQE_BUFTAG_SHIFT; 2288 + ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx]; 2289 + spin_lock_irqsave(&srq->q_lock, flags); 2290 + ocrdma_srq_toggle_bit(srq, wqe_idx); 2291 + spin_unlock_irqrestore(&srq->q_lock, flags); 2292 + ocrdma_hwq_inc_tail(&srq->rq); 2293 + } 2294 + 2295 + static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, 2296 + struct ib_wc *ibwc, bool *polled, bool *stop, 2297 + int status) 2298 + { 2299 + bool expand; 2300 + 2301 + /* when hw_rq is empty, but wq is not empty, so continue 2302 + * to keep the cqe to get the cq event again. 
	 */
	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			/* SQ and RQ share this CQ and send WQEs are still
			 * pending: report a flushed send completion instead
			 * so the poll keeps making forward progress.
			 */
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
		} else {
			/* Nothing to report against this CQE; tell the
			 * caller to stop scanning further CQEs.
			 */
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else
		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
	return expand;
}

/* Translate a successful receive CQE into the ib_wc handed back to the
 * consumer: sets opcode/status, byte length (UD/GSI completions are
 * filled in by ocrdma_update_ud_rcqe() instead), immediate data
 * (converted with htonl() — ib_wc.ex.imm_data is in network byte
 * order), invalidated rkey reporting, and the wr_id bookkeeping for
 * either the SRQ or the QP's own RQ.
 */
static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->qp = &qp->ibqp;
	ibwc->status = IB_WC_SUCCESS;

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		ocrdma_update_ud_rcqe(ibwc, cqe);
	else
		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

	if (is_cqe_imm(cqe)) {
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_wr_imm(cqe)) {
		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_invalidated(cqe)) {
		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}
	if (qp->ibqp.srq)
		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
	else {
		/* wr_id of the posted RQE is remembered in a side table,
		 * indexed by the hardware queue tail.
		 */
		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
		ocrdma_hwq_inc_tail(&qp->rq);
	}
}

/* Process one receive-side CQE.  On success fills *ibwc and sets
 * *polled; on error defers to ocrdma_poll_err_rcqe(), which may set
 * *stop (halt polling) or ask for the CQE to be "expanded" (re-used to
 * emit further flush completions) via the returned flag.
 * Note: UD/GSI CQEs carry their status in a different field/shift than
 * RC CQEs, hence the two extraction paths.
 */
static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand = false;

	ibwc->wc_flags = 0;
	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_UD_STATUS_MASK) >>
			  OCRDMA_CQE_UD_STATUS_SHIFT;
	else
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status == OCRDMA_CQE_SUCCESS) {
		*polled = true;
		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
	} else {
		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
					      status);
	}
	return expand;
}

/* Track CQE validity for the consumed entry: with phase_change the
 * expected valid-phase bit is flipped each time the ring wraps
 * (cur_getp back to 0); without it, consumed CQEs are invalidated
 * directly by zeroing flags_status_srcqpn.
 */
static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
				   u16 cur_getp)
{
	if (cq->phase_change) {
		if (cur_getp == 0)
			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
	} else
		/* clear valid bit */
		cqe->flags_status_srcqpn = 0;
}

/* Harvest up to num_entries completions from the hardware CQ ring into
 * ibwc[].  Returns the number of work completions produced (i); also
 * advances cq->getp and rings the CQ doorbell to return consumed CQE
 * credits to the adapter.  A single error CQE may "expand" into several
 * flush completions (expand_cqe path re-processes the same CQE), and a
 * CQE may be skipped entirely (qpn == 0 marks a discarded entry).
 * Caller must hold cq->cq_lock.
 */
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = cq->dev;
	struct ocrdma_cqe *cqe;
	u16 cur_getp; bool polled = false; bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether valid cqe or not */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqe */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];
		BUG_ON(qp == NULL);

		if (is_cqe_for_sq(cqe)) {
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		} else {
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		}
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		/* expand keeps cur_getp unchanged so the same hw CQE is
		 * visited again on the next iteration.
		 */
		if (polled) {
			num_entries -= 1;
			i += 1;
			ibwc = ibwc + 1;
			polled = false;
		}
	}
stop_cqe:
	cq->getp = cur_getp;
	if (polled_hw_cqes || expand || stop) {
		ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
				  polled_hw_cqes);
	}
	return i;
}

/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		/* drained both queues: nothing left to flush for this QP */
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else
			return err_cqes;
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}

/* ib_poll_cq handler.  First polls real completions from the adapter CQ
 * under cq_lock; if the consumer asked for more entries than the
 * hardware produced, synthesizes FLUSH_ERR completions (under
 * flush_q_lock) for pending WQEs/RQEs of error-state QPs queued on this
 * CQ's flush list.  Returns the total number of entries written to wc[].
 */
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int cqes_to_poll = num_entries;
	struct ocrdma_cq *cq = NULL;
	unsigned long flags;
	struct ocrdma_dev *dev;
	int num_os_cqe = 0, err_cqes = 0;
	struct ocrdma_qp *qp;

	cq = get_ocrdma_cq(ibcq);
	dev = cq->dev;

	/* poll cqes from adapter CQ */
	spin_lock_irqsave(&cq->cq_lock, flags);
	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	cqes_to_poll -= num_os_cqe;

	if (cqes_to_poll) {
		wc = wc + num_os_cqe;
		/* adapter returns single error cqe when qp moves to
		 * error state. So insert error cqes with wc_status as
		 * FLUSHED for pending WQEs and RQEs of QP's SQ and RQ
		 * respectively which uses this CQ.
		 */
		spin_lock_irqsave(&dev->flush_q_lock, flags);
		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
			if (cqes_to_poll == 0)
				break;
			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
			cqes_to_poll -= err_cqes;
			num_os_cqe += err_cqes;
			wc = wc + err_cqes;
		}
		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	}
	return num_os_cqe;
}

/* ib_req_notify_cq handler.  Records the requested notification type
 * (armed / solicited-only) and rings the CQ doorbell to arm it, but
 * only when no valid un-consumed CQE is pending (or a deferred arm was
 * requested via arm_needed) — arming with CQEs outstanding would raise
 * an interrupt for completions the consumer is about to poll anyway.
 */
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
	struct ocrdma_cq *cq;
	unsigned long flags;
	struct ocrdma_dev *dev;
	u16 cq_id;
	u16 cur_getp;
	struct ocrdma_cqe *cqe;

	cq = get_ocrdma_cq(ibcq);
	cq_id = cq->id;
	dev = cq->dev;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
		cq->armed = true;
	if (cq_flags & IB_CQ_SOLICITED)
		cq->solicited = true;

	cur_getp = cq->getp;
	cqe = cq->va + cur_getp;

	/* check whether any valid cqe exist or not, if not then safe to
	 * arm. If cqe is not yet consumed, then let it get consumed and then
	 * we arm it to avoid false interrupts.
	 */
	if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
		cq->arm_needed = false;
		ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return 0;
}
+94
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

/* Prototypes for the ocrdma InfiniBand verbs handlers; these are the
 * entry points wired into the struct ib_device ops at registration.
 */
#ifndef __OCRDMA_VERBS_H__
#define __OCRDMA_VERBS_H__

/* NOTE(review): nothing in this header appears to use version macros;
 * this include looks unnecessary -- confirm against the .c users
 * before removing.
 */
#include <linux/version.h>

/* Work-request posting. */
int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
		     struct ib_send_wr **bad_wr);
int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
		     struct ib_recv_wr **bad_wr);

/* Completion queue polling and notification arming. */
int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);

/* Device and port attribute queries / modification. */
int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props);
int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
		       struct ib_port_modify *props);

void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
int ocrdma_query_gid(struct ib_device *, u8 port,
		     int index, union ib_gid *gid);
int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);

/* User-context lifetime and doorbell-page mmap. */
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *,
					  struct ib_udata *);
int ocrdma_dealloc_ucontext(struct ib_ucontext *);

int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);

/* Protection domains. */
struct ib_pd *ocrdma_alloc_pd(struct ib_device *,
			      struct ib_ucontext *, struct ib_udata *);
int ocrdma_dealloc_pd(struct ib_pd *pd);

/* Completion queue lifecycle. */
struct ib_cq *ocrdma_create_cq(struct ib_device *, int entries, int vector,
			       struct ib_ucontext *, struct ib_udata *);
int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int ocrdma_destroy_cq(struct ib_cq *);

/* Queue pair lifecycle; _ocrdma_modify_qp is the internal variant
 * without a udata argument.
 */
struct ib_qp *ocrdma_create_qp(struct ib_pd *,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *);
int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
		      int attr_mask);
int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata);
int ocrdma_query_qp(struct ib_qp *,
		    struct ib_qp_attr *qp_attr,
		    int qp_attr_mask, struct ib_qp_init_attr *);
int ocrdma_destroy_qp(struct ib_qp *);

/* Shared receive queues. */
struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
				 struct ib_udata *);
int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
		      enum ib_srq_attr_mask, struct ib_udata *);
int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
int ocrdma_destroy_srq(struct ib_srq *);
int ocrdma_post_srq_recv(struct ib_srq *, struct ib_recv_wr *,
			 struct ib_recv_wr **bad_recv_wr);

/* Memory region registration. */
int ocrdma_dereg_mr(struct ib_mr *);
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);
struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *,
				   struct ib_phys_buf *buffer_list,
				   int num_phys_buf, int acc, u64 *iova_start);
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
				 u64 virt, int acc, struct ib_udata *);

#endif				/* __OCRDMA_VERBS_H__ */