Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

qedr: Add support for RoCE HW init

Allocate and set up RoCE resources, interrupts and completion queues.
Add device attributes.

Signed-off-by: Rajesh Borundia <rajesh.borundia@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>

authored by

Ram Amrani and committed by
Doug Ledford
ec72fce4 2e0cbc4d

+691 -2
+416 -2
drivers/infiniband/hw/qedr/main.c
··· 36 36 #include <linux/iommu.h> 37 37 #include <net/addrconf.h> 38 38 #include <linux/qed/qede_roce.h> 39 + #include <linux/qed/qed_chain.h> 40 + #include <linux/qed/qed_if.h> 39 41 #include "qedr.h" 40 42 41 43 MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver"); ··· 63 61 return IB_LINK_LAYER_ETHERNET; 64 62 } 65 63 64 + static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str, 65 + size_t str_len) 66 + { 67 + struct qedr_dev *qedr = get_qedr_dev(ibdev); 68 + u32 fw_ver = (u32)qedr->attr.fw_ver; 69 + 70 + snprintf(str, str_len, "%d. %d. %d. %d", 71 + (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF, 72 + (fw_ver >> 8) & 0xFF, fw_ver & 0xFF); 73 + } 74 + 66 75 static int qedr_register_device(struct qedr_dev *dev) 67 76 { 68 77 strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX); ··· 82 69 dev->ibdev.owner = THIS_MODULE; 83 70 84 71 dev->ibdev.get_link_layer = qedr_link_layer; 72 + dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str; 85 73 86 74 return 0; 75 + } 76 + 77 + /* This function allocates fast-path status block memory */ 78 + static int qedr_alloc_mem_sb(struct qedr_dev *dev, 79 + struct qed_sb_info *sb_info, u16 sb_id) 80 + { 81 + struct status_block *sb_virt; 82 + dma_addr_t sb_phys; 83 + int rc; 84 + 85 + sb_virt = dma_alloc_coherent(&dev->pdev->dev, 86 + sizeof(*sb_virt), &sb_phys, GFP_KERNEL); 87 + if (!sb_virt) 88 + return -ENOMEM; 89 + 90 + rc = dev->ops->common->sb_init(dev->cdev, sb_info, 91 + sb_virt, sb_phys, sb_id, 92 + QED_SB_TYPE_CNQ); 93 + if (rc) { 94 + pr_err("Status block initialization failed\n"); 95 + dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt), 96 + sb_virt, sb_phys); 97 + return rc; 98 + } 99 + 100 + return 0; 101 + } 102 + 103 + static void qedr_free_mem_sb(struct qedr_dev *dev, 104 + struct qed_sb_info *sb_info, int sb_id) 105 + { 106 + if (sb_info->sb_virt) { 107 + dev->ops->common->sb_release(dev->cdev, sb_info, sb_id); 108 + dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt), 109 + (void 
*)sb_info->sb_virt, sb_info->sb_phys); 110 + } 111 + } 112 + 113 + static void qedr_free_resources(struct qedr_dev *dev) 114 + { 115 + int i; 116 + 117 + for (i = 0; i < dev->num_cnq; i++) { 118 + qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); 119 + dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl); 120 + } 121 + 122 + kfree(dev->cnq_array); 123 + kfree(dev->sb_array); 124 + kfree(dev->sgid_tbl); 125 + } 126 + 127 + static int qedr_alloc_resources(struct qedr_dev *dev) 128 + { 129 + struct qedr_cnq *cnq; 130 + __le16 *cons_pi; 131 + u16 n_entries; 132 + int i, rc; 133 + 134 + dev->sgid_tbl = kzalloc(sizeof(union ib_gid) * 135 + QEDR_MAX_SGID, GFP_KERNEL); 136 + if (!dev->sgid_tbl) 137 + return -ENOMEM; 138 + 139 + spin_lock_init(&dev->sgid_lock); 140 + 141 + /* Allocate Status blocks for CNQ */ 142 + dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array), 143 + GFP_KERNEL); 144 + if (!dev->sb_array) { 145 + rc = -ENOMEM; 146 + goto err1; 147 + } 148 + 149 + dev->cnq_array = kcalloc(dev->num_cnq, 150 + sizeof(*dev->cnq_array), GFP_KERNEL); 151 + if (!dev->cnq_array) { 152 + rc = -ENOMEM; 153 + goto err2; 154 + } 155 + 156 + dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev); 157 + 158 + /* Allocate CNQ PBLs */ 159 + n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE); 160 + for (i = 0; i < dev->num_cnq; i++) { 161 + cnq = &dev->cnq_array[i]; 162 + 163 + rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i], 164 + dev->sb_start + i); 165 + if (rc) 166 + goto err3; 167 + 168 + rc = dev->ops->common->chain_alloc(dev->cdev, 169 + QED_CHAIN_USE_TO_CONSUME, 170 + QED_CHAIN_MODE_PBL, 171 + QED_CHAIN_CNT_TYPE_U16, 172 + n_entries, 173 + sizeof(struct regpair *), 174 + &cnq->pbl); 175 + if (rc) 176 + goto err4; 177 + 178 + cnq->dev = dev; 179 + cnq->sb = &dev->sb_array[i]; 180 + cons_pi = dev->sb_array[i].sb_virt->pi_array; 181 + cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX]; 182 + cnq->index = i; 183 + 
sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev)); 184 + 185 + DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n", 186 + i, qed_chain_get_cons_idx(&cnq->pbl)); 187 + } 188 + 189 + return 0; 190 + err4: 191 + qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); 192 + err3: 193 + for (--i; i >= 0; i--) { 194 + dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl); 195 + qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); 196 + } 197 + kfree(dev->cnq_array); 198 + err2: 199 + kfree(dev->sb_array); 200 + err1: 201 + kfree(dev->sgid_tbl); 202 + return rc; 87 203 } 88 204 89 205 /* QEDR sysfs interface */ ··· 288 146 } 289 147 } 290 148 149 + static const struct qed_rdma_ops *qed_ops; 150 + 151 + #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) 152 + 153 + static irqreturn_t qedr_irq_handler(int irq, void *handle) 154 + { 155 + u16 hw_comp_cons, sw_comp_cons; 156 + struct qedr_cnq *cnq = handle; 157 + 158 + qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0); 159 + 160 + qed_sb_update_sb_idx(cnq->sb); 161 + 162 + hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr); 163 + sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl); 164 + 165 + /* Align protocol-index and chain reads */ 166 + rmb(); 167 + 168 + while (sw_comp_cons != hw_comp_cons) { 169 + sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl); 170 + cnq->n_comp++; 171 + } 172 + 173 + qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index, 174 + sw_comp_cons); 175 + 176 + qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1); 177 + 178 + return IRQ_HANDLED; 179 + } 180 + 181 + static void qedr_sync_free_irqs(struct qedr_dev *dev) 182 + { 183 + u32 vector; 184 + int i; 185 + 186 + for (i = 0; i < dev->int_info.used_cnt; i++) { 187 + if (dev->int_info.msix_cnt) { 188 + vector = dev->int_info.msix[i * dev->num_hwfns].vector; 189 + synchronize_irq(vector); 190 + free_irq(vector, &dev->cnq_array[i]); 191 + } 192 + } 193 + 194 + dev->int_info.used_cnt = 0; 195 + } 196 + 197 + static int qedr_req_msix_irqs(struct 
qedr_dev *dev) 198 + { 199 + int i, rc = 0; 200 + 201 + if (dev->num_cnq > dev->int_info.msix_cnt) { 202 + DP_ERR(dev, 203 + "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n", 204 + dev->num_cnq, dev->int_info.msix_cnt); 205 + return -EINVAL; 206 + } 207 + 208 + for (i = 0; i < dev->num_cnq; i++) { 209 + rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector, 210 + qedr_irq_handler, 0, dev->cnq_array[i].name, 211 + &dev->cnq_array[i]); 212 + if (rc) { 213 + DP_ERR(dev, "Request cnq %d irq failed\n", i); 214 + qedr_sync_free_irqs(dev); 215 + } else { 216 + DP_DEBUG(dev, QEDR_MSG_INIT, 217 + "Requested cnq irq for %s [entry %d]. Cookie is at %p\n", 218 + dev->cnq_array[i].name, i, 219 + &dev->cnq_array[i]); 220 + dev->int_info.used_cnt++; 221 + } 222 + } 223 + 224 + return rc; 225 + } 226 + 227 + static int qedr_setup_irqs(struct qedr_dev *dev) 228 + { 229 + int rc; 230 + 231 + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n"); 232 + 233 + /* Learn Interrupt configuration */ 234 + rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq); 235 + if (rc < 0) 236 + return rc; 237 + 238 + rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info); 239 + if (rc) { 240 + DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n"); 241 + return rc; 242 + } 243 + 244 + if (dev->int_info.msix_cnt) { 245 + DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n", 246 + dev->int_info.msix_cnt); 247 + rc = qedr_req_msix_irqs(dev); 248 + if (rc) 249 + return rc; 250 + } 251 + 252 + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n"); 253 + 254 + return 0; 255 + } 256 + 257 + static int qedr_set_device_attr(struct qedr_dev *dev) 258 + { 259 + struct qed_rdma_device *qed_attr; 260 + struct qedr_device_attr *attr; 261 + u32 page_size; 262 + 263 + /* Part 1 - query core capabilities */ 264 + qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx); 265 + 266 + /* Part 2 - check capabilities */ 267 + page_size = ~dev->attr.page_size_caps + 1; 268 + if (page_size > PAGE_SIZE) 
{ 269 + DP_ERR(dev, 270 + "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n", 271 + PAGE_SIZE, page_size); 272 + return -ENODEV; 273 + } 274 + 275 + /* Part 3 - copy and update capabilities */ 276 + attr = &dev->attr; 277 + attr->vendor_id = qed_attr->vendor_id; 278 + attr->vendor_part_id = qed_attr->vendor_part_id; 279 + attr->hw_ver = qed_attr->hw_ver; 280 + attr->fw_ver = qed_attr->fw_ver; 281 + attr->node_guid = qed_attr->node_guid; 282 + attr->sys_image_guid = qed_attr->sys_image_guid; 283 + attr->max_cnq = qed_attr->max_cnq; 284 + attr->max_sge = qed_attr->max_sge; 285 + attr->max_inline = qed_attr->max_inline; 286 + attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE); 287 + attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE); 288 + attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc; 289 + attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc; 290 + attr->max_dev_resp_rd_atomic_resc = 291 + qed_attr->max_dev_resp_rd_atomic_resc; 292 + attr->max_cq = qed_attr->max_cq; 293 + attr->max_qp = qed_attr->max_qp; 294 + attr->max_mr = qed_attr->max_mr; 295 + attr->max_mr_size = qed_attr->max_mr_size; 296 + attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES); 297 + attr->max_mw = qed_attr->max_mw; 298 + attr->max_fmr = qed_attr->max_fmr; 299 + attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl; 300 + attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size; 301 + attr->max_pd = qed_attr->max_pd; 302 + attr->max_ah = qed_attr->max_ah; 303 + attr->max_pkey = qed_attr->max_pkey; 304 + attr->max_srq = qed_attr->max_srq; 305 + attr->max_srq_wr = qed_attr->max_srq_wr; 306 + attr->dev_caps = qed_attr->dev_caps; 307 + attr->page_size_caps = qed_attr->page_size_caps; 308 + attr->dev_ack_delay = qed_attr->dev_ack_delay; 309 + attr->reserved_lkey = qed_attr->reserved_lkey; 310 + attr->bad_pkey_counter = qed_attr->bad_pkey_counter; 311 + attr->max_stats_queues = 
qed_attr->max_stats_queues; 312 + 313 + return 0; 314 + } 315 + 316 + static int qedr_init_hw(struct qedr_dev *dev) 317 + { 318 + struct qed_rdma_add_user_out_params out_params; 319 + struct qed_rdma_start_in_params *in_params; 320 + struct qed_rdma_cnq_params *cur_pbl; 321 + struct qed_rdma_events events; 322 + dma_addr_t p_phys_table; 323 + u32 page_cnt; 324 + int rc = 0; 325 + int i; 326 + 327 + in_params = kzalloc(sizeof(*in_params), GFP_KERNEL); 328 + if (!in_params) { 329 + rc = -ENOMEM; 330 + goto out; 331 + } 332 + 333 + in_params->desired_cnq = dev->num_cnq; 334 + for (i = 0; i < dev->num_cnq; i++) { 335 + cur_pbl = &in_params->cnq_pbl_list[i]; 336 + 337 + page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl); 338 + cur_pbl->num_pbl_pages = page_cnt; 339 + 340 + p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl); 341 + cur_pbl->pbl_ptr = (u64)p_phys_table; 342 + } 343 + 344 + events.context = dev; 345 + 346 + in_params->events = &events; 347 + in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS; 348 + in_params->max_mtu = dev->ndev->mtu; 349 + ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr); 350 + 351 + rc = dev->ops->rdma_init(dev->cdev, in_params); 352 + if (rc) 353 + goto out; 354 + 355 + rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params); 356 + if (rc) 357 + goto out; 358 + 359 + dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr; 360 + dev->db_phys_addr = out_params.dpi_phys_addr; 361 + dev->db_size = out_params.dpi_size; 362 + dev->dpi = out_params.dpi; 363 + 364 + rc = qedr_set_device_attr(dev); 365 + out: 366 + kfree(in_params); 367 + if (rc) 368 + DP_ERR(dev, "Init HW Failed rc = %d\n", rc); 369 + 370 + return rc; 371 + } 372 + 373 + void qedr_stop_hw(struct qedr_dev *dev) 374 + { 375 + dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi); 376 + dev->ops->rdma_stop(dev->rdma_ctx); 377 + } 378 + 291 379 static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, 292 380 struct net_device *ndev) 293 
381 { 382 + struct qed_dev_rdma_info dev_info; 294 383 struct qedr_dev *dev; 295 384 int rc = 0, i; 296 385 ··· 537 164 dev->ndev = ndev; 538 165 dev->cdev = cdev; 539 166 167 + qed_ops = qed_get_rdma_ops(); 168 + if (!qed_ops) { 169 + DP_ERR(dev, "Failed to get qed roce operations\n"); 170 + goto init_err; 171 + } 172 + 173 + dev->ops = qed_ops; 174 + rc = qed_ops->fill_dev_info(cdev, &dev_info); 175 + if (rc) 176 + goto init_err; 177 + 178 + dev->num_hwfns = dev_info.common.num_hwfns; 179 + dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev); 180 + 181 + dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev); 182 + if (!dev->num_cnq) { 183 + DP_ERR(dev, "not enough CNQ resources.\n"); 184 + goto init_err; 185 + } 186 + 540 187 qedr_pci_set_atomic(dev, pdev); 188 + 189 + rc = qedr_alloc_resources(dev); 190 + if (rc) 191 + goto init_err; 192 + 193 + rc = qedr_init_hw(dev); 194 + if (rc) 195 + goto alloc_err; 196 + 197 + rc = qedr_setup_irqs(dev); 198 + if (rc) 199 + goto irq_err; 541 200 542 201 rc = qedr_register_device(dev); 543 202 if (rc) { 544 203 DP_ERR(dev, "Unable to allocate register device\n"); 545 - goto init_err; 204 + goto reg_err; 546 205 } 547 206 548 207 for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++) 549 208 if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) 550 - goto init_err; 209 + goto reg_err; 551 210 552 211 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); 553 212 return dev; 554 213 214 + reg_err: 215 + qedr_sync_free_irqs(dev); 216 + irq_err: 217 + qedr_stop_hw(dev); 218 + alloc_err: 219 + qedr_free_resources(dev); 555 220 init_err: 556 221 ib_dealloc_device(&dev->ibdev); 557 222 DP_ERR(dev, "qedr driver load failed rc=%d\n", rc); ··· 604 193 */ 605 194 qedr_remove_sysfiles(dev); 606 195 196 + qedr_stop_hw(dev); 197 + qedr_sync_free_irqs(dev); 198 + qedr_free_resources(dev); 607 199 ib_dealloc_device(&dev->ibdev); 608 200 } 609 201
+123
drivers/infiniband/hw/qedr/qedr.h
··· 35 35 #include <linux/pci.h> 36 36 #include <rdma/ib_addr.h> 37 37 #include <linux/qed/qed_if.h> 38 + #include <linux/qed/qed_chain.h> 39 + #include <linux/qed/qed_roce_if.h> 38 40 #include <linux/qed/qede_roce.h> 41 + #include "qedr_hsi.h" 39 42 40 43 #define QEDR_MODULE_VERSION "8.10.10.0" 41 44 #define QEDR_NODE_DESC "QLogic 579xx RoCE HCA" ··· 50 47 51 48 #define QEDR_MSG_INIT "INIT" 52 49 50 + struct qedr_dev; 51 + 52 + struct qedr_cnq { 53 + struct qedr_dev *dev; 54 + struct qed_chain pbl; 55 + struct qed_sb_info *sb; 56 + char name[32]; 57 + u64 n_comp; 58 + __le16 *hw_cons_ptr; 59 + u8 index; 60 + }; 61 + 62 + #define QEDR_MAX_SGID 128 63 + 64 + struct qedr_device_attr { 65 + u32 vendor_id; 66 + u32 vendor_part_id; 67 + u32 hw_ver; 68 + u64 fw_ver; 69 + u64 node_guid; 70 + u64 sys_image_guid; 71 + u8 max_cnq; 72 + u8 max_sge; 73 + u16 max_inline; 74 + u32 max_sqe; 75 + u32 max_rqe; 76 + u8 max_qp_resp_rd_atomic_resc; 77 + u8 max_qp_req_rd_atomic_resc; 78 + u64 max_dev_resp_rd_atomic_resc; 79 + u32 max_cq; 80 + u32 max_qp; 81 + u32 max_mr; 82 + u64 max_mr_size; 83 + u32 max_cqe; 84 + u32 max_mw; 85 + u32 max_fmr; 86 + u32 max_mr_mw_fmr_pbl; 87 + u64 max_mr_mw_fmr_size; 88 + u32 max_pd; 89 + u32 max_ah; 90 + u8 max_pkey; 91 + u32 max_srq; 92 + u32 max_srq_wr; 93 + u8 max_srq_sge; 94 + u8 max_stats_queues; 95 + u32 dev_caps; 96 + 97 + u64 page_size_caps; 98 + u8 dev_ack_delay; 99 + u32 reserved_lkey; 100 + u32 bad_pkey_counter; 101 + struct qed_rdma_events events; 102 + }; 103 + 53 104 struct qedr_dev { 54 105 struct ib_device ibdev; 55 106 struct qed_dev *cdev; ··· 112 55 113 56 enum ib_atomic_cap atomic_cap; 114 57 58 + void *rdma_ctx; 59 + struct qedr_device_attr attr; 60 + 61 + const struct qed_rdma_ops *ops; 62 + struct qed_int_info int_info; 63 + 64 + struct qed_sb_info *sb_array; 65 + struct qedr_cnq *cnq_array; 66 + int num_cnq; 67 + int sb_start; 68 + 69 + void __iomem *db_addr; 70 + u64 db_phys_addr; 71 + u32 db_size; 72 + u16 dpi; 73 + 74 + 
union ib_gid *sgid_tbl; 75 + 76 + /* Lock for sgid table */ 77 + spinlock_t sgid_lock; 78 + 79 + u64 guid; 80 + 115 81 u32 dp_module; 116 82 u8 dp_level; 83 + u8 num_hwfns; 117 84 }; 85 + 86 + #define QEDR_MAX_SQ_PBL (0x8000) 87 + #define QEDR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *)) 88 + #define QEDR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge)) 89 + #define QEDR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \ 90 + QEDR_SQE_ELEMENT_SIZE) 91 + #define QEDR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \ 92 + QEDR_SQE_ELEMENT_SIZE) 93 + #define QEDR_MAX_SQE ((QEDR_MAX_SQ_PBL_ENTRIES) *\ 94 + (RDMA_RING_PAGE_SIZE) / \ 95 + (QEDR_SQE_ELEMENT_SIZE) /\ 96 + (QEDR_MAX_SQE_ELEMENTS_PER_SQE)) 97 + /* RQ */ 98 + #define QEDR_MAX_RQ_PBL (0x2000) 99 + #define QEDR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *)) 100 + #define QEDR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge)) 101 + #define QEDR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE) 102 + #define QEDR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \ 103 + QEDR_RQE_ELEMENT_SIZE) 104 + #define QEDR_MAX_RQE ((QEDR_MAX_RQ_PBL_ENTRIES) *\ 105 + (RDMA_RING_PAGE_SIZE) / \ 106 + (QEDR_RQE_ELEMENT_SIZE) /\ 107 + (QEDR_MAX_RQE_ELEMENTS_PER_RQE)) 108 + 109 + #define QEDR_CQE_SIZE (sizeof(union rdma_cqe)) 110 + #define QEDR_MAX_CQE_PBL_SIZE (512 * 1024) 111 + #define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \ 112 + sizeof(u64)) - 1) 113 + #define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \ 114 + (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE)) 115 + 116 + #define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) 117 + 118 + #define QEDR_MAX_PORT (1) 119 + 120 + #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) 121 + 122 + static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev) 123 + { 124 + return container_of(ibdev, struct qedr_dev, ibdev); 125 + } 126 + 118 127 #endif
+56
drivers/infiniband/hw/qedr/qedr_hsi.h
··· 1 + /* QLogic qedr NIC Driver 2 + * Copyright (c) 2015-2016 QLogic Corporation 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and /or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 
31 + */ 32 + #ifndef __QED_HSI_ROCE__ 33 + #define __QED_HSI_ROCE__ 34 + 35 + #include <linux/qed/common_hsi.h> 36 + #include <linux/qed/roce_common.h> 37 + #include "qedr_hsi_rdma.h" 38 + 39 + /* Affiliated asynchronous events / errors enumeration */ 40 + enum roce_async_events_type { 41 + ROCE_ASYNC_EVENT_NONE = 0, 42 + ROCE_ASYNC_EVENT_COMM_EST = 1, 43 + ROCE_ASYNC_EVENT_SQ_DRAINED, 44 + ROCE_ASYNC_EVENT_SRQ_LIMIT, 45 + ROCE_ASYNC_EVENT_LAST_WQE_REACHED, 46 + ROCE_ASYNC_EVENT_CQ_ERR, 47 + ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR, 48 + ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR, 49 + ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR, 50 + ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR, 51 + ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR, 52 + ROCE_ASYNC_EVENT_SRQ_EMPTY, 53 + MAX_ROCE_ASYNC_EVENTS_TYPE 54 + }; 55 + 56 + #endif /* __QED_HSI_ROCE__ */
+96
drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
··· 1 + /* QLogic qedr NIC Driver 2 + * Copyright (c) 2015-2016 QLogic Corporation 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and /or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 
31 + */ 32 + #ifndef __QED_HSI_RDMA__ 33 + #define __QED_HSI_RDMA__ 34 + 35 + #include <linux/qed/rdma_common.h> 36 + 37 + /* rdma completion notification queue element */ 38 + struct rdma_cnqe { 39 + struct regpair cq_handle; 40 + }; 41 + 42 + struct rdma_cqe_responder { 43 + struct regpair srq_wr_id; 44 + struct regpair qp_handle; 45 + __le32 imm_data_or_inv_r_Key; 46 + __le32 length; 47 + __le32 imm_data_hi; 48 + __le16 rq_cons; 49 + u8 flags; 50 + }; 51 + 52 + struct rdma_cqe_requester { 53 + __le16 sq_cons; 54 + __le16 reserved0; 55 + __le32 reserved1; 56 + struct regpair qp_handle; 57 + struct regpair reserved2; 58 + __le32 reserved3; 59 + __le16 reserved4; 60 + u8 flags; 61 + u8 status; 62 + }; 63 + 64 + struct rdma_cqe_common { 65 + struct regpair reserved0; 66 + struct regpair qp_handle; 67 + __le16 reserved1[7]; 68 + u8 flags; 69 + u8 status; 70 + }; 71 + 72 + /* rdma completion queue element */ 73 + union rdma_cqe { 74 + struct rdma_cqe_responder resp; 75 + struct rdma_cqe_requester req; 76 + struct rdma_cqe_common cmn; 77 + }; 78 + 79 + struct rdma_sq_sge { 80 + __le32 length; 81 + struct regpair addr; 82 + __le32 l_key; 83 + }; 84 + 85 + struct rdma_rq_sge { 86 + struct regpair addr; 87 + __le32 length; 88 + __le32 flags; 89 + }; 90 + 91 + struct rdma_srq_sge { 92 + struct regpair addr; 93 + __le32 length; 94 + __le32 l_key; 95 + }; 96 + #endif /* __QED_HSI_RDMA__ */