···11+/*******************************************************************22+ * This file is part of the Emulex RoCE Device Driver for *33+ * RoCE (RDMA over Converged Ethernet) adapters. *44+ * Copyright (C) 2008-2012 Emulex. All rights reserved. *55+ * EMULEX and SLI are trademarks of Emulex. *66+ * www.emulex.com *77+ * *88+ * This program is free software; you can redistribute it and/or *99+ * modify it under the terms of version 2 of the GNU General *1010+ * Public License as published by the Free Software Foundation. *1111+ * This program is distributed in the hope that it will be useful. *1212+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *1313+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *1414+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *1515+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *1616+ * TO BE LEGALLY INVALID. See the GNU General Public License for *1717+ * more details, a copy of which can be found in the file COPYING *1818+ * included with this package. *1919+ *2020+ * Contact Information:2121+ * linux-drivers@emulex.com2222+ *2323+ * Emulex2424+ * 3333 Susan Street2525+ * Costa Mesa, CA 926262626+ *******************************************************************/2727+2828+#ifndef __OCRDMA_H__2929+#define __OCRDMA_H__3030+3131+#include <linux/mutex.h>3232+#include <linux/list.h>3333+#include <linux/spinlock.h>3434+#include <linux/pci.h>3535+3636+#include <rdma/ib_verbs.h>3737+#include <rdma/ib_user_verbs.h>3838+3939+#include <be_roce.h>4040+#include "ocrdma_sli.h"4141+4242+#define OCRDMA_ROCE_DEV_VERSION "1.0.0"4343+#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"4444+4545+#define ocrdma_err(format, arg...) 
printk(KERN_ERR format, ##arg)4646+4747+#define OCRDMA_MAX_AH 5124848+4949+#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)5050+5151+struct ocrdma_dev_attr {5252+ u8 fw_ver[32];5353+ u32 vendor_id;5454+ u32 device_id;5555+ u16 max_pd;5656+ u16 max_cq;5757+ u16 max_cqe;5858+ u16 max_qp;5959+ u16 max_wqe;6060+ u16 max_rqe;6161+ u32 max_inline_data;6262+ int max_send_sge;6363+ int max_recv_sge;6464+ int max_mr;6565+ u64 max_mr_size;6666+ u32 max_num_mr_pbl;6767+ int max_fmr;6868+ int max_map_per_fmr;6969+ int max_pages_per_frmr;7070+ u16 max_ord_per_qp;7171+ u16 max_ird_per_qp;7272+7373+ int device_cap_flags;7474+ u8 cq_overflow_detect;7575+ u8 srq_supported;7676+7777+ u32 wqe_size;7878+ u32 rqe_size;7979+ u32 ird_page_size;8080+ u8 local_ca_ack_delay;8181+ u8 ird;8282+ u8 num_ird_pages;8383+};8484+8585+struct ocrdma_pbl {8686+ void *va;8787+ dma_addr_t pa;8888+};8989+9090+struct ocrdma_queue_info {9191+ void *va;9292+ dma_addr_t dma;9393+ u32 size;9494+ u16 len;9595+ u16 entry_size; /* Size of an element in the queue */9696+ u16 id; /* qid, where to ring the doorbell. 
*/9797+ u16 head, tail;9898+ bool created;9999+ atomic_t used; /* Number of valid elements in the queue */100100+};101101+102102+struct ocrdma_eq {103103+ struct ocrdma_queue_info q;104104+ u32 vector;105105+ int cq_cnt;106106+ struct ocrdma_dev *dev;107107+ char irq_name[32];108108+};109109+110110+struct ocrdma_mq {111111+ struct ocrdma_queue_info sq;112112+ struct ocrdma_queue_info cq;113113+ bool rearm_cq;114114+};115115+116116+struct mqe_ctx {117117+ struct mutex lock; /* for serializing mailbox commands on MQ */118118+ wait_queue_head_t cmd_wait;119119+ u32 tag;120120+ u16 cqe_status;121121+ u16 ext_status;122122+ bool cmd_done;123123+};124124+125125+struct ocrdma_dev {126126+ struct ib_device ibdev;127127+ struct ocrdma_dev_attr attr;128128+129129+ struct mutex dev_lock; /* provides syncronise access to device data */130130+ spinlock_t flush_q_lock ____cacheline_aligned;131131+132132+ struct ocrdma_cq **cq_tbl;133133+ struct ocrdma_qp **qp_tbl;134134+135135+ struct ocrdma_eq meq;136136+ struct ocrdma_eq *qp_eq_tbl;137137+ int eq_cnt;138138+ u16 base_eqid;139139+ u16 max_eq;140140+141141+ union ib_gid *sgid_tbl;142142+ /* provided synchronization to sgid table for143143+ * updating gid entries triggered by notifier.144144+ */145145+ spinlock_t sgid_lock;146146+147147+ int gsi_qp_created;148148+ struct ocrdma_cq *gsi_sqcq;149149+ struct ocrdma_cq *gsi_rqcq;150150+151151+ struct {152152+ struct ocrdma_av *va;153153+ dma_addr_t pa;154154+ u32 size;155155+ u32 num_ah;156156+ /* provide synchronization for av157157+ * entry allocations.158158+ */159159+ spinlock_t lock;160160+ u32 ahid;161161+ struct ocrdma_pbl pbl;162162+ } av_tbl;163163+164164+ void *mbx_cmd;165165+ struct ocrdma_mq mq;166166+ struct mqe_ctx mqe_ctx;167167+168168+ struct be_dev_info nic_info;169169+170170+ struct list_head entry;171171+ int id;172172+};173173+174174+struct ocrdma_cq {175175+ struct ib_cq ibcq;176176+ struct ocrdma_dev *dev;177177+ struct ocrdma_cqe *va;178178+ u32 phase;179179+ 
u32 getp; /* pointer to pending wrs to180180+ * return to stack, wrap arounds181181+ * at max_hw_cqe182182+ */183183+ u32 max_hw_cqe;184184+ bool phase_change;185185+ bool armed, solicited;186186+ bool arm_needed;187187+188188+ spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization189189+ * to cq polling190190+ */191191+ /* syncronizes cq completion handler invoked from multiple context */192192+ spinlock_t comp_handler_lock ____cacheline_aligned;193193+ u16 id;194194+ u16 eqn;195195+196196+ struct ocrdma_ucontext *ucontext;197197+ dma_addr_t pa;198198+ u32 len;199199+ atomic_t use_cnt;200200+201201+ /* head of all qp's sq and rq for which cqes need to be flushed202202+ * by the software.203203+ */204204+ struct list_head sq_head, rq_head;205205+};206206+207207+struct ocrdma_pd {208208+ struct ib_pd ibpd;209209+ struct ocrdma_dev *dev;210210+ struct ocrdma_ucontext *uctx;211211+ atomic_t use_cnt;212212+ u32 id;213213+ int num_dpp_qp;214214+ u32 dpp_page;215215+ bool dpp_enabled;216216+};217217+218218+struct ocrdma_ah {219219+ struct ib_ah ibah;220220+ struct ocrdma_dev *dev;221221+ struct ocrdma_av *av;222222+ u16 sgid_index;223223+ u32 id;224224+};225225+226226+struct ocrdma_qp_hwq_info {227227+ u8 *va; /* virtual address */228228+ u32 max_sges;229229+ u32 head, tail;230230+ u32 entry_size;231231+ u32 max_cnt;232232+ u32 max_wqe_idx;233233+ u32 free_delta;234234+ u16 dbid; /* qid, where to ring the doorbell. 
*/235235+ u32 len;236236+ dma_addr_t pa;237237+};238238+239239+struct ocrdma_srq {240240+ struct ib_srq ibsrq;241241+ struct ocrdma_dev *dev;242242+ u8 __iomem *db;243243+ /* provide synchronization to multiple context(s) posting rqe */244244+ spinlock_t q_lock ____cacheline_aligned;245245+246246+ struct ocrdma_qp_hwq_info rq;247247+ struct ocrdma_pd *pd;248248+ atomic_t use_cnt;249249+ u32 id;250250+ u64 *rqe_wr_id_tbl;251251+ u32 *idx_bit_fields;252252+ u32 bit_fields_len;253253+};254254+255255+struct ocrdma_qp {256256+ struct ib_qp ibqp;257257+ struct ocrdma_dev *dev;258258+259259+ u8 __iomem *sq_db;260260+ /* provide synchronization to multiple context(s) posting wqe, rqe */261261+ spinlock_t q_lock ____cacheline_aligned;262262+ struct ocrdma_qp_hwq_info sq;263263+ struct {264264+ uint64_t wrid;265265+ uint16_t dpp_wqe_idx;266266+ uint16_t dpp_wqe;267267+ uint8_t signaled;268268+ uint8_t rsvd[3];269269+ } *wqe_wr_id_tbl;270270+ u32 max_inline_data;271271+ struct ocrdma_cq *sq_cq;272272+ /* list maintained per CQ to flush SQ errors */273273+ struct list_head sq_entry;274274+275275+ u8 __iomem *rq_db;276276+ struct ocrdma_qp_hwq_info rq;277277+ u64 *rqe_wr_id_tbl;278278+ struct ocrdma_cq *rq_cq;279279+ struct ocrdma_srq *srq;280280+ /* list maintained per CQ to flush RQ errors */281281+ struct list_head rq_entry;282282+283283+ enum ocrdma_qp_state state; /* QP state */284284+ int cap_flags;285285+ u32 max_ord, max_ird;286286+287287+ u32 id;288288+ struct ocrdma_pd *pd;289289+290290+ enum ib_qp_type qp_type;291291+292292+ int sgid_idx;293293+ u32 qkey;294294+ bool dpp_enabled;295295+ u8 *ird_q_va;296296+};297297+298298+#define OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp) \299299+ (((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) && \300300+ (qp->id < 64)) ? 
24 : 16)301301+302302+struct ocrdma_hw_mr {303303+ struct ocrdma_dev *dev;304304+ u32 lkey;305305+ u8 fr_mr;306306+ u8 remote_atomic;307307+ u8 remote_rd;308308+ u8 remote_wr;309309+ u8 local_rd;310310+ u8 local_wr;311311+ u8 mw_bind;312312+ u8 rsvd;313313+ u64 len;314314+ struct ocrdma_pbl *pbl_table;315315+ u32 num_pbls;316316+ u32 num_pbes;317317+ u32 pbl_size;318318+ u32 pbe_size;319319+ u64 fbo;320320+ u64 va;321321+};322322+323323+struct ocrdma_mr {324324+ struct ib_mr ibmr;325325+ struct ib_umem *umem;326326+ struct ocrdma_hw_mr hwmr;327327+ struct ocrdma_pd *pd;328328+};329329+330330+struct ocrdma_ucontext {331331+ struct ib_ucontext ibucontext;332332+ struct ocrdma_dev *dev;333333+334334+ struct list_head mm_head;335335+ struct mutex mm_list_lock; /* protects list entries of mm type */336336+ struct {337337+ u32 *va;338338+ dma_addr_t pa;339339+ u32 len;340340+ } ah_tbl;341341+};342342+343343+struct ocrdma_mm {344344+ struct {345345+ u64 phy_addr;346346+ unsigned long len;347347+ } key;348348+ struct list_head entry;349349+};350350+351351+static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)352352+{353353+ return container_of(ibdev, struct ocrdma_dev, ibdev);354354+}355355+356356+static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext357357+ *ibucontext)358358+{359359+ return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);360360+}361361+362362+static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)363363+{364364+ return container_of(ibpd, struct ocrdma_pd, ibpd);365365+}366366+367367+static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)368368+{369369+ return container_of(ibcq, struct ocrdma_cq, ibcq);370370+}371371+372372+static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)373373+{374374+ return container_of(ibqp, struct ocrdma_qp, ibqp);375375+}376376+377377+static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)378378+{379379+ return container_of(ibmr, 
struct ocrdma_mr, ibmr);380380+}381381+382382+static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)383383+{384384+ return container_of(ibah, struct ocrdma_ah, ibah);385385+}386386+387387+static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)388388+{389389+ return container_of(ibsrq, struct ocrdma_srq, ibsrq);390390+}391391+392392+#endif
+134
drivers/infiniband/hw/ocrdma/ocrdma_abi.h
···11+/*******************************************************************22+ * This file is part of the Emulex RoCE Device Driver for *33+ * RoCE (RDMA over Converged Ethernet) adapters. *44+ * Copyright (C) 2008-2012 Emulex. All rights reserved. *55+ * EMULEX and SLI are trademarks of Emulex. *66+ * www.emulex.com *77+ * *88+ * This program is free software; you can redistribute it and/or *99+ * modify it under the terms of version 2 of the GNU General *1010+ * Public License as published by the Free Software Foundation. *1111+ * This program is distributed in the hope that it will be useful. *1212+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *1313+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *1414+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *1515+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *1616+ * TO BE LEGALLY INVALID. See the GNU General Public License for *1717+ * more details, a copy of which can be found in the file COPYING *1818+ * included with this package. *1919+ *2020+ * Contact Information:2121+ * linux-drivers@emulex.com2222+ *2323+ * Emulex2424+ * 3333 Susan Street2525+ * Costa Mesa, CA 926262626+ *******************************************************************/2727+2828+#ifndef __OCRDMA_ABI_H__2929+#define __OCRDMA_ABI_H__3030+3131+struct ocrdma_alloc_ucontext_resp {3232+ u32 dev_id;3333+ u32 wqe_size;3434+ u32 max_inline_data;3535+ u32 dpp_wqe_size;3636+ u64 ah_tbl_page;3737+ u32 ah_tbl_len;3838+ u32 rsvd;3939+ u8 fw_ver[32];4040+ u32 rqe_size;4141+ u64 rsvd1;4242+} __packed;4343+4444+/* user kernel communication data structures. 
*/4545+struct ocrdma_alloc_pd_ureq {4646+ u64 rsvd1;4747+} __packed;4848+4949+struct ocrdma_alloc_pd_uresp {5050+ u32 id;5151+ u32 dpp_enabled;5252+ u32 dpp_page_addr_hi;5353+ u32 dpp_page_addr_lo;5454+ u64 rsvd1;5555+} __packed;5656+5757+struct ocrdma_create_cq_ureq {5858+ u32 dpp_cq;5959+ u32 rsvd;6060+} __packed;6161+6262+#define MAX_CQ_PAGES 86363+struct ocrdma_create_cq_uresp {6464+ u32 cq_id;6565+ u32 page_size;6666+ u32 num_pages;6767+ u32 max_hw_cqe;6868+ u64 page_addr[MAX_CQ_PAGES];6969+ u64 db_page_addr;7070+ u32 db_page_size;7171+ u32 phase_change;7272+ u64 rsvd1;7373+ u64 rsvd2;7474+} __packed;7575+7676+#define MAX_QP_PAGES 87777+#define MAX_UD_AV_PAGES 87878+7979+struct ocrdma_create_qp_ureq {8080+ u8 enable_dpp_cq;8181+ u8 rsvd;8282+ u16 dpp_cq_id;8383+ u32 rsvd1;8484+};8585+8686+struct ocrdma_create_qp_uresp {8787+ u16 qp_id;8888+ u16 sq_dbid;8989+ u16 rq_dbid;9090+ u16 resv0;9191+ u32 sq_page_size;9292+ u32 rq_page_size;9393+ u32 num_sq_pages;9494+ u32 num_rq_pages;9595+ u64 sq_page_addr[MAX_QP_PAGES];9696+ u64 rq_page_addr[MAX_QP_PAGES];9797+ u64 db_page_addr;9898+ u32 db_page_size;9999+ u32 dpp_credit;100100+ u32 dpp_offset;101101+ u32 rsvd1;102102+ u32 num_wqe_allocated;103103+ u32 num_rqe_allocated;104104+ u32 free_wqe_delta;105105+ u32 free_rqe_delta;106106+ u32 db_sq_offset;107107+ u32 db_rq_offset;108108+ u32 db_shift;109109+ u64 rsvd2;110110+ u64 rsvd3;111111+} __packed;112112+113113+struct ocrdma_create_srq_uresp {114114+ u16 rq_dbid;115115+ u16 resv0;116116+ u32 resv1;117117+118118+ u32 rq_page_size;119119+ u32 num_rq_pages;120120+121121+ u64 rq_page_addr[MAX_QP_PAGES];122122+ u64 db_page_addr;123123+124124+ u32 db_page_size;125125+ u32 num_rqe_allocated;126126+ u32 db_rq_offset;127127+ u32 db_shift;128128+129129+ u32 free_rqe_delta;130130+ u32 rsvd2;131131+ u64 rsvd3;132132+} __packed;133133+134134+#endif /* __OCRDMA_ABI_H__ */
+172
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
···11+/*******************************************************************22+ * This file is part of the Emulex RoCE Device Driver for *33+ * RoCE (RDMA over Converged Ethernet) adapters. *44+ * Copyright (C) 2008-2012 Emulex. All rights reserved. *55+ * EMULEX and SLI are trademarks of Emulex. *66+ * www.emulex.com *77+ * *88+ * This program is free software; you can redistribute it and/or *99+ * modify it under the terms of version 2 of the GNU General *1010+ * Public License as published by the Free Software Foundation. *1111+ * This program is distributed in the hope that it will be useful. *1212+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *1313+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *1414+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *1515+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *1616+ * TO BE LEGALLY INVALID. See the GNU General Public License for *1717+ * more details, a copy of which can be found in the file COPYING *1818+ * included with this package. 
*1919+ *2020+ * Contact Information:2121+ * linux-drivers@emulex.com2222+ *2323+ * Emulex2424+ * 3333 Susan Street2525+ * Costa Mesa, CA 926262626+ *******************************************************************/2727+2828+#include <net/neighbour.h>2929+#include <net/netevent.h>3030+3131+#include <rdma/ib_addr.h>3232+#include <rdma/ib_cache.h>3333+3434+#include "ocrdma.h"3535+#include "ocrdma_verbs.h"3636+#include "ocrdma_ah.h"3737+#include "ocrdma_hw.h"3838+3939+static inline int set_av_attr(struct ocrdma_ah *ah,4040+ struct ib_ah_attr *attr, int pdid)4141+{4242+ int status = 0;4343+ u16 vlan_tag; bool vlan_enabled = false;4444+ struct ocrdma_dev *dev = ah->dev;4545+ struct ocrdma_eth_vlan eth;4646+ struct ocrdma_grh grh;4747+ int eth_sz;4848+4949+ memset(ð, 0, sizeof(eth));5050+ memset(&grh, 0, sizeof(grh));5151+5252+ ah->sgid_index = attr->grh.sgid_index;5353+5454+ vlan_tag = rdma_get_vlan_id(&attr->grh.dgid);5555+ if (vlan_tag && (vlan_tag < 0x1000)) {5656+ eth.eth_type = cpu_to_be16(0x8100);5757+ eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);5858+ vlan_tag |= (attr->sl & 7) << 13;5959+ eth.vlan_tag = cpu_to_be16(vlan_tag);6060+ eth_sz = sizeof(struct ocrdma_eth_vlan);6161+ vlan_enabled = true;6262+ } else {6363+ eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);6464+ eth_sz = sizeof(struct ocrdma_eth_basic);6565+ }6666+ memcpy(ð.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);6767+ status = ocrdma_resolve_dgid(dev, &attr->grh.dgid, ð.dmac[0]);6868+ if (status)6969+ return status;7070+ status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,7171+ (union ib_gid *)&grh.sgid[0]);7272+ if (status)7373+ return status;7474+7575+ grh.tclass_flow = cpu_to_be32((6 << 28) |7676+ (attr->grh.traffic_class << 24) |7777+ attr->grh.flow_label);7878+ /* 0x1b is next header value in GRH */7979+ grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |8080+ (0x1b << 8) | attr->grh.hop_limit);8181+8282+ memcpy(&grh.dgid[0], attr->grh.dgid.raw, 
sizeof(attr->grh.dgid.raw));8383+ memcpy(&ah->av->eth_hdr, ð, eth_sz);8484+ memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));8585+ if (vlan_enabled)8686+ ah->av->valid |= OCRDMA_AV_VLAN_VALID;8787+ return status;8888+}8989+9090+struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)9191+{9292+ u32 *ahid_addr;9393+ int status;9494+ struct ocrdma_ah *ah;9595+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);9696+ struct ocrdma_dev *dev = pd->dev;9797+9898+ if (!(attr->ah_flags & IB_AH_GRH))9999+ return ERR_PTR(-EINVAL);100100+101101+ ah = kzalloc(sizeof *ah, GFP_ATOMIC);102102+ if (!ah)103103+ return ERR_PTR(-ENOMEM);104104+ ah->dev = pd->dev;105105+106106+ status = ocrdma_alloc_av(dev, ah);107107+ if (status)108108+ goto av_err;109109+ status = set_av_attr(ah, attr, pd->id);110110+ if (status)111111+ goto av_conf_err;112112+113113+ /* if pd is for the user process, pass the ah_id to user space */114114+ if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {115115+ ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;116116+ *ahid_addr = ah->id;117117+ }118118+ return &ah->ibah;119119+120120+av_conf_err:121121+ ocrdma_free_av(dev, ah);122122+av_err:123123+ kfree(ah);124124+ return ERR_PTR(status);125125+}126126+127127+int ocrdma_destroy_ah(struct ib_ah *ibah)128128+{129129+ struct ocrdma_ah *ah = get_ocrdma_ah(ibah);130130+ ocrdma_free_av(ah->dev, ah);131131+ kfree(ah);132132+ return 0;133133+}134134+135135+int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)136136+{137137+ struct ocrdma_ah *ah = get_ocrdma_ah(ibah);138138+ struct ocrdma_av *av = ah->av;139139+ struct ocrdma_grh *grh;140140+ attr->ah_flags |= IB_AH_GRH;141141+ if (ah->av->valid & Bit(1)) {142142+ grh = (struct ocrdma_grh *)((u8 *)ah->av +143143+ sizeof(struct ocrdma_eth_vlan));144144+ attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;145145+ } else {146146+ grh = (struct ocrdma_grh *)((u8 *)ah->av +147147+ sizeof(struct ocrdma_eth_basic));148148+ attr->sl = 0;149149+ }150150+ 
memcpy(&attr->grh.dgid.raw[0], &grh->dgid[0], sizeof(grh->dgid));151151+ attr->grh.sgid_index = ah->sgid_index;152152+ attr->grh.hop_limit = be32_to_cpu(grh->pdid_hoplimit) & 0xff;153153+ attr->grh.traffic_class = be32_to_cpu(grh->tclass_flow) >> 24;154154+ attr->grh.flow_label = be32_to_cpu(grh->tclass_flow) & 0x00ffffffff;155155+ return 0;156156+}157157+158158+int ocrdma_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)159159+{160160+ /* modify_ah is unsupported */161161+ return -ENOSYS;162162+}163163+164164+int ocrdma_process_mad(struct ib_device *ibdev,165165+ int process_mad_flags,166166+ u8 port_num,167167+ struct ib_wc *in_wc,168168+ struct ib_grh *in_grh,169169+ struct ib_mad *in_mad, struct ib_mad *out_mad)170170+{171171+ return IB_MAD_RESULT_SUCCESS;172172+}
+42
drivers/infiniband/hw/ocrdma/ocrdma_ah.h
···11+/*******************************************************************22+ * This file is part of the Emulex RoCE Device Driver for *33+ * RoCE (RDMA over Converged Ethernet) adapters. *44+ * Copyright (C) 2008-2012 Emulex. All rights reserved. *55+ * EMULEX and SLI are trademarks of Emulex. *66+ * www.emulex.com *77+ * *88+ * This program is free software; you can redistribute it and/or *99+ * modify it under the terms of version 2 of the GNU General *1010+ * Public License as published by the Free Software Foundation. *1111+ * This program is distributed in the hope that it will be useful. *1212+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *1313+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *1414+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *1515+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *1616+ * TO BE LEGALLY INVALID. See the GNU General Public License for *1717+ * more details, a copy of which can be found in the file COPYING *1818+ * included with this package. *1919+ *2020+ * Contact Information:2121+ * linux-drivers@emulex.com2222+ *2323+ * Emulex2424+ * 3333 Susan Street2525+ * Costa Mesa, CA 926262626+ *******************************************************************/2727+2828+#ifndef __OCRDMA_AH_H__2929+#define __OCRDMA_AH_H__3030+3131+struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);3232+int ocrdma_destroy_ah(struct ib_ah *);3333+int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);3434+int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *);3535+3636+int ocrdma_process_mad(struct ib_device *,3737+ int process_mad_flags,3838+ u8 port_num,3939+ struct ib_wc *in_wc,4040+ struct ib_grh *in_grh,4141+ struct ib_mad *in_mad, struct ib_mad *out_mad);4242+#endif /* __OCRDMA_AH_H__ */
+2640
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
···11+/*******************************************************************22+ * This file is part of the Emulex RoCE Device Driver for *33+ * RoCE (RDMA over Converged Ethernet) CNA Adapters. *44+ * Copyright (C) 2008-2012 Emulex. All rights reserved. *55+ * EMULEX and SLI are trademarks of Emulex. *66+ * www.emulex.com *77+ * *88+ * This program is free software; you can redistribute it and/or *99+ * modify it under the terms of version 2 of the GNU General *1010+ * Public License as published by the Free Software Foundation. *1111+ * This program is distributed in the hope that it will be useful. *1212+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *1313+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *1414+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *1515+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *1616+ * TO BE LEGALLY INVALID. See the GNU General Public License for *1717+ * more details, a copy of which can be found in the file COPYING *1818+ * included with this package. 
*1919+ *2020+ * Contact Information:2121+ * linux-drivers@emulex.com2222+ *2323+ * Emulex2424+ * 3333 Susan Street2525+ * Costa Mesa, CA 926262626+ *******************************************************************/2727+2828+#include <linux/sched.h>2929+#include <linux/interrupt.h>3030+#include <linux/log2.h>3131+#include <linux/dma-mapping.h>3232+3333+#include <rdma/ib_verbs.h>3434+#include <rdma/ib_user_verbs.h>3535+#include <rdma/ib_addr.h>3636+3737+#include "ocrdma.h"3838+#include "ocrdma_hw.h"3939+#include "ocrdma_verbs.h"4040+#include "ocrdma_ah.h"4141+4242+enum mbx_status {4343+ OCRDMA_MBX_STATUS_FAILED = 1,4444+ OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3,4545+ OCRDMA_MBX_STATUS_OOR = 100,4646+ OCRDMA_MBX_STATUS_INVALID_PD = 101,4747+ OCRDMA_MBX_STATUS_PD_INUSE = 102,4848+ OCRDMA_MBX_STATUS_INVALID_CQ = 103,4949+ OCRDMA_MBX_STATUS_INVALID_QP = 104,5050+ OCRDMA_MBX_STATUS_INVALID_LKEY = 105,5151+ OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106,5252+ OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107,5353+ OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108,5454+ OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109,5555+ OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110,5656+ OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111,5757+ OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112,5858+ OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113,5959+ OCRDMA_MBX_STATUS_MW_BOUND = 114,6060+ OCRDMA_MBX_STATUS_INVALID_VA = 115,6161+ OCRDMA_MBX_STATUS_INVALID_LENGTH = 116,6262+ OCRDMA_MBX_STATUS_INVALID_FBO = 117,6363+ OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118,6464+ OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119,6565+ OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120,6666+ OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121,6767+ OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129,6868+ OCRDMA_MBX_STATUS_SRQ_ERROR = 133,6969+ OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134,7070+ OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135,7171+ OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136,7272+ OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137,7373+ OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138,7474+ OCRDMA_MBX_STATUS_QP_BOUND = 130,7575+ 
OCRDMA_MBX_STATUS_INVALID_CHANGE = 139,7676+ OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140,7777+ OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141,7878+ OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142,7979+ OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143,8080+ OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 1448181+};8282+8383+enum additional_status {8484+ OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 228585+};8686+8787+enum cqe_status {8888+ OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1,8989+ OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2,9090+ OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3,9191+ OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4,9292+ OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 59393+};9494+9595+static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)9696+{9797+ return (u8 *)eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));9898+}9999+100100+static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)101101+{102102+ eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);103103+}104104+105105+static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)106106+{107107+ struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)108108+ ((u8 *) dev->mq.cq.va +109109+ (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));110110+111111+ if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))112112+ return NULL;113113+ return cqe;114114+}115115+116116+static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)117117+{118118+ dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);119119+}120120+121121+static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)122122+{123123+ return (struct ocrdma_mqe *)((u8 *) dev->mq.sq.va +124124+ (dev->mq.sq.head *125125+ sizeof(struct ocrdma_mqe)));126126+}127127+128128+static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)129129+{130130+ dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);131131+ atomic_inc(&dev->mq.sq.used);132132+}133133+134134+static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)135135+{136136+ 
return (void *)((u8 *) dev->mq.sq.va +137137+ (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)));138138+}139139+140140+enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)141141+{142142+ switch (qps) {143143+ case OCRDMA_QPS_RST:144144+ return IB_QPS_RESET;145145+ case OCRDMA_QPS_INIT:146146+ return IB_QPS_INIT;147147+ case OCRDMA_QPS_RTR:148148+ return IB_QPS_RTR;149149+ case OCRDMA_QPS_RTS:150150+ return IB_QPS_RTS;151151+ case OCRDMA_QPS_SQD:152152+ case OCRDMA_QPS_SQ_DRAINING:153153+ return IB_QPS_SQD;154154+ case OCRDMA_QPS_SQE:155155+ return IB_QPS_SQE;156156+ case OCRDMA_QPS_ERR:157157+ return IB_QPS_ERR;158158+ };159159+ return IB_QPS_ERR;160160+}161161+162162+enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)163163+{164164+ switch (qps) {165165+ case IB_QPS_RESET:166166+ return OCRDMA_QPS_RST;167167+ case IB_QPS_INIT:168168+ return OCRDMA_QPS_INIT;169169+ case IB_QPS_RTR:170170+ return OCRDMA_QPS_RTR;171171+ case IB_QPS_RTS:172172+ return OCRDMA_QPS_RTS;173173+ case IB_QPS_SQD:174174+ return OCRDMA_QPS_SQD;175175+ case IB_QPS_SQE:176176+ return OCRDMA_QPS_SQE;177177+ case IB_QPS_ERR:178178+ return OCRDMA_QPS_ERR;179179+ };180180+ return OCRDMA_QPS_ERR;181181+}182182+183183+static int ocrdma_get_mbx_errno(u32 status)184184+{185185+ int err_num = -EFAULT;186186+ u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>187187+ OCRDMA_MBX_RSP_STATUS_SHIFT;188188+ u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>189189+ OCRDMA_MBX_RSP_ASTATUS_SHIFT;190190+191191+ switch (mbox_status) {192192+ case OCRDMA_MBX_STATUS_OOR:193193+ case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:194194+ err_num = -EAGAIN;195195+ break;196196+197197+ case OCRDMA_MBX_STATUS_INVALID_PD:198198+ case OCRDMA_MBX_STATUS_INVALID_CQ:199199+ case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:200200+ case OCRDMA_MBX_STATUS_INVALID_QP:201201+ case OCRDMA_MBX_STATUS_INVALID_CHANGE:202202+ case OCRDMA_MBX_STATUS_MTU_EXCEEDS:203203+ case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:204204+ case 
OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:205205+ case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:206206+ case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:207207+ case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:208208+ case OCRDMA_MBX_STATUS_INVALID_LKEY:209209+ case OCRDMA_MBX_STATUS_INVALID_VA:210210+ case OCRDMA_MBX_STATUS_INVALID_LENGTH:211211+ case OCRDMA_MBX_STATUS_INVALID_FBO:212212+ case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:213213+ case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:214214+ case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:215215+ case OCRDMA_MBX_STATUS_SRQ_ERROR:216216+ case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:217217+ err_num = -EINVAL;218218+ break;219219+220220+ case OCRDMA_MBX_STATUS_PD_INUSE:221221+ case OCRDMA_MBX_STATUS_QP_BOUND:222222+ case OCRDMA_MBX_STATUS_MW_STILL_BOUND:223223+ case OCRDMA_MBX_STATUS_MW_BOUND:224224+ err_num = -EBUSY;225225+ break;226226+227227+ case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:228228+ case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:229229+ case OCRDMA_MBX_STATUS_RQE_EXCEEDS:230230+ case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:231231+ case OCRDMA_MBX_STATUS_ORD_EXCEEDS:232232+ case OCRDMA_MBX_STATUS_IRD_EXCEEDS:233233+ case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:234234+ case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:235235+ case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:236236+ err_num = -ENOBUFS;237237+ break;238238+239239+ case OCRDMA_MBX_STATUS_FAILED:240240+ switch (add_status) {241241+ case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:242242+ err_num = -EAGAIN;243243+ break;244244+ }245245+ default:246246+ err_num = -EFAULT;247247+ }248248+ return err_num;249249+}250250+251251+static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)252252+{253253+ int err_num = -EINVAL;254254+255255+ switch (cqe_status) {256256+ case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:257257+ err_num = -EPERM;258258+ break;259259+ case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:260260+ err_num = -EINVAL;261261+ break;262262+ case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:263263+ case 
OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:264264+ err_num = -EAGAIN;265265+ break;266266+ case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:267267+ err_num = -EIO;268268+ break;269269+ }270270+ return err_num;271271+}272272+273273+void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,274274+ bool solicited, u16 cqe_popped)275275+{276276+ u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;277277+278278+ val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<279279+ OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);280280+281281+ if (armed)282282+ val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);283283+ if (solicited)284284+ val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);285285+ val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);286286+ iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);287287+}288288+289289+static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)290290+{291291+ u32 val = 0;292292+293293+ val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;294294+ val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;295295+ iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);296296+}297297+298298+static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,299299+ bool arm, bool clear_int, u16 num_eqe)300300+{301301+ u32 val = 0;302302+303303+ val |= eq_id & OCRDMA_EQ_ID_MASK;304304+ val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);305305+ if (arm)306306+ val |= (1 << OCRDMA_REARM_SHIFT);307307+ if (clear_int)308308+ val |= (1 << OCRDMA_EQ_CLR_SHIFT);309309+ val |= (1 << OCRDMA_EQ_TYPE_SHIFT);310310+ val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);311311+ iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);312312+}313313+314314+static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,315315+ u8 opcode, u8 subsys, u32 cmd_len)316316+{317317+ cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));318318+ cmd_hdr->timeout = 20; /* seconds */319319+ cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);320320+}321321+322322+static void *ocrdma_init_emb_mqe(u8 opcode, u32 
cmd_len)323323+{324324+ struct ocrdma_mqe *mqe;325325+326326+ mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);327327+ if (!mqe)328328+ return NULL;329329+ mqe->hdr.spcl_sge_cnt_emb |=330330+ (OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &331331+ OCRDMA_MQE_HDR_EMB_MASK;332332+ mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);333333+334334+ ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,335335+ mqe->hdr.pyld_len);336336+ return mqe;337337+}338338+339339+static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)340340+{341341+ dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);342342+}343343+344344+static int ocrdma_alloc_q(struct ocrdma_dev *dev,345345+ struct ocrdma_queue_info *q, u16 len, u16 entry_size)346346+{347347+ memset(q, 0, sizeof(*q));348348+ q->len = len;349349+ q->entry_size = entry_size;350350+ q->size = len * entry_size;351351+ q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,352352+ &q->dma, GFP_KERNEL);353353+ if (!q->va)354354+ return -ENOMEM;355355+ memset(q->va, 0, q->size);356356+ return 0;357357+}358358+359359+static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,360360+ dma_addr_t host_pa, int hw_page_size)361361+{362362+ int i;363363+364364+ for (i = 0; i < cnt; i++) {365365+ q_pa[i].lo = (u32) (host_pa & 0xffffffff);366366+ q_pa[i].hi = (u32) upper_32_bits(host_pa);367367+ host_pa += hw_page_size;368368+ }369369+}370370+371371+static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev,372372+ struct ocrdma_eq *eq)373373+{374374+ /* assign vector and update vector id for next EQ */375375+ eq->vector = dev->nic_info.msix.start_vector;376376+ dev->nic_info.msix.start_vector += 1;377377+}378378+379379+static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev)380380+{381381+ /* this assumes that EQs are freed in exactly reverse order382382+ * as its allocation.383383+ */384384+ dev->nic_info.msix.start_vector -= 1;385385+}386386+387387+int 
ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,388388+ int queue_type)389389+{390390+ u8 opcode = 0;391391+ int status;392392+ struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;393393+394394+ switch (queue_type) {395395+ case QTYPE_MCCQ:396396+ opcode = OCRDMA_CMD_DELETE_MQ;397397+ break;398398+ case QTYPE_CQ:399399+ opcode = OCRDMA_CMD_DELETE_CQ;400400+ break;401401+ case QTYPE_EQ:402402+ opcode = OCRDMA_CMD_DELETE_EQ;403403+ break;404404+ default:405405+ BUG();406406+ }407407+ memset(cmd, 0, sizeof(*cmd));408408+ ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));409409+ cmd->id = q->id;410410+411411+ status = be_roce_mcc_cmd(dev->nic_info.netdev,412412+ cmd, sizeof(*cmd), NULL, NULL);413413+ if (!status)414414+ q->created = false;415415+ return status;416416+}417417+418418+static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)419419+{420420+ int status;421421+ struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;422422+ struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;423423+424424+ memset(cmd, 0, sizeof(*cmd));425425+ ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,426426+ sizeof(*cmd));427427+ if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)428428+ cmd->req.rsvd_version = 0;429429+ else430430+ cmd->req.rsvd_version = 2;431431+432432+ cmd->num_pages = 4;433433+ cmd->valid = OCRDMA_CREATE_EQ_VALID;434434+ cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;435435+436436+ ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,437437+ PAGE_SIZE_4K);438438+ status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,439439+ NULL);440440+ if (!status) {441441+ eq->q.id = rsp->vector_eqid & 0xffff;442442+ if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)443443+ ocrdma_assign_eq_vect_gen2(dev, eq);444444+ else {445445+ eq->vector = (rsp->vector_eqid >> 16) & 0xffff;446446+ dev->nic_info.msix.start_vector += 1;447447+ }448448+ eq->q.created = true;449449+ }450450+ return 
status;451451+}452452+453453+static int ocrdma_create_eq(struct ocrdma_dev *dev,454454+ struct ocrdma_eq *eq, u16 q_len)455455+{456456+ int status;457457+458458+ status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,459459+ sizeof(struct ocrdma_eqe));460460+ if (status)461461+ return status;462462+463463+ status = ocrdma_mbx_create_eq(dev, eq);464464+ if (status)465465+ goto mbx_err;466466+ eq->dev = dev;467467+ ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);468468+469469+ return 0;470470+mbx_err:471471+ ocrdma_free_q(dev, &eq->q);472472+ return status;473473+}474474+475475+static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)476476+{477477+ int irq;478478+479479+ if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)480480+ irq = dev->nic_info.pdev->irq;481481+ else482482+ irq = dev->nic_info.msix.vector_list[eq->vector];483483+ return irq;484484+}485485+486486+static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)487487+{488488+ if (eq->q.created) {489489+ ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);490490+ if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)491491+ ocrdma_free_eq_vect_gen2(dev);492492+ ocrdma_free_q(dev, &eq->q);493493+ }494494+}495495+496496+static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)497497+{498498+ int irq;499499+500500+ /* disarm EQ so that interrupts are not generated501501+ * during freeing and EQ delete is in progress.502502+ */503503+ ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);504504+505505+ irq = ocrdma_get_irq(dev, eq);506506+ free_irq(irq, eq);507507+ _ocrdma_destroy_eq(dev, eq);508508+}509509+510510+static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev)511511+{512512+ int i;513513+514514+ /* deallocate the data path eqs */515515+ for (i = 0; i < dev->eq_cnt; i++)516516+ ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);517517+}518518+519519+int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,520520+ struct ocrdma_queue_info *cq,521521+ struct ocrdma_queue_info 
*eq)522522+{523523+ struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;524524+ struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;525525+ int status;526526+527527+ memset(cmd, 0, sizeof(*cmd));528528+ ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,529529+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));530530+531531+ cmd->pgsz_pgcnt = PAGES_4K_SPANNED(cq->va, cq->size);532532+ cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;533533+ cmd->eqn = (eq->id << OCRDMA_CREATE_CQ_EQID_SHIFT);534534+535535+ ocrdma_build_q_pages(&cmd->pa[0], cmd->pgsz_pgcnt,536536+ cq->dma, PAGE_SIZE_4K);537537+ status = be_roce_mcc_cmd(dev->nic_info.netdev,538538+ cmd, sizeof(*cmd), NULL, NULL);539539+ if (!status) {540540+ cq->id = (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);541541+ cq->created = true;542542+ }543543+ return status;544544+}545545+546546+static u32 ocrdma_encoded_q_len(int q_len)547547+{548548+ u32 len_encoded = fls(q_len); /* log2(len) + 1 */549549+550550+ if (len_encoded == 16)551551+ len_encoded = 0;552552+ return len_encoded;553553+}554554+555555+static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,556556+ struct ocrdma_queue_info *mq,557557+ struct ocrdma_queue_info *cq)558558+{559559+ int num_pages, status;560560+ struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;561561+ struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;562562+ struct ocrdma_pa *pa;563563+564564+ memset(cmd, 0, sizeof(*cmd));565565+ num_pages = PAGES_4K_SPANNED(mq->va, mq->size);566566+567567+ if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {568568+ ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ,569569+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));570570+ cmd->v0.pages = num_pages;571571+ cmd->v0.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;572572+ cmd->v0.async_cqid_valid = (cq->id << 1);573573+ cmd->v0.cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<574574+ OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);575575+ cmd->v0.cqid_ringsize |=576576+ (cq->id << OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT);577577+ cmd->v0.valid = 
OCRDMA_CREATE_MQ_VALID;578578+ pa = &cmd->v0.pa[0];579579+ } else {580580+ ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,581581+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));582582+ cmd->req.rsvd_version = 1;583583+ cmd->v1.cqid_pages = num_pages;584584+ cmd->v1.cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);585585+ cmd->v1.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;586586+ cmd->v1.async_event_bitmap = Bit(20);587587+ cmd->v1.async_cqid_ringsize = cq->id;588588+ cmd->v1.async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<589589+ OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);590590+ cmd->v1.valid = OCRDMA_CREATE_MQ_VALID;591591+ pa = &cmd->v1.pa[0];592592+ }593593+ ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);594594+ status = be_roce_mcc_cmd(dev->nic_info.netdev,595595+ cmd, sizeof(*cmd), NULL, NULL);596596+ if (!status) {597597+ mq->id = rsp->id;598598+ mq->created = true;599599+ }600600+ return status;601601+}602602+603603+static int ocrdma_create_mq(struct ocrdma_dev *dev)604604+{605605+ int status;606606+607607+ /* Alloc completion queue for Mailbox queue */608608+ status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,609609+ sizeof(struct ocrdma_mcqe));610610+ if (status)611611+ goto alloc_err;612612+613613+ status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q);614614+ if (status)615615+ goto mbx_cq_free;616616+617617+ memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));618618+ init_waitqueue_head(&dev->mqe_ctx.cmd_wait);619619+ mutex_init(&dev->mqe_ctx.lock);620620+621621+ /* Alloc Mailbox queue */622622+ status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,623623+ sizeof(struct ocrdma_mqe));624624+ if (status)625625+ goto mbx_cq_destroy;626626+ status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);627627+ if (status)628628+ goto mbx_q_free;629629+ ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);630630+ return 0;631631+632632+mbx_q_free:633633+ ocrdma_free_q(dev, &dev->mq.sq);634634+mbx_cq_destroy:635635+ 
	ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
mbx_cq_free:
	ocrdma_free_q(dev, &dev->mq.cq);
alloc_err:
	return status;
}

/* Tear down the mailbox queue and its completion queue. */
static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
{
	struct ocrdma_queue_info *mbxq, *cq;

	/* mqe_ctx lock synchronizes with any other pending cmds. */
	mutex_lock(&dev->mqe_ctx.lock);
	mbxq = &dev->mq.sq;
	if (mbxq->created) {
		ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
		ocrdma_free_q(dev, mbxq);
	}
	mutex_unlock(&dev->mqe_ctx.lock);

	cq = &dev->mq.cq;
	if (cq->created) {
		ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
		ocrdma_free_q(dev, cq);
	}
}

/* Move a QP that hit a catastrophic error into the ERROR state. */
static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
				       struct ocrdma_qp *qp)
{
	enum ib_qp_state new_ib_qps = IB_QPS_ERR;
	enum ib_qp_state old_ib_qps;

	if (qp == NULL)
		BUG();
	ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps);
}

/* Translate an async MCQE into an ib_event and deliver it to the
 * consumer's QP/CQ/SRQ/device event handler.
 * NOTE(review): qp/cq are looked up only when the corresponding valid
 * bit is set, but several cases below dereference them unconditionally
 * (e.g. &cq->ibcq, &qp->srq->ibsrq); a malformed or unexpected event
 * could oops here — verify against the firmware event contract.
 */
static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
				    struct ocrdma_ae_mcqe *cqe)
{
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_cq *cq = NULL;
	struct ib_event ib_evt = { 0 };
	int cq_event = 0;
	int qp_event = 1;	/* most event types below target a QP */
	int srq_event = 0;
	int dev_event = 0;
	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
	    OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;

	if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID)
		qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK];
	if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
		cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];

	switch (type) {
	case OCRDMA_CQ_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;
		cq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_CQ_OVERRUN_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;
		break;
	case OCRDMA_CQ_QPCAT_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_FATAL;
		/* also drive the QP into the ERROR state */
		ocrdma_process_qpcat_error(dev, qp);
		break;
	case OCRDMA_QP_ACCESS_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case OCRDMA_QP_COMM_EST_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_COMM_EST;
		break;
	case OCRDMA_SQ_DRAINED_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_SQ_DRAINED;
		break;
	case OCRDMA_DEVICE_FATAL_EVENT:
		ib_evt.element.port_num = 1;
		ib_evt.event = IB_EVENT_DEVICE_FATAL;
		qp_event = 0;
		dev_event = 1;
		break;
	case OCRDMA_SRQCAT_ERROR:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_SRQ_ERR;
		srq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_SRQ_LIMIT_EVENT:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
		srq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_QP_LAST_WQE_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
		break;
	default:
		cq_event = 0;
		qp_event = 0;
		srq_event = 0;
		dev_event = 0;
		ocrdma_err("%s() unknown type=0x%x\n", __func__, type);
		break;
	}

	/* deliver to exactly one consumer callback, by event class */
	if (qp_event) {
		if (qp->ibqp.event_handler)
			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
	} else if (cq_event) {
		if (cq->ibcq.event_handler)
			cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
	} else if (srq_event) {
		if (qp->srq->ibsrq.event_handler)
			qp->srq->ibsrq.event_handler(&ib_evt,
						     qp->srq->ibsrq.
						     srq_context);
	} else if (dev_event)
		ib_dispatch_event(&ib_evt);

}

/* Validate an async CQE's event code and hand it to the dispatcher. */
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
	/* async CQE processing */
	struct ocrdma_ae_mcqe *cqe = ae_cqe;
	u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
			OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;

	if (evt_code == OCRDMA_ASYNC_EVE_CODE)
		ocrdma_dispatch_ibevent(dev, cqe);
	else
		ocrdma_err("%s(%d) invalid evt code=0x%x\n",
			   __func__, dev->id, evt_code);
}

/* Complete the currently pending mailbox command: record its status and
 * wake the waiter in ocrdma_wait_mqe_cmpl().  A tag mismatch means the
 * completion belongs to no outstanding command and is only logged.
 */
static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
{
	if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
		dev->mqe_ctx.cqe_status = (cqe->status &
		     OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
		dev->mqe_ctx.ext_status =
		    (cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
		    >> OCRDMA_MCQE_ESTATUS_SHIFT;
		dev->mqe_ctx.cmd_done = true;
		wake_up(&dev->mqe_ctx.cmd_wait);
	} else
		ocrdma_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
			   __func__, cqe->tag_lo, dev->mqe_ctx.tag);
}

/* Drain the mailbox CQ: route each CQE to async- or command-completion
 * handling, recycle the slot, then re-arm the CQ with the popped count.
 */
static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
	u16 cqe_popped = 0;
	struct ocrdma_mcqe *cqe;

	while (1) {
		cqe = ocrdma_get_mcqe(dev);
		if (cqe == NULL)
			break;
		ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
		cqe_popped += 1;
		if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
			ocrdma_process_acqe(dev, cqe);
		else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
			ocrdma_process_mcqe(dev, cqe);
		else
			ocrdma_err("%s() cqe->compl is not set.\n", __func__);
		/* clear the slot so the valid bit reads 0 next pass */
		memset(cqe, 0, sizeof(struct ocrdma_mcqe));
		ocrdma_mcq_inc_tail(dev);
	}
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
	return 0;
}

static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
				       struct ocrdma_cq *cq)
{
	unsigned long flags;
	struct ocrdma_qp *qp;
	bool buddy_cq_found = false;
	/* Go through list of QPs in error state which are using this CQ
	 * and invoke its callback handler to trigger CQE processing for
	 * error/flushed CQE. It is rare to find more than few entries in
	 * this list as most consumers stops after getting error CQE.
	 * List is traversed only once when a matching buddy cq found for a QP.
	 */
	spin_lock_irqsave(&dev->flush_q_lock, flags);
	list_for_each_entry(qp, &cq->sq_head, sq_entry) {
		if (qp->srq)
			continue;
		/* if wq and rq share the same cq, than comp_handler
		 * is already invoked.
		 */
		if (qp->sq_cq == qp->rq_cq)
			continue;
		/* if completion came on sq, rq's cq is buddy cq.
		 * if completion came on rq, sq's cq is buddy cq.
		 */
		if (qp->sq_cq == cq)
			cq = qp->rq_cq;
		else
			cq = qp->sq_cq;
		buddy_cq_found = true;
		break;
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	if (buddy_cq_found == false)
		return;
	/* invoke the buddy CQ's completion handler outside flush_q_lock */
	if (cq->ibcq.comp_handler) {
		spin_lock_irqsave(&cq->comp_handler_lock, flags);
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
	}
}

/* Handle a completion event on a data-path CQ: clear its armed state,
 * acknowledge the doorbell, notify the consumer, then poke any buddy CQ.
 */
static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
{
	unsigned long flags;
	struct ocrdma_cq *cq;

	if (cq_idx >= OCRDMA_MAX_CQ)
		BUG();

	cq = dev->cq_tbl[cq_idx];
	if (cq == NULL) {
		ocrdma_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
		return;
	}
	/* consumer must re-arm via req_notify before the next event */
	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->armed = false;
	cq->solicited = false;
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	ocrdma_ring_cq_db(dev, cq->id, false, false, 0);

	if (cq->ibcq.comp_handler) {
		spin_lock_irqsave(&cq->comp_handler_lock, flags);
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
	}
	ocrdma_qp_buddy_cq_handler(dev, cq);
}

/* Route a CQ event to the mailbox handler or the data-path handler. */
static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
	/* process the MQ-CQE. */
	if (cq_id == dev->mq.cq.id)
		ocrdma_mq_cq_handler(dev, cq_id);
	else
		ocrdma_qp_cq_handler(dev, cq_id);
}

/* Top-level interrupt handler for one EQ: consume valid EQEs, dispatch
 * CQ events, then re-arm the EQ.
 */
static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
{
	struct ocrdma_eq *eq = handle;
	struct ocrdma_dev *dev = eq->dev;
	struct ocrdma_eqe eqe;
	struct ocrdma_eqe *ptr;
	u16 eqe_popped = 0;
	u16 cq_id;
	while (1) {
		ptr = ocrdma_get_eqe(eq);
		/* byte-swap a local copy; the ring entry stays LE */
		eqe = *ptr;
		ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
		if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
			break;
		eqe_popped += 1;
		/* consume the entry so it is not seen again */
		ptr->id_valid = 0;
		/* check whether its CQE or not. */
		if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
			cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
			ocrdma_cq_handler(dev, cq_id);
		}
		ocrdma_eq_inc_tail(eq);
	}
	ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped);
	/* Ring EQ doorbell with num_popped to 0 to enable interrupts again.
	 */
	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
		ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
	return IRQ_HANDLED;
}

/* Copy @cmd into the next MQ slot (LE byte order), record its tag for
 * completion matching, and ring the MQ doorbell.
 * Caller must hold dev->mqe_ctx.lock.
 */
static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
{
	struct ocrdma_mqe *mqe;

	/* the SQ head doubles as the command tag */
	dev->mqe_ctx.tag = dev->mq.sq.head;
	dev->mqe_ctx.cmd_done = false;
	mqe = ocrdma_get_mqe(dev);
	cmd->hdr.tag_lo = dev->mq.sq.head;
	ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
	/* make sure descriptor is written before ringing doorbell */
	wmb();
	ocrdma_mq_inc_head(dev);
	ocrdma_ring_mq_db(dev);
}

/* Sleep until ocrdma_process_mcqe() signals completion.
 * Returns 0 on completion, -1 on timeout.
 * NOTE(review): -1 is not a conventional errno (-ETIMEDOUT would be);
 * callers appear to treat any non-zero value as failure.
 */
static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
{
	long status;
	/* 30 sec timeout */
	status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
				    (dev->mqe_ctx.cmd_done != false),
				    msecs_to_jiffies(30000));
	if (status)
		return 0;
	else
		return -1;
}

/* issue a mailbox command on the MQ */
static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
{
	int status = 0;
	u16 cqe_status, ext_status;
	struct ocrdma_mqe *rsp;

	/* one command in flight at a time */
	mutex_lock(&dev->mqe_ctx.lock);
	ocrdma_post_mqe(dev, mqe);
	status = ocrdma_wait_mqe_cmpl(dev);
	if (status)
		goto mbx_err;
	cqe_status = dev->mqe_ctx.cqe_status;
	ext_status = dev->mqe_ctx.ext_status;
	rsp = ocrdma_get_mqe_rsp(dev);
	/* the response is copied back over the caller's request buffer */
	ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
	if (cqe_status || ext_status) {
		ocrdma_err
		    ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
		     __func__,
		     (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
		     OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
		status = ocrdma_get_mbx_cqe_errno(cqe_status);
		goto mbx_err;
	}
	/* transport-level status was clean; check command-level status */
	if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)
		status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
mbx_err:
	mutex_unlock(&dev->mqe_ctx.lock);
	return status;
}

/* Decode the QUERY_CONFIG mailbox response into the driver's device
 * attribute table.
 */
static void ocrdma_get_attr(struct ocrdma_dev *dev,
			    struct ocrdma_dev_attr *attr,
			    struct ocrdma_mbx_query_config *rsp)
{
	int max_q_mem;

	attr->max_pd =
	    (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
	attr->max_qp =
	    (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
	attr->max_send_sge = ((rsp->max_write_send_sge &
			       OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
	/* recv SGE limit is published from the same send-SGE field */
	attr->max_recv_sge = (rsp->max_write_send_sge &
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
	attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
	attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
	attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
				    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
	attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
			       OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
	attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
				    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
	attr->max_mr = rsp->max_mr;
	attr->max_mr_size = ~0ull;
	attr->max_fmr = 0;	/* FMRs are not supported */
	attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
	attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
	attr->max_cqe = rsp->max_cq_cqes_per_cq &
	    OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
	/* WQE/RQE sizes are reported in units of OCRDMA_WQE_STRIDE */
	attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
			   OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
			  OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
	    OCRDMA_WQE_STRIDE;
	attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
			   OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
			  OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
	    OCRDMA_WQE_STRIDE;
	/* inline data shares the WQE with its header and one SGE */
	attr->max_inline_data =
	    attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
			      sizeof(struct ocrdma_sge));
	max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
	/* hw can queue one less then the configured size,
	 * so publish less by one to stack.
	 */
	/* NOTE(review): the gen2 branch does NOT subtract 1 from max_wqe
	 * despite the comment above — confirm whether that is intentional.
	 */
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
		attr->ird = 1;
		attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
		attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
	} else
		dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1;
	dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1;
}

/* Verify the firmware exposes RDMA function mode and record the EQ
 * configuration it reports.  Returns -EINVAL if RDMA is not enabled.
 */
static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
				  struct ocrdma_fw_conf_rsp *conf)
{
	u32 fn_mode;

	fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
	if (fn_mode != OCRDMA_FN_MODE_RDMA)
		return -EINVAL;
	dev->base_eqid = conf->base_eqid;
	dev->max_eq = conf->max_eq;
	dev->attr.max_cq = OCRDMA_MAX_CQ - 1;
	return 0;
}

/* can be issued only during init time.
 */
static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_ver_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_VER,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	/* the command buffer now holds the response */
	rsp = (struct ocrdma_fw_ver_rsp *)cmd;
	memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
	memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
	       sizeof(rsp->running_ver));
	ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
mbx_err:
	kfree(cmd);
	return status;
}

/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_conf_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_CONFIG,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_fw_conf_rsp *)cmd;
	status = ocrdma_check_fw_config(dev, rsp);
mbx_err:
	kfree(cmd);
	return status;
}

/* Query device limits from firmware and populate dev->attr. */
static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mbx_query_config *rsp;
	struct ocrdma_mqe *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
	if (!cmd)
		return status;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_mbx_query_config *)cmd;
	ocrdma_get_attr(dev, &dev->attr, rsp);
mbx_err:
	kfree(cmd);
	return status;
}

/* Allocate a protection domain in firmware; records the PD id and,
 * if granted, the direct-packet-push (DPP) page for @pd.
 */
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	struct ocrdma_alloc_pd *cmd;
	struct ocrdma_alloc_pd_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
	if (!cmd)
		return status;
	if (pd->dpp_enabled)
		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
	pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
	if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
		pd->dpp_enabled = true;
		pd->dpp_page = rsp->dpp_page_pdid >>
				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
	} else {
		/* firmware declined DPP for this PD */
		pd->dpp_enabled = false;
		pd->num_dpp_qp = 0;
	}
mbx_err:
	kfree(cmd);
	return status;
}

/* Release a protection domain in firmware. */
int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	struct ocrdma_dealloc_pd *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = pd->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}

/* Compute a hardware-acceptable queue geometry for @num_entries entries
 * of @entry_size bytes: picks the smallest supported page size, and
 * returns the resulting page count/size and the (possibly rounded-up)
 * entry count through the out parameters.  -EINVAL if no page size fits.
 */
static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
			       int *num_pages, int *page_size)
{
	int i;
	int mem_size;

	*num_entries =
roundup_pow_of_two(*num_entries);11851185+ mem_size = *num_entries * entry_size;11861186+ /* find the possible lowest possible multiplier */11871187+ for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {11881188+ if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))11891189+ break;11901190+ }11911191+ if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)11921192+ return -EINVAL;11931193+ mem_size = roundup(mem_size,11941194+ ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));11951195+ *num_pages =11961196+ mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);11971197+ *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);11981198+ *num_entries = mem_size / entry_size;11991199+ return 0;12001200+}12011201+12021202+static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)12031203+{12041204+ int i ;12051205+ int status = 0;12061206+ int max_ah;12071207+ struct ocrdma_create_ah_tbl *cmd;12081208+ struct ocrdma_create_ah_tbl_rsp *rsp;12091209+ struct pci_dev *pdev = dev->nic_info.pdev;12101210+ dma_addr_t pa;12111211+ struct ocrdma_pbe *pbes;12121212+12131213+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));12141214+ if (!cmd)12151215+ return status;12161216+12171217+ max_ah = OCRDMA_MAX_AH;12181218+ dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;12191219+12201220+ /* number of PBEs in PBL */12211221+ cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<12221222+ OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &12231223+ OCRDMA_CREATE_AH_NUM_PAGES_MASK;12241224+12251225+ /* page size */12261226+ for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {12271227+ if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))12281228+ break;12291229+ }12301230+ cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &12311231+ OCRDMA_CREATE_AH_PAGE_SIZE_MASK;12321232+12331233+ /* ah_entry size */12341234+ cmd->ah_conf |= (sizeof(struct ocrdma_av) <<12351235+ OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &12361236+ OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;12371237+12381238+ dev->av_tbl.pbl.va = 
dma_alloc_coherent(&pdev->dev, PAGE_SIZE,12391239+ &dev->av_tbl.pbl.pa,12401240+ GFP_KERNEL);12411241+ if (dev->av_tbl.pbl.va == NULL)12421242+ goto mem_err;12431243+12441244+ dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,12451245+ &pa, GFP_KERNEL);12461246+ if (dev->av_tbl.va == NULL)12471247+ goto mem_err_ah;12481248+ dev->av_tbl.pa = pa;12491249+ dev->av_tbl.num_ah = max_ah;12501250+ memset(dev->av_tbl.va, 0, dev->av_tbl.size);12511251+12521252+ pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;12531253+ for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {12541254+ pbes[i].pa_lo = (u32) (pa & 0xffffffff);12551255+ pbes[i].pa_hi = (u32) upper_32_bits(pa);12561256+ pa += PAGE_SIZE;12571257+ }12581258+ cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);12591259+ cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);12601260+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);12611261+ if (status)12621262+ goto mbx_err;12631263+ rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;12641264+ dev->av_tbl.ahid = rsp->ahid & 0xFFFF;12651265+ kfree(cmd);12661266+ return 0;12671267+12681268+mbx_err:12691269+ dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,12701270+ dev->av_tbl.pa);12711271+ dev->av_tbl.va = NULL;12721272+mem_err_ah:12731273+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,12741274+ dev->av_tbl.pbl.pa);12751275+ dev->av_tbl.pbl.va = NULL;12761276+ dev->av_tbl.size = 0;12771277+mem_err:12781278+ kfree(cmd);12791279+ return status;12801280+}12811281+12821282+static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)12831283+{12841284+ struct ocrdma_delete_ah_tbl *cmd;12851285+ struct pci_dev *pdev = dev->nic_info.pdev;12861286+12871287+ if (dev->av_tbl.va == NULL)12881288+ return;12891289+12901290+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));12911291+ if (!cmd)12921292+ return;12931293+ cmd->ahid = dev->av_tbl.ahid;12941294+12951295+ ocrdma_mbx_cmd(dev, (struct 
ocrdma_mqe *)cmd);12961296+ dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,12971297+ dev->av_tbl.pa);12981298+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,12991299+ dev->av_tbl.pbl.pa);13001300+ kfree(cmd);13011301+}13021302+13031303+/* Multiple CQs uses the EQ. This routine returns least used13041304+ * EQ to associate with CQ. This will distributes the interrupt13051305+ * processing and CPU load to associated EQ, vector and so to that CPU.13061306+ */13071307+static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)13081308+{13091309+ int i, selected_eq = 0, cq_cnt = 0;13101310+ u16 eq_id;13111311+13121312+ mutex_lock(&dev->dev_lock);13131313+ cq_cnt = dev->qp_eq_tbl[0].cq_cnt;13141314+ eq_id = dev->qp_eq_tbl[0].q.id;13151315+ /* find the EQ which is has the least number of13161316+ * CQs associated with it.13171317+ */13181318+ for (i = 0; i < dev->eq_cnt; i++) {13191319+ if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) {13201320+ cq_cnt = dev->qp_eq_tbl[i].cq_cnt;13211321+ eq_id = dev->qp_eq_tbl[i].q.id;13221322+ selected_eq = i;13231323+ }13241324+ }13251325+ dev->qp_eq_tbl[selected_eq].cq_cnt += 1;13261326+ mutex_unlock(&dev->dev_lock);13271327+ return eq_id;13281328+}13291329+13301330+static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)13311331+{13321332+ int i;13331333+13341334+ mutex_lock(&dev->dev_lock);13351335+ for (i = 0; i < dev->eq_cnt; i++) {13361336+ if (dev->qp_eq_tbl[i].q.id != eq_id)13371337+ continue;13381338+ dev->qp_eq_tbl[i].cq_cnt -= 1;13391339+ break;13401340+ }13411341+ mutex_unlock(&dev->dev_lock);13421342+}13431343+13441344+int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,13451345+ int entries, int dpp_cq)13461346+{13471347+ int status = -ENOMEM; int max_hw_cqe;13481348+ struct pci_dev *pdev = dev->nic_info.pdev;13491349+ struct ocrdma_create_cq *cmd;13501350+ struct ocrdma_create_cq_rsp *rsp;13511351+ u32 hw_pages, cqe_size, page_size, cqe_count;13521352+13531353+ if (dpp_cq)13541354+ 
		return -EINVAL;
	if (entries > dev->attr.max_cqe) {
		ocrdma_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
			   __func__, dev->id, dev->attr.max_cqe, entries);
		return -EINVAL;
	}
	/* NOTE(review): dpp_cq is rejected unconditionally above, so the
	 * dpp_cq branches below are currently unreachable — presumably kept
	 * for when DPP CQ support is enabled; confirm before removing.
	 */
	if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
		return -EINVAL;

	if (dpp_cq) {
		cq->max_hw_cqe = 1;
		max_hw_cqe = 1;
		cqe_size = OCRDMA_DPP_CQE_SIZE;
		hw_pages = 1;
	} else {
		cq->max_hw_cqe = dev->attr.max_cqe;
		max_hw_cqe = dev->attr.max_cqe;
		cqe_size = sizeof(struct ocrdma_cqe);
		hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
	}

	/* CQ ring length, rounded up to a whole number of queue pages */
	cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
	if (!cq->va) {
		status = -ENOMEM;
		goto mem_err;
	}
	memset(cq->va, 0, cq->len);
	page_size = cq->len / hw_pages;
	cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
					OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
	cmd->cmd.pgsz_pgcnt |= hw_pages;
	cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;

	/* NOTE(review): eq_cnt < 0 looks like a can't-happen guard (counts
	 * are non-negative) — verify whether "== 0" was intended.
	 */
	if (dev->eq_cnt < 0)
		goto eq_err;
	cq->eqn = ocrdma_bind_eq(dev);
	cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
	cqe_count = cq->len / cqe_size;
	if (cqe_count > 1024)
		/* Set cnt to 3 to indicate more than 1024 cq entries */
		cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
	else {
		u8 count = 0;
		/* encode the CQE count the hardware supports (256/512/1024) */
		switch (cqe_count) {
		case 256:
			count = 0;
			break;
		case 512:
			count = 1;
			break;
		case 1024:
			count = 2;
			break;
		default:
			goto mbx_err;
		}
		cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
	}
	/* shared eq between all the consumer cqs. */
	cmd->cmd.eqn = cq->eqn;
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		if (dpp_cq)
			cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
				OCRDMA_CREATE_CQ_TYPE_SHIFT;
		/* GEN2: no phase-bit tracking, full cqe_count programmed */
		cq->phase_change = false;
		cmd->cmd.cqe_count = (cq->len / cqe_size);
	} else {
		/* GEN1: auto-valid CQEs with phase-bit change detection */
		cmd->cmd.cqe_count = (cq->len / cqe_size) - 1;
		cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
		cq->phase_change = true;
	}

	ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_create_cq_rsp *)cmd;
	cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
	kfree(cmd);
	return 0;
mbx_err:
	ocrdma_unbind_eq(dev, cq->eqn);
eq_err:
	dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
mem_err:
	kfree(cmd);
	return status;
}

/* Destroy a CQ via the DELETE_CQ mailbox command and release its EQ
 * binding.  The CQ ring memory is freed only when the mailbox command
 * succeeds.
 */
int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_cq *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
	if (!cmd)
		return status;
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->bypass_flush_qid |=
		(cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
		OCRDMA_DESTROY_CQ_QID_MASK;

	/* NOTE(review): the EQ ref is dropped before the mailbox command,
	 * and on mailbox failure cq->va is NOT freed — confirm this leak
	 * on the error path is intended.
	 */
	ocrdma_unbind_eq(dev, cq->eqn);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
mbx_err:
	kfree(cmd);
	return status;
}

/* Allocate an lkey from the device (ALLOC_LKEY mailbox command),
 * packing the MR access rights and PBL count into pbl_sz_flags.
 * On success the new key is stored in hwmr->lkey.
 */
int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
			  u32 pdid, int addr_check)
{
	int status = -ENOMEM;
	struct ocrdma_alloc_lkey *cmd;
	struct ocrdma_alloc_lkey_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->pdid = pdid;
	cmd->pbl_sz_flags |= addr_check;
	cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
	hwmr->lkey = rsp->lrkey;
mbx_err:
	kfree(cmd);
	return status;
}

/* Release an lkey (DEALLOC_LKEY mailbox command).  fr_mr indicates a
 * fast-register MR key.
 */
int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
{
	int status = -ENOMEM;
	struct ocrdma_dealloc_lkey *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	cmd->lkey = lkey;
	cmd->rsvd_frmr = fr_mr ? 1 : 0;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err; /* label follows immediately; kept for symmetry */
mbx_err:
	kfree(cmd);
	return status;
}

/* Register a non-shared MR (REGISTER_NSMR mailbox command) with the
 * first batch of up to pbl_cnt PBL addresses.  "last" tells the device
 * whether more PBLs follow via ocrdma_mbx_reg_mr_cont().  On success the
 * assigned key is stored in hwmr->lkey.
 * NOTE(review): the pbe_size parameter is unused here; the hwmr fields
 * are used instead — confirm the parameter is kept only for the
 * call-site signature.
 */
static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
			     u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
{
	int status = -ENOMEM;
	int i;
	struct ocrdma_reg_nsmr *cmd;
	struct ocrdma_reg_nsmr_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	cmd->num_pbl_pdid =
	    pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);

	/* pack MR access rights */
	cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
				    OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
				    OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
				    OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
				    OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
				    OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
	cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);

	/* PBE and PBL (host page) sizes, in units of the minimum hpage */
	cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
	cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
		OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
	cmd->totlen_low = hwmr->len;
	cmd->totlen_high = upper_32_bits(hwmr->len);
	cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
	cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
	cmd->va_loaddr = (u32) hwmr->va;
	cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);

	for (i = 0; i < pbl_cnt; i++) {
		cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
		cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
	}
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
	hwmr->lkey = rsp->lrkey;
mbx_err:
	kfree(cmd);
	return status;
}

/* Continue an NSMR registration with another batch of PBL addresses
 * starting at pbl_offset (REGISTER_NSMR_CONT mailbox command).
 */
static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
				  struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
				  u32 pbl_offset, u32 last)
{
	int status = -ENOMEM;
	int i;
	struct ocrdma_reg_nsmr_cont *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	cmd->lrkey = hwmr->lkey;
	cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
	    (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
	cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;

	for (i = 0; i < pbl_cnt; i++) {
		cmd->pbl[i].lo =
		    (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
		cmd->pbl[i].hi =
		    upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
	}
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err; /* label follows immediately; kept for symmetry */
mbx_err:
	kfree(cmd);
	return status;
}

/* Register an MR whose PBL list may exceed one mailbox command: send the
 * first MAX_OCRDMA_NSMR_PBL PBLs via reg_mr, then the remainder in
 * reg_mr_cont batches until "last" is set.
 */
int ocrdma_reg_mr(struct ocrdma_dev *dev,
		  struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
{
	int status;
	u32 last = 0;
	u32 cur_pbl_cnt, pbl_offset;
	u32 pending_pbl_cnt = hwmr->num_pbls;

	pbl_offset = 0;
	cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
	if (cur_pbl_cnt == pending_pbl_cnt)
		last = 1;

	status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
				   cur_pbl_cnt, hwmr->pbe_size, last);
	if (status) {
		ocrdma_err("%s() status=%d\n", __func__, status);
		return status;
	}
	/* if there is no more pbls to
register then exit. */
	if (last)
		return 0;

	while (!last) {
		pbl_offset += cur_pbl_cnt;
		pending_pbl_cnt -= cur_pbl_cnt;
		cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
		/* if we reach the end of the pbls, then need to set the last
		 * bit, indicating no more pbls to register for this memory key.
		 */
		if (cur_pbl_cnt == pending_pbl_cnt)
			last = 1;

		status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
						pbl_offset, last);
		if (status)
			break;
	}
	if (status)
		ocrdma_err("%s() err. status=%d\n", __func__, status);

	return status;
}

/* Return true if @qp is already linked on @cq's send-queue flush list. */
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
{
	struct ocrdma_qp *tmp;
	bool found = false;
	list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
		if (qp == tmp) {
			found = true;
			break;
		}
	}
	return found;
}

/* Return true if @qp is already linked on @cq's receive-queue flush list. */
bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
{
	struct ocrdma_qp *tmp;
	bool found = false;
	list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
		if (qp == tmp) {
			found = true;
			break;
		}
	}
	return found;
}

/* Queue the QP on its CQs' flush lists (SQ always, RQ only when the QP
 * has no SRQ), avoiding duplicates.  Called when the QP transitions to
 * the error state so pending WQEs/RQEs get flushed completions.
 */
void ocrdma_flush_qp(struct ocrdma_qp *qp)
{
	bool found;
	unsigned long flags;

	spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (!found)
		list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (!found)
			list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
	}
	spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
}

/* Validate and apply a QP state transition under q_lock (synchronized
 * with WQE/RQE posting).  Optionally reports the old state in IB terms.
 * Returns 0 on a valid transition, 1 if the QP is already in the target
 * state, -EINVAL for an illegal transition.  Moves to ERR queue the QP
 * for flushing.
 */
int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
			    enum ib_qp_state *old_ib_state)
{
	unsigned long flags;
	int status = 0;
	enum ocrdma_qp_state new_state;
	new_state = get_ocrdma_qp_state(new_ib_state);

	/* sync with wqe and rqe posting */
	spin_lock_irqsave(&qp->q_lock, flags);

	if (old_ib_state)
		*old_ib_state = get_ibqp_state(qp->state);
	if (new_state == qp->state) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		return 1;
	}

	switch (qp->state) {
	case OCRDMA_QPS_RST:
		/* qps: RST->XXX */
		switch (new_state) {
		case OCRDMA_QPS_RST:
		case OCRDMA_QPS_INIT:
			break;
		default:
			status = -EINVAL;
			break;
		};
		break;
	case OCRDMA_QPS_INIT:
		/* qps: INIT->XXX */
		switch (new_state) {
		case OCRDMA_QPS_INIT:
		case OCRDMA_QPS_RTR:
			break;
		case OCRDMA_QPS_ERR:
			ocrdma_flush_qp(qp);
			break;
		default:
			status = -EINVAL;
			break;
		};
		break;
	case OCRDMA_QPS_RTR:
		/* qps: RTR->XXX */
		switch (new_state) {
		case OCRDMA_QPS_RTS:
			break;
		case OCRDMA_QPS_ERR:
			ocrdma_flush_qp(qp);
			break;
		default:
			status = -EINVAL;
			break;
		};
		break;
	case OCRDMA_QPS_RTS:
		/* qps: RTS->XXX */
		switch (new_state) {
		case OCRDMA_QPS_SQD:
		case OCRDMA_QPS_SQE:
			break;
		case OCRDMA_QPS_ERR:
			ocrdma_flush_qp(qp);
			break;
		default:
			status = -EINVAL;
			break;
		};
		break;
	case OCRDMA_QPS_SQD:
		/* qps: SQD->XXX */
		switch (new_state) {
		case OCRDMA_QPS_RTS:
		case OCRDMA_QPS_SQE:
		case OCRDMA_QPS_ERR:
			break;
		default:
			status = -EINVAL;
			break;
		};
		break;
	case OCRDMA_QPS_SQE:
		/* qps: SQE->XXX */
		switch (new_state) {
		case OCRDMA_QPS_RTS:
		case OCRDMA_QPS_ERR:
			break;
		default:
			status = -EINVAL;
			break;
		};
		break;
	case OCRDMA_QPS_ERR:
		/* qps: ERR->XXX */
		switch (new_state) {
		case OCRDMA_QPS_RST:
			break;
		default:
			status = -EINVAL;
			break;
		};
		break;
	default:
		status = -EINVAL;
		break;
	};
	if (!status)
		qp->state = new_state;

	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}

/* Translate the QP's capability flags into CREATE_QP request masks. */
static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
{
	u32 flags = 0;
	if (qp->cap_flags & OCRDMA_QP_INB_RD)
		flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
	if (qp->cap_flags & OCRDMA_QP_INB_WR)
		flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
	if (qp->cap_flags & OCRDMA_QP_MW_BIND)
		flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
	if (qp->cap_flags & OCRDMA_QP_LKEY0)
		flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
	if (qp->cap_flags & OCRDMA_QP_FAST_REG)
		flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
	return flags;
}

/* Size and allocate the send queue, then fill the SQ-related fields of
 * the CREATE_QP request (page list, page size/count, SGE and WQE limits).
 */
static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
				       struct ib_qp_init_attr *attrs,
				       struct ocrdma_qp *qp)
{
	int status;
	u32 len, hw_pages, hw_page_size;
	dma_addr_t pa;
	struct ocrdma_dev *dev = qp->dev;
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_wqe_allocated;
	u32 max_sges = attrs->cap.max_send_sge;

	max_wqe_allocated = attrs->cap.max_send_wr;
	/*
need to allocate one extra to for GEN1 family */18381838+ if (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)18391839+ max_wqe_allocated += 1;18401840+18411841+ status = ocrdma_build_q_conf(&max_wqe_allocated,18421842+ dev->attr.wqe_size, &hw_pages, &hw_page_size);18431843+ if (status) {18441844+ ocrdma_err("%s() req. max_send_wr=0x%x\n", __func__,18451845+ max_wqe_allocated);18461846+ return -EINVAL;18471847+ }18481848+ qp->sq.max_cnt = max_wqe_allocated;18491849+ len = (hw_pages * hw_page_size);18501850+18511851+ qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);18521852+ if (!qp->sq.va)18531853+ return -EINVAL;18541854+ memset(qp->sq.va, 0, len);18551855+ qp->sq.len = len;18561856+ qp->sq.pa = pa;18571857+ qp->sq.entry_size = dev->attr.wqe_size;18581858+ ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);18591859+18601860+ cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)18611861+ << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);18621862+ cmd->num_wq_rq_pages |= (hw_pages <<18631863+ OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &18641864+ OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;18651865+ cmd->max_sge_send_write |= (max_sges <<18661866+ OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &18671867+ OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;18681868+ cmd->max_sge_send_write |= (max_sges <<18691869+ OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &18701870+ OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;18711871+ cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<18721872+ OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &18731873+ OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;18741874+ cmd->wqe_rqe_size |= (dev->attr.wqe_size <<18751875+ OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &18761876+ OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;18771877+ return 0;18781878+}18791879+18801880+static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,18811881+ struct ib_qp_init_attr *attrs,18821882+ struct ocrdma_qp *qp)18831883+{18841884+ int status;18851885+ u32 len, hw_pages, 
hw_page_size;18861886+ dma_addr_t pa = 0;18871887+ struct ocrdma_dev *dev = qp->dev;18881888+ struct pci_dev *pdev = dev->nic_info.pdev;18891889+ u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;18901890+18911891+ status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,18921892+ &hw_pages, &hw_page_size);18931893+ if (status) {18941894+ ocrdma_err("%s() req. max_recv_wr=0x%x\n", __func__,18951895+ attrs->cap.max_recv_wr + 1);18961896+ return status;18971897+ }18981898+ qp->rq.max_cnt = max_rqe_allocated;18991899+ len = (hw_pages * hw_page_size);19001900+19011901+ qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);19021902+ if (!qp->rq.va)19031903+ return status;19041904+ memset(qp->rq.va, 0, len);19051905+ qp->rq.pa = pa;19061906+ qp->rq.len = len;19071907+ qp->rq.entry_size = dev->attr.rqe_size;19081908+19091909+ ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);19101910+ cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<19111911+ OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);19121912+ cmd->num_wq_rq_pages |=19131913+ (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &19141914+ OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;19151915+ cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<19161916+ OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &19171917+ OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;19181918+ cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<19191919+ OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &19201920+ OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;19211921+ cmd->wqe_rqe_size |= (dev->attr.rqe_size <<19221922+ OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &19231923+ OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;19241924+ return 0;19251925+}19261926+19271927+static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,19281928+ struct ocrdma_pd *pd,19291929+ struct ocrdma_qp *qp,19301930+ u8 enable_dpp_cq, u16 dpp_cq_id)19311931+{19321932+ pd->num_dpp_qp--;19331933+ qp->dpp_enabled = true;19341934+ cmd->max_sge_recv_flags |= 
OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;19351935+ if (!enable_dpp_cq)19361936+ return;19371937+ cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;19381938+ cmd->dpp_credits_cqid = dpp_cq_id;19391939+ cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<19401940+ OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;19411941+}19421942+19431943+static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,19441944+ struct ocrdma_qp *qp)19451945+{19461946+ struct ocrdma_dev *dev = qp->dev;19471947+ struct pci_dev *pdev = dev->nic_info.pdev;19481948+ dma_addr_t pa = 0;19491949+ int ird_page_size = dev->attr.ird_page_size;19501950+ int ird_q_len = dev->attr.num_ird_pages * ird_page_size;19511951+19521952+ if (dev->attr.ird == 0)19531953+ return 0;19541954+19551955+ qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,19561956+ &pa, GFP_KERNEL);19571957+ if (!qp->ird_q_va)19581958+ return -ENOMEM;19591959+ memset(qp->ird_q_va, 0, ird_q_len);19601960+ ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,19611961+ pa, ird_page_size);19621962+ return 0;19631963+}19641964+19651965+static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,19661966+ struct ocrdma_qp *qp,19671967+ struct ib_qp_init_attr *attrs,19681968+ u16 *dpp_offset, u16 *dpp_credit_lmt)19691969+{19701970+ u32 max_wqe_allocated, max_rqe_allocated;19711971+ qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;19721972+ qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;19731973+ qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;19741974+ qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;19751975+ qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);19761976+ qp->dpp_enabled = false;19771977+ if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {19781978+ qp->dpp_enabled = true;19791979+ *dpp_credit_lmt = (rsp->dpp_response &19801980+ OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>19811981+ 
OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;19821982+ *dpp_offset = (rsp->dpp_response &19831983+ OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>19841984+ OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;19851985+ }19861986+ max_wqe_allocated =19871987+ rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;19881988+ max_wqe_allocated = 1 << max_wqe_allocated;19891989+ max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);19901990+19911991+ if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {19921992+ qp->sq.free_delta = 0;19931993+ qp->rq.free_delta = 1;19941994+ } else19951995+ qp->sq.free_delta = 1;19961996+19971997+ qp->sq.max_cnt = max_wqe_allocated;19981998+ qp->sq.max_wqe_idx = max_wqe_allocated - 1;19991999+20002000+ if (!attrs->srq) {20012001+ qp->rq.max_cnt = max_rqe_allocated;20022002+ qp->rq.max_wqe_idx = max_rqe_allocated - 1;20032003+ qp->rq.free_delta = 1;20042004+ }20052005+}20062006+20072007+int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,20082008+ u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,20092009+ u16 *dpp_credit_lmt)20102010+{20112011+ int status = -ENOMEM;20122012+ u32 flags = 0;20132013+ struct ocrdma_dev *dev = qp->dev;20142014+ struct ocrdma_pd *pd = qp->pd;20152015+ struct pci_dev *pdev = dev->nic_info.pdev;20162016+ struct ocrdma_cq *cq;20172017+ struct ocrdma_create_qp_req *cmd;20182018+ struct ocrdma_create_qp_rsp *rsp;20192019+ int qptype;20202020+20212021+ switch (attrs->qp_type) {20222022+ case IB_QPT_GSI:20232023+ qptype = OCRDMA_QPT_GSI;20242024+ break;20252025+ case IB_QPT_RC:20262026+ qptype = OCRDMA_QPT_RC;20272027+ break;20282028+ case IB_QPT_UD:20292029+ qptype = OCRDMA_QPT_UD;20302030+ break;20312031+ default:20322032+ return -EINVAL;20332033+ };20342034+20352035+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));20362036+ if (!cmd)20372037+ return status;20382038+ cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &20392039+ OCRDMA_CREATE_QP_REQ_QPT_MASK;20402040+ status = 
ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
	if (status)
		goto sq_err;

	if (attrs->srq) {
		/* SRQ-attached QP: no private RQ; pass the SRQ id instead */
		struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
		cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
		cmd->rq_addr[0].lo = srq->id;
		qp->srq = srq;
	} else {
		status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
		if (status)
			goto rq_err;
	}

	status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
	if (status)
		goto mbx_err;

	cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
				OCRDMA_CREATE_QP_REQ_PD_ID_MASK;

	flags = ocrdma_set_create_qp_mbx_access_flags(qp);

	cmd->max_sge_recv_flags |= flags;
	cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
			     OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
				OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
	cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
			     OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
				OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
	cq = get_ocrdma_cq(attrs->send_cq);
	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
				OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
	qp->sq_cq = cq;
	cq = get_ocrdma_cq(attrs->recv_cq);
	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
				OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
	qp->rq_cq = cq;

	/* enable DPP only when the PD supports it, inline data is requested
	 * within the device limit, and DPP QP slots remain on the PD
	 */
	if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
	    (attrs->cap.max_inline_data <= dev->attr.max_inline_data))
		ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
					     dpp_cq_id);

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_qp_rsp *)cmd;
	ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
	qp->state = OCRDMA_QPS_RST;
	kfree(cmd);
	return 0;
mbx_err:
	if (qp->rq.va)
		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
rq_err:
	ocrdma_err("%s(%d) rq_err\n", __func__, dev->id);
	dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
sq_err:
	ocrdma_err("%s(%d) sq_err\n", __func__, dev->id);
	kfree(cmd);
	return status;
}

/* Fetch the QP's current parameters from the device (QUERY_QP mailbox
 * command) into @param.
 */
int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			struct ocrdma_qp_params *param)
{
	int status = -ENOMEM;
	struct ocrdma_query_qp *cmd;
	struct ocrdma_query_qp_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->qp_id = qp->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_query_qp_rsp *)cmd;
	memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
mbx_err:
	kfree(cmd);
	return status;
}

/* Resolve the destination MAC for a GID: multicast-mapped or link-local
 * addresses only; anything else (routable unicast) fails with -EINVAL.
 */
int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
			u8 *mac_addr)
{
	struct in6_addr in6;

	memcpy(&in6, dgid, sizeof in6);
	if (rdma_is_multicast_addr(&in6))
		rdma_get_mcast_mac(&in6, mac_addr);
	else if (rdma_link_local_addr(&in6))
		rdma_get_ll_mac(&in6, mac_addr);
	else {
		ocrdma_err("%s() fail to resolve mac_addr.\n", __func__);
		return -EINVAL;
	}
	return 0;
}

/* Fill the MODIFY_QP address-vector fields (GRH, SGID/DGID, dest MAC,
 * VLAN) from the IB AH attributes.  Silently returns if no GRH is set.
 * NOTE(review): the ocrdma_resolve_dgid() return value is ignored, so
 * mac_addr may be used uninitialized when resolution fails — confirm.
 */
static void ocrdma_set_av_params(struct ocrdma_qp *qp,
				 struct ocrdma_modify_qp *cmd,
				 struct ib_qp_attr *attrs)
{
	struct ib_ah_attr *ah_attr = &attrs->ah_attr;
	union ib_gid sgid;
	u32 vlan_id;
	u8 mac_addr[6];
	if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
		return;
	cmd->params.tclass_sq_psn |=
	    (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
	cmd->params.rnt_rc_sl_fl |=
	    (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
	cmd->params.hop_lmt_rq_psn |=
	    (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
	cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
	memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
	       sizeof(cmd->params.dgid));
	ocrdma_query_gid(&qp->dev->ibdev, 1,
			 ah_attr->grh.sgid_index, &sgid);
	qp->sgid_idx = ah_attr->grh.sgid_index;
	memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
	ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
	cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
				(mac_addr[2] << 16) | (mac_addr[3] << 24);
	/* convert them to LE format. */
	ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
	ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
	cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
	vlan_id = rdma_get_vlan_id(&sgid);
	if (vlan_id && (vlan_id < 0x1000)) {
		cmd->params.vlan_dmac_b4_to_b5 |=
		    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
	}
}

/* Translate the requested ib_qp_attr fields (selected by attr_mask) into
 * the MODIFY_QP mailbox command, setting the matching validity flag for
 * each field the device should apply.
 */
static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
				struct ocrdma_modify_qp *cmd,
				struct ib_qp_attr *attrs, int attr_mask,
				enum ib_qp_state old_qps)
{
	int status = 0;
	struct net_device *netdev = qp->dev->nic_info.netdev;
	int eth_mtu = iboe_get_mtu(netdev->mtu);

	if (attr_mask & IB_QP_PKEY_INDEX) {
		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
						   OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
		cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
	}
	if (attr_mask & IB_QP_QKEY) {
		qp->qkey = attrs->qkey;
		cmd->params.qkey = attrs->qkey;
		cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
	}
	if (attr_mask & IB_QP_AV)
		ocrdma_set_av_params(qp, cmd, attrs);
	else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
		/* set the default mac address for UD, GSI QPs */
		cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
			(qp->dev->nic_info.mac_addr[1] << 8) |
			(qp->dev->nic_info.mac_addr[2] << 16) |
			(qp->dev->nic_info.mac_addr[3] << 24);
		cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
					(qp->dev->nic_info.mac_addr[5] << 8);
	}
	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
	    attrs->en_sqd_async_notify) {
		cmd->params.max_sge_recv_flags |=
			OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
		/* NOTE(review): DST_QPN_VALID set for SQD-async notify looks
		 * copy-pasted from the DEST_QPN branch — confirm against the
		 * mailbox spec.
		 */
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_DEST_QPN) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
				OCRDMA_QP_PARAMS_DEST_QPN_MASK);
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_PATH_MTU) {
		/* reject an IB path MTU larger than the Ethernet MTU allows */
		if (ib_mtu_enum_to_int(eth_mtu) <
		    ib_mtu_enum_to_int(attrs->path_mtu)) {
			status = -EINVAL;
			goto pmtu_err;
		}
		cmd->params.path_mtu_pkey_indx |=
		    (ib_mtu_enum_to_int(attrs->path_mtu) <<
		     OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
		    OCRDMA_QP_PARAMS_PATH_MTU_MASK;
		cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
		    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
					     OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
		    OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
	}
	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
					     OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
		    OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
			OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
			& OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
	}
	if (attr_mask & IB_QP_SQ_PSN) {
		cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
			status = -EINVAL;
			goto pmtu_err;
		}
		qp->max_ord = attrs->max_rd_atomic;
		cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
			status = -EINVAL;
			goto pmtu_err;
		}
		qp->max_ird = attrs->max_dest_rd_atomic;
		cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
	}
	cmd->params.max_ord_ird = (qp->max_ord <<
				OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
				(qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
pmtu_err:
	return status;
}

/* Apply a MODIFY_QP mailbox command: encode either the requested state
 * (IB_QP_STATE set) or the QP's current state, plus whatever parameters
 * attr_mask selects.
 */
int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			 struct ib_qp_attr *attrs, int attr_mask,
			 enum ib_qp_state old_qps)
{
	int status = -ENOMEM;
	struct ocrdma_modify_qp *cmd;
	struct ocrdma_modify_qp_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->params.id = qp->id;
	cmd->flags = 0;
	if (attr_mask & IB_QP_STATE) {
		cmd->params.max_sge_recv_flags |=
		    (get_ocrdma_qp_state(attrs->qp_state) <<
		     OCRDMA_QP_PARAMS_STATE_SHIFT) &
		    OCRDMA_QP_PARAMS_STATE_MASK;
		cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
	} else
		cmd->params.max_sge_recv_flags |=
		    (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
		    OCRDMA_QP_PARAMS_STATE_MASK;
	status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
	if (status)
		goto mbx_err;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_modify_qp_rsp *)cmd;
mbx_err:
	kfree(cmd);
	return status;
}

/* Destroy a QP (DELETE_QP mailbox command) and free its SQ/RQ rings;
 * the RQ is freed only when the QP owns one (no SRQ).  A DPP QP returns
 * its slot to the PD.
 */
int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_qp *cmd;
	struct ocrdma_destroy_qp_rsp *rsp;
	struct pci_dev *pdev = dev->nic_info.pdev;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->qp_id = qp->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_destroy_qp_rsp *)cmd;
mbx_err:
	kfree(cmd);
	if (qp->sq.va)
		dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
	if (!qp->srq && qp->rq.va)
		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
	if (qp->dpp_enabled)
		qp->pd->num_dpp_qp++;
return status;23482348+}23492349+23502350+int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,23512351+ struct ib_srq_init_attr *srq_attr,23522352+ struct ocrdma_pd *pd)23532353+{23542354+ int status = -ENOMEM;23552355+ int hw_pages, hw_page_size;23562356+ int len;23572357+ struct ocrdma_create_srq_rsp *rsp;23582358+ struct ocrdma_create_srq *cmd;23592359+ dma_addr_t pa;23602360+ struct ocrdma_dev *dev = srq->dev;23612361+ struct pci_dev *pdev = dev->nic_info.pdev;23622362+ u32 max_rqe_allocated;23632363+23642364+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));23652365+ if (!cmd)23662366+ return status;23672367+23682368+ cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;23692369+ max_rqe_allocated = srq_attr->attr.max_wr + 1;23702370+ status = ocrdma_build_q_conf(&max_rqe_allocated,23712371+ dev->attr.rqe_size,23722372+ &hw_pages, &hw_page_size);23732373+ if (status) {23742374+ ocrdma_err("%s() req. max_wr=0x%x\n", __func__,23752375+ srq_attr->attr.max_wr);23762376+ status = -EINVAL;23772377+ goto ret;23782378+ }23792379+ len = hw_pages * hw_page_size;23802380+ srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);23812381+ if (!srq->rq.va) {23822382+ status = -ENOMEM;23832383+ goto ret;23842384+ }23852385+ ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);23862386+23872387+ srq->rq.entry_size = dev->attr.rqe_size;23882388+ srq->rq.pa = pa;23892389+ srq->rq.len = len;23902390+ srq->rq.max_cnt = max_rqe_allocated;23912391+23922392+ cmd->max_sge_rqe = ilog2(max_rqe_allocated);23932393+ cmd->max_sge_rqe |= srq_attr->attr.max_sge <<23942394+ OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;23952395+23962396+ cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)23972397+ << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);23982398+ cmd->pages_rqe_sz |= (dev->attr.rqe_size23992399+ << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)24002400+ & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;24012401+ cmd->pages_rqe_sz |= hw_pages << 
OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;24022402+24032403+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);24042404+ if (status)24052405+ goto mbx_err;24062406+ rsp = (struct ocrdma_create_srq_rsp *)cmd;24072407+ srq->id = rsp->id;24082408+ srq->rq.dbid = rsp->id;24092409+ max_rqe_allocated = ((rsp->max_sge_rqe_allocated &24102410+ OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>24112411+ OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);24122412+ max_rqe_allocated = (1 << max_rqe_allocated);24132413+ srq->rq.max_cnt = max_rqe_allocated;24142414+ srq->rq.max_wqe_idx = max_rqe_allocated - 1;24152415+ srq->rq.max_sges = (rsp->max_sge_rqe_allocated &24162416+ OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>24172417+ OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;24182418+ goto ret;24192419+mbx_err:24202420+ dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);24212421+ret:24222422+ kfree(cmd);24232423+ return status;24242424+}24252425+24262426+int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)24272427+{24282428+ int status = -ENOMEM;24292429+ struct ocrdma_modify_srq *cmd;24302430+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));24312431+ if (!cmd)24322432+ return status;24332433+ cmd->id = srq->id;24342434+ cmd->limit_max_rqe |= srq_attr->srq_limit <<24352435+ OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;24362436+ status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);24372437+ kfree(cmd);24382438+ return status;24392439+}24402440+24412441+int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)24422442+{24432443+ int status = -ENOMEM;24442444+ struct ocrdma_query_srq *cmd;24452445+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));24462446+ if (!cmd)24472447+ return status;24482448+ cmd->id = srq->rq.dbid;24492449+ status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);24502450+ if (status == 0) {24512451+ struct ocrdma_query_srq_rsp *rsp =24522452+ (struct ocrdma_query_srq_rsp 
*)cmd;24532453+ srq_attr->max_sge =24542454+ rsp->srq_lmt_max_sge &24552455+ OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;24562456+ srq_attr->max_wr =24572457+ rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;24582458+ srq_attr->srq_limit = rsp->srq_lmt_max_sge >>24592459+ OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;24602460+ }24612461+ kfree(cmd);24622462+ return status;24632463+}24642464+24652465+int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)24662466+{24672467+ int status = -ENOMEM;24682468+ struct ocrdma_destroy_srq *cmd;24692469+ struct pci_dev *pdev = dev->nic_info.pdev;24702470+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));24712471+ if (!cmd)24722472+ return status;24732473+ cmd->id = srq->id;24742474+ status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);24752475+ if (srq->rq.va)24762476+ dma_free_coherent(&pdev->dev, srq->rq.len,24772477+ srq->rq.va, srq->rq.pa);24782478+ kfree(cmd);24792479+ return status;24802480+}24812481+24822482+int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)24832483+{24842484+ int i;24852485+ int status = -EINVAL;24862486+ struct ocrdma_av *av;24872487+ unsigned long flags;24882488+24892489+ av = dev->av_tbl.va;24902490+ spin_lock_irqsave(&dev->av_tbl.lock, flags);24912491+ for (i = 0; i < dev->av_tbl.num_ah; i++) {24922492+ if (av->valid == 0) {24932493+ av->valid = OCRDMA_AV_VALID;24942494+ ah->av = av;24952495+ ah->id = i;24962496+ status = 0;24972497+ break;24982498+ }24992499+ av++;25002500+ }25012501+ if (i == dev->av_tbl.num_ah)25022502+ status = -EAGAIN;25032503+ spin_unlock_irqrestore(&dev->av_tbl.lock, flags);25042504+ return status;25052505+}25062506+25072507+int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)25082508+{25092509+ unsigned long flags;25102510+ spin_lock_irqsave(&dev->av_tbl.lock, flags);25112511+ ah->av->valid = 0;25122512+ spin_unlock_irqrestore(&dev->av_tbl.lock, flags);25132513+ return 0;25142514+}25152515+25162516+static int 
ocrdma_create_mq_eq(struct ocrdma_dev *dev)25172517+{25182518+ int status;25192519+ int irq;25202520+ unsigned long flags = 0;25212521+ int num_eq = 0;25222522+25232523+ if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)25242524+ flags = IRQF_SHARED;25252525+ else {25262526+ num_eq = dev->nic_info.msix.num_vectors -25272527+ dev->nic_info.msix.start_vector;25282528+ /* minimum two vectors/eq are required for rdma to work.25292529+ * one for control path and one for data path.25302530+ */25312531+ if (num_eq < 2)25322532+ return -EBUSY;25332533+ }25342534+25352535+ status = ocrdma_create_eq(dev, &dev->meq, OCRDMA_EQ_LEN);25362536+ if (status)25372537+ return status;25382538+ sprintf(dev->meq.irq_name, "ocrdma_mq%d", dev->id);25392539+ irq = ocrdma_get_irq(dev, &dev->meq);25402540+ status = request_irq(irq, ocrdma_irq_handler, flags, dev->meq.irq_name,25412541+ &dev->meq);25422542+ if (status)25432543+ _ocrdma_destroy_eq(dev, &dev->meq);25442544+ return status;25452545+}25462546+25472547+static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)25482548+{25492549+ int num_eq, i, status;25502550+ int irq;25512551+ unsigned long flags = 0;25522552+25532553+ num_eq = dev->nic_info.msix.num_vectors -25542554+ dev->nic_info.msix.start_vector;25552555+ if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {25562556+ num_eq = 1;25572557+ flags = IRQF_SHARED;25582558+ } else25592559+ num_eq = min_t(u32, num_eq, num_online_cpus());25602560+ dev->qp_eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);25612561+ if (!dev->qp_eq_tbl)25622562+ return -ENOMEM;25632563+25642564+ for (i = 0; i < num_eq; i++) {25652565+ status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i],25662566+ OCRDMA_EQ_LEN);25672567+ if (status) {25682568+ status = -EINVAL;25692569+ break;25702570+ }25712571+ sprintf(dev->qp_eq_tbl[i].irq_name, "ocrdma_qp%d-%d",25722572+ dev->id, i);25732573+ irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]);25742574+ status = request_irq(irq, ocrdma_irq_handler, 
flags,25752575+ dev->qp_eq_tbl[i].irq_name,25762576+ &dev->qp_eq_tbl[i]);25772577+ if (status) {25782578+ _ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);25792579+ status = -EINVAL;25802580+ break;25812581+ }25822582+ dev->eq_cnt += 1;25832583+ }25842584+ /* one eq is sufficient for data path to work */25852585+ if (dev->eq_cnt >= 1)25862586+ return 0;25872587+ if (status)25882588+ ocrdma_destroy_qp_eqs(dev);25892589+ return status;25902590+}25912591+25922592+int ocrdma_init_hw(struct ocrdma_dev *dev)25932593+{25942594+ int status;25952595+ /* set up control path eq */25962596+ status = ocrdma_create_mq_eq(dev);25972597+ if (status)25982598+ return status;25992599+ /* set up data path eq */26002600+ status = ocrdma_create_qp_eqs(dev);26012601+ if (status)26022602+ goto qpeq_err;26032603+ status = ocrdma_create_mq(dev);26042604+ if (status)26052605+ goto mq_err;26062606+ status = ocrdma_mbx_query_fw_config(dev);26072607+ if (status)26082608+ goto conf_err;26092609+ status = ocrdma_mbx_query_dev(dev);26102610+ if (status)26112611+ goto conf_err;26122612+ status = ocrdma_mbx_query_fw_ver(dev);26132613+ if (status)26142614+ goto conf_err;26152615+ status = ocrdma_mbx_create_ah_tbl(dev);26162616+ if (status)26172617+ goto conf_err;26182618+ return 0;26192619+26202620+conf_err:26212621+ ocrdma_destroy_mq(dev);26222622+mq_err:26232623+ ocrdma_destroy_qp_eqs(dev);26242624+qpeq_err:26252625+ ocrdma_destroy_eq(dev, &dev->meq);26262626+ ocrdma_err("%s() status=%d\n", __func__, status);26272627+ return status;26282628+}26292629+26302630+void ocrdma_cleanup_hw(struct ocrdma_dev *dev)26312631+{26322632+ ocrdma_mbx_delete_ah_tbl(dev);26332633+26342634+ /* cleanup the data path eqs */26352635+ ocrdma_destroy_qp_eqs(dev);26362636+26372637+ /* cleanup the control path */26382638+ ocrdma_destroy_mq(dev);26392639+ ocrdma_destroy_eq(dev, &dev->meq);26402640+}
+132
drivers/infiniband/hw/ocrdma/ocrdma_hw.h
···11+/*******************************************************************22+ * This file is part of the Emulex RoCE Device Driver for *33+ * RoCE (RDMA over Converged Ethernet) CNA Adapters. *44+ * Copyright (C) 2008-2012 Emulex. All rights reserved. *55+ * EMULEX and SLI are trademarks of Emulex. *66+ * www.emulex.com *77+ * *88+ * This program is free software; you can redistribute it and/or *99+ * modify it under the terms of version 2 of the GNU General *1010+ * Public License as published by the Free Software Foundation. *1111+ * This program is distributed in the hope that it will be useful. *1212+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *1313+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *1414+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *1515+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *1616+ * TO BE LEGALLY INVALID. See the GNU General Public License for *1717+ * more details, a copy of which can be found in the file COPYING *1818+ * included with this package. 
*1919+ *2020+ * Contact Information:2121+ * linux-drivers@emulex.com2222+ *2323+ * Emulex2424+ * 3333 Susan Street2525+ * Costa Mesa, CA 926262626+ *******************************************************************/2727+2828+#ifndef __OCRDMA_HW_H__2929+#define __OCRDMA_HW_H__3030+3131+#include "ocrdma_sli.h"3232+3333+static inline void ocrdma_cpu_to_le32(void *dst, u32 len)3434+{3535+#ifdef __BIG_ENDIAN3636+ int i = 0;3737+ u32 *src_ptr = dst;3838+ u32 *dst_ptr = dst;3939+ for (; i < (len / 4); i++)4040+ *(dst_ptr + i) = cpu_to_le32p(src_ptr + i);4141+#endif4242+}4343+4444+static inline void ocrdma_le32_to_cpu(void *dst, u32 len)4545+{4646+#ifdef __BIG_ENDIAN4747+ int i = 0;4848+ u32 *src_ptr = dst;4949+ u32 *dst_ptr = dst;5050+ for (; i < (len / sizeof(u32)); i++)5151+ *(dst_ptr + i) = le32_to_cpu(*(src_ptr + i));5252+#endif5353+}5454+5555+static inline void ocrdma_copy_cpu_to_le32(void *dst, void *src, u32 len)5656+{5757+#ifdef __BIG_ENDIAN5858+ int i = 0;5959+ u32 *src_ptr = src;6060+ u32 *dst_ptr = dst;6161+ for (; i < (len / sizeof(u32)); i++)6262+ *(dst_ptr + i) = cpu_to_le32p(src_ptr + i);6363+#else6464+ memcpy(dst, src, len);6565+#endif6666+}6767+6868+static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len)6969+{7070+#ifdef __BIG_ENDIAN7171+ int i = 0;7272+ u32 *src_ptr = src;7373+ u32 *dst_ptr = dst;7474+ for (; i < len / sizeof(u32); i++)7575+ *(dst_ptr + i) = le32_to_cpu(*(src_ptr + i));7676+#else7777+ memcpy(dst, src, len);7878+#endif7979+}8080+8181+int ocrdma_init_hw(struct ocrdma_dev *);8282+void ocrdma_cleanup_hw(struct ocrdma_dev *);8383+8484+enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps);8585+void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,8686+ bool solicited, u16 cqe_popped);8787+8888+/* verbs specific mailbox commands */8989+int ocrdma_query_config(struct ocrdma_dev *,9090+ struct ocrdma_mbx_query_config *config);9191+int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 
*mac_addr);9292+9393+int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);9494+int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);9595+9696+int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,9797+ u32 pd_id, int addr_check);9898+int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey);9999+100100+int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,101101+ u32 pd_id, int acc);102102+int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *,103103+ int entries, int dpp_cq);104104+int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *);105105+106106+int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs,107107+ u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,108108+ u16 *dpp_credit_lmt);109109+int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *,110110+ struct ib_qp_attr *attrs, int attr_mask,111111+ enum ib_qp_state old_qps);112112+int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *,113113+ struct ocrdma_qp_params *param);114114+int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *);115115+116116+int ocrdma_mbx_create_srq(struct ocrdma_srq *,117117+ struct ib_srq_init_attr *,118118+ struct ocrdma_pd *);119119+int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);120120+int ocrdma_mbx_query_srq(struct ocrdma_srq *, struct ib_srq_attr *);121121+int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);122122+123123+int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *);124124+int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *);125125+126126+int ocrdma_qp_state_machine(struct ocrdma_qp *, enum ib_qp_state new_state,127127+ enum ib_qp_state *old_ib_state);128128+bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);129129+bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);130130+void ocrdma_flush_qp(struct ocrdma_qp *);131131+132132+#endif /* 
__OCRDMA_HW_H__ */
+558
drivers/infiniband/hw/ocrdma/ocrdma_main.c
···11+/*******************************************************************22+ * This file is part of the Emulex RoCE Device Driver for *33+ * RoCE (RDMA over Converged Ethernet) adapters. *44+ * Copyright (C) 2008-2012 Emulex. All rights reserved. *55+ * EMULEX and SLI are trademarks of Emulex. *66+ * www.emulex.com *77+ * *88+ * This program is free software; you can redistribute it and/or *99+ * modify it under the terms of version 2 of the GNU General *1010+ * Public License as published by the Free Software Foundation. *1111+ * This program is distributed in the hope that it will be useful. *1212+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *1313+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *1414+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *1515+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *1616+ * TO BE LEGALLY INVALID. See the GNU General Public License for *1717+ * more details, a copy of which can be found in the file COPYING *1818+ * included with this package. 
*1919+ *2020+ * Contact Information:2121+ * linux-drivers@emulex.com2222+ *2323+ * Emulex2424+ * 3333 Susan Street2525+ * Costa Mesa, CA 926262626+ *******************************************************************/2727+2828+#include <linux/module.h>2929+#include <linux/version.h>3030+#include <linux/idr.h>3131+#include <rdma/ib_verbs.h>3232+#include <rdma/ib_user_verbs.h>3333+#include <rdma/ib_addr.h>3434+3535+#include <linux/netdevice.h>3636+#include <net/addrconf.h>3737+3838+#include "ocrdma.h"3939+#include "ocrdma_verbs.h"4040+#include "ocrdma_ah.h"4141+#include "be_roce.h"4242+#include "ocrdma_hw.h"4343+4444+MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION);4545+MODULE_DESCRIPTION("Emulex RoCE HCA Driver");4646+MODULE_AUTHOR("Emulex Corporation");4747+MODULE_LICENSE("GPL");4848+4949+static LIST_HEAD(ocrdma_dev_list);5050+static DEFINE_MUTEX(ocrdma_devlist_lock);5151+static DEFINE_IDR(ocrdma_dev_id);5252+5353+static union ib_gid ocrdma_zero_sgid;5454+static int ocrdma_inet6addr_event(struct notifier_block *,5555+ unsigned long, void *);5656+5757+static struct notifier_block ocrdma_inet6addr_notifier = {5858+ .notifier_call = ocrdma_inet6addr_event5959+};6060+6161+int ocrdma_get_instance(void)6262+{6363+ int instance = 0;6464+6565+ /* Assign an unused number */6666+ if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL))6767+ return -1;6868+ if (idr_get_new(&ocrdma_dev_id, NULL, &instance))6969+ return -1;7070+ return instance;7171+}7272+7373+void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)7474+{7575+ u8 mac_addr[6];7676+7777+ memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN);7878+ guid[0] = mac_addr[0] ^ 2;7979+ guid[1] = mac_addr[1];8080+ guid[2] = mac_addr[2];8181+ guid[3] = 0xff;8282+ guid[4] = 0xfe;8383+ guid[5] = mac_addr[3];8484+ guid[6] = mac_addr[4];8585+ guid[7] = mac_addr[5];8686+}8787+8888+static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,8989+ bool is_vlan, u16 vlan_id)9090+{9191+ sgid->global.subnet_prefix = 
cpu_to_be64(0xfe80000000000000LL);9292+ sgid->raw[8] = mac_addr[0] ^ 2;9393+ sgid->raw[9] = mac_addr[1];9494+ sgid->raw[10] = mac_addr[2];9595+ if (is_vlan) {9696+ sgid->raw[11] = vlan_id >> 8;9797+ sgid->raw[12] = vlan_id & 0xff;9898+ } else {9999+ sgid->raw[11] = 0xff;100100+ sgid->raw[12] = 0xfe;101101+ }102102+ sgid->raw[13] = mac_addr[3];103103+ sgid->raw[14] = mac_addr[4];104104+ sgid->raw[15] = mac_addr[5];105105+}106106+107107+static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,108108+ bool is_vlan, u16 vlan_id)109109+{110110+ int i;111111+ bool found = false;112112+ union ib_gid new_sgid;113113+ int free_idx = OCRDMA_MAX_SGID;114114+ unsigned long flags;115115+116116+ memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));117117+118118+ ocrdma_build_sgid_mac(&new_sgid, mac_addr, is_vlan, vlan_id);119119+120120+ spin_lock_irqsave(&dev->sgid_lock, flags);121121+ for (i = 0; i < OCRDMA_MAX_SGID; i++) {122122+ if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,123123+ sizeof(union ib_gid))) {124124+ /* found free entry */125125+ if (!found) {126126+ free_idx = i;127127+ found = true;128128+ break;129129+ }130130+ } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,131131+ sizeof(union ib_gid))) {132132+ /* entry already present, no addition is required. 
*/133133+ spin_unlock_irqrestore(&dev->sgid_lock, flags);134134+ return;135135+ }136136+ }137137+ /* if entry doesn't exist and if table has some space, add entry */138138+ if (found)139139+ memcpy(&dev->sgid_tbl[free_idx], &new_sgid,140140+ sizeof(union ib_gid));141141+ spin_unlock_irqrestore(&dev->sgid_lock, flags);142142+}143143+144144+static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,145145+ bool is_vlan, u16 vlan_id)146146+{147147+ int found = false;148148+ int i;149149+ union ib_gid sgid;150150+ unsigned long flags;151151+152152+ ocrdma_build_sgid_mac(&sgid, mac_addr, is_vlan, vlan_id);153153+154154+ spin_lock_irqsave(&dev->sgid_lock, flags);155155+ /* first is default sgid, which cannot be deleted. */156156+ for (i = 1; i < OCRDMA_MAX_SGID; i++) {157157+ if (!memcmp(&dev->sgid_tbl[i], &sgid, sizeof(union ib_gid))) {158158+ /* found matching entry */159159+ memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));160160+ found = true;161161+ break;162162+ }163163+ }164164+ spin_unlock_irqrestore(&dev->sgid_lock, flags);165165+ return found;166166+}167167+168168+static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)169169+{170170+ /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */171171+ union ib_gid *sgid = &dev->sgid_tbl[0];172172+173173+ sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);174174+ ocrdma_get_guid(dev, &sgid->raw[8]);175175+}176176+177177+static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)178178+{179179+ struct net_device *netdev, *tmp;180180+ u16 vlan_id;181181+ bool is_vlan;182182+183183+ netdev = dev->nic_info.netdev;184184+185185+ ocrdma_add_default_sgid(dev);186186+187187+ rcu_read_lock();188188+ for_each_netdev_rcu(&init_net, tmp) {189189+ if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {190190+ if (!netif_running(tmp) || !netif_oper_up(tmp))191191+ continue;192192+ if (netdev != tmp) {193193+ vlan_id = vlan_dev_vlan_id(tmp);194194+ is_vlan = true;195195+ } else {196196+ is_vlan = 
false;197197+ vlan_id = 0;198198+ tmp = netdev;199199+ }200200+ ocrdma_add_sgid(dev, tmp->dev_addr, is_vlan, vlan_id);201201+ }202202+ }203203+ rcu_read_unlock();204204+ return 0;205205+}206206+207207+static int ocrdma_inet6addr_event(struct notifier_block *notifier,208208+ unsigned long event, void *ptr)209209+{210210+ struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;211211+ struct net_device *event_netdev = ifa->idev->dev;212212+ struct net_device *netdev = NULL;213213+ struct ib_event gid_event;214214+ struct ocrdma_dev *dev;215215+ bool found = false;216216+ bool is_vlan = false;217217+ u16 vid = 0;218218+219219+ netdev = vlan_dev_real_dev(event_netdev);220220+ if (netdev != event_netdev) {221221+ is_vlan = true;222222+ vid = vlan_dev_vlan_id(event_netdev);223223+ }224224+ mutex_lock(&ocrdma_devlist_lock);225225+ list_for_each_entry(dev, &ocrdma_dev_list, entry) {226226+ if (dev->nic_info.netdev == netdev) {227227+ found = true;228228+ break;229229+ }230230+ }231231+ mutex_unlock(&ocrdma_devlist_lock);232232+233233+ if (!found)234234+ return NOTIFY_DONE;235235+ if (!rdma_link_local_addr((struct in6_addr *)&ifa->addr))236236+ return NOTIFY_DONE;237237+238238+ mutex_lock(&dev->dev_lock);239239+ switch (event) {240240+ case NETDEV_UP:241241+ ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);242242+ break;243243+ case NETDEV_DOWN:244244+ found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);245245+ if (found) {246246+ /* found the matching entry, notify247247+ * the consumers about it248248+ */249249+ gid_event.device = &dev->ibdev;250250+ gid_event.element.port_num = 1;251251+ gid_event.event = IB_EVENT_GID_CHANGE;252252+ ib_dispatch_event(&gid_event);253253+ }254254+ break;255255+ default:256256+ break;257257+ }258258+ mutex_unlock(&dev->dev_lock);259259+ return NOTIFY_OK;260260+}261261+262262+static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,263263+ u8 port_num)264264+{265265+ return 
IB_LINK_LAYER_ETHERNET;266266+}267267+268268+int ocrdma_register_device(struct ocrdma_dev *dev)269269+{270270+ strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);271271+ ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);272272+ memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,273273+ sizeof(OCRDMA_NODE_DESC));274274+ dev->ibdev.owner = THIS_MODULE;275275+ dev->ibdev.uverbs_cmd_mask =276276+ OCRDMA_UVERBS(GET_CONTEXT) |277277+ OCRDMA_UVERBS(QUERY_DEVICE) |278278+ OCRDMA_UVERBS(QUERY_PORT) |279279+ OCRDMA_UVERBS(ALLOC_PD) |280280+ OCRDMA_UVERBS(DEALLOC_PD) |281281+ OCRDMA_UVERBS(REG_MR) |282282+ OCRDMA_UVERBS(DEREG_MR) |283283+ OCRDMA_UVERBS(CREATE_COMP_CHANNEL) |284284+ OCRDMA_UVERBS(CREATE_CQ) |285285+ OCRDMA_UVERBS(RESIZE_CQ) |286286+ OCRDMA_UVERBS(DESTROY_CQ) |287287+ OCRDMA_UVERBS(REQ_NOTIFY_CQ) |288288+ OCRDMA_UVERBS(CREATE_QP) |289289+ OCRDMA_UVERBS(MODIFY_QP) |290290+ OCRDMA_UVERBS(QUERY_QP) |291291+ OCRDMA_UVERBS(DESTROY_QP) |292292+ OCRDMA_UVERBS(POLL_CQ) |293293+ OCRDMA_UVERBS(POST_SEND) |294294+ OCRDMA_UVERBS(POST_RECV);295295+296296+ dev->ibdev.uverbs_cmd_mask |=297297+ OCRDMA_UVERBS(CREATE_AH) |298298+ OCRDMA_UVERBS(MODIFY_AH) |299299+ OCRDMA_UVERBS(QUERY_AH) |300300+ OCRDMA_UVERBS(DESTROY_AH);301301+302302+ dev->ibdev.node_type = RDMA_NODE_IB_CA;303303+ dev->ibdev.phys_port_cnt = 1;304304+ dev->ibdev.num_comp_vectors = 1;305305+306306+ /* mandatory verbs. 
*/307307+ dev->ibdev.query_device = ocrdma_query_device;308308+ dev->ibdev.query_port = ocrdma_query_port;309309+ dev->ibdev.modify_port = ocrdma_modify_port;310310+ dev->ibdev.query_gid = ocrdma_query_gid;311311+ dev->ibdev.get_link_layer = ocrdma_link_layer;312312+ dev->ibdev.alloc_pd = ocrdma_alloc_pd;313313+ dev->ibdev.dealloc_pd = ocrdma_dealloc_pd;314314+315315+ dev->ibdev.create_cq = ocrdma_create_cq;316316+ dev->ibdev.destroy_cq = ocrdma_destroy_cq;317317+ dev->ibdev.resize_cq = ocrdma_resize_cq;318318+319319+ dev->ibdev.create_qp = ocrdma_create_qp;320320+ dev->ibdev.modify_qp = ocrdma_modify_qp;321321+ dev->ibdev.query_qp = ocrdma_query_qp;322322+ dev->ibdev.destroy_qp = ocrdma_destroy_qp;323323+324324+ dev->ibdev.query_pkey = ocrdma_query_pkey;325325+ dev->ibdev.create_ah = ocrdma_create_ah;326326+ dev->ibdev.destroy_ah = ocrdma_destroy_ah;327327+ dev->ibdev.query_ah = ocrdma_query_ah;328328+ dev->ibdev.modify_ah = ocrdma_modify_ah;329329+330330+ dev->ibdev.poll_cq = ocrdma_poll_cq;331331+ dev->ibdev.post_send = ocrdma_post_send;332332+ dev->ibdev.post_recv = ocrdma_post_recv;333333+ dev->ibdev.req_notify_cq = ocrdma_arm_cq;334334+335335+ dev->ibdev.get_dma_mr = ocrdma_get_dma_mr;336336+ dev->ibdev.dereg_mr = ocrdma_dereg_mr;337337+ dev->ibdev.reg_user_mr = ocrdma_reg_user_mr;338338+339339+ /* mandatory to support user space verbs consumer. 
*/340340+ dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext;341341+ dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext;342342+ dev->ibdev.mmap = ocrdma_mmap;343343+ dev->ibdev.dma_device = &dev->nic_info.pdev->dev;344344+345345+ dev->ibdev.process_mad = ocrdma_process_mad;346346+347347+ if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {348348+ dev->ibdev.uverbs_cmd_mask |=349349+ OCRDMA_UVERBS(CREATE_SRQ) |350350+ OCRDMA_UVERBS(MODIFY_SRQ) |351351+ OCRDMA_UVERBS(QUERY_SRQ) |352352+ OCRDMA_UVERBS(DESTROY_SRQ) |353353+ OCRDMA_UVERBS(POST_SRQ_RECV);354354+355355+ dev->ibdev.create_srq = ocrdma_create_srq;356356+ dev->ibdev.modify_srq = ocrdma_modify_srq;357357+ dev->ibdev.query_srq = ocrdma_query_srq;358358+ dev->ibdev.destroy_srq = ocrdma_destroy_srq;359359+ dev->ibdev.post_srq_recv = ocrdma_post_srq_recv;360360+ }361361+ return ib_register_device(&dev->ibdev, NULL);362362+}363363+364364+static int ocrdma_alloc_resources(struct ocrdma_dev *dev)365365+{366366+ mutex_init(&dev->dev_lock);367367+ dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *368368+ OCRDMA_MAX_SGID, GFP_KERNEL);369369+ if (!dev->sgid_tbl)370370+ goto alloc_err;371371+ spin_lock_init(&dev->sgid_lock);372372+373373+ dev->cq_tbl = kzalloc(sizeof(struct ocrdma_cq *) *374374+ OCRDMA_MAX_CQ, GFP_KERNEL);375375+ if (!dev->cq_tbl)376376+ goto alloc_err;377377+378378+ if (dev->attr.max_qp) {379379+ dev->qp_tbl = kzalloc(sizeof(struct ocrdma_qp *) *380380+ OCRDMA_MAX_QP, GFP_KERNEL);381381+ if (!dev->qp_tbl)382382+ goto alloc_err;383383+ }384384+ spin_lock_init(&dev->av_tbl.lock);385385+ spin_lock_init(&dev->flush_q_lock);386386+ return 0;387387+alloc_err:388388+ ocrdma_err("%s(%d) error.\n", __func__, dev->id);389389+ return -ENOMEM;390390+}391391+392392+static void ocrdma_free_resources(struct ocrdma_dev *dev)393393+{394394+ kfree(dev->qp_tbl);395395+ kfree(dev->cq_tbl);396396+ kfree(dev->sgid_tbl);397397+}398398+399399+static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)400400+{401401+ int 
status = 0;
	struct ocrdma_dev *dev;

	dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
	if (!dev) {
		ocrdma_err("Unable to allocate ib device\n");
		return NULL;
	}
	dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
	if (!dev->mbx_cmd)
		goto idr_err;

	memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
	/* NOTE(review): if ocrdma_get_instance() fails, status is still 0
	 * and the exit trace below prints ret=0 -- confirm whether a real
	 * error code is wanted on this path.
	 */
	dev->id = ocrdma_get_instance();
	if (dev->id < 0)
		goto idr_err;

	status = ocrdma_init_hw(dev);
	if (status)
		goto init_err;

	status = ocrdma_alloc_resources(dev);
	if (status)
		goto alloc_err;

	status = ocrdma_build_sgid_tbl(dev);
	if (status)
		goto alloc_err;

	status = ocrdma_register_device(dev);
	if (status)
		goto alloc_err;

	/* Publish the fully initialized device on the global list. */
	mutex_lock(&ocrdma_devlist_lock);
	list_add_tail(&dev->entry, &ocrdma_dev_list);
	mutex_unlock(&ocrdma_devlist_lock);
	return dev;

alloc_err:
	/* Unwind in reverse order of setup. */
	ocrdma_free_resources(dev);
	ocrdma_cleanup_hw(dev);
init_err:
	idr_remove(&ocrdma_dev_id, dev->id);
idr_err:
	kfree(dev->mbx_cmd);
	ib_dealloc_device(&dev->ibdev);
	ocrdma_err("%s() leaving. ret=%d\n", __func__, status);
	return NULL;
}

/* Tear down one device instance. */
static void ocrdma_remove(struct ocrdma_dev *dev)
{
	/* first unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	ib_unregister_device(&dev->ibdev);

	mutex_lock(&ocrdma_devlist_lock);
	list_del(&dev->entry);
	mutex_unlock(&ocrdma_devlist_lock);

	ocrdma_free_resources(dev);
	ocrdma_cleanup_hw(dev);

	idr_remove(&ocrdma_dev_id, dev->id);
	kfree(dev->mbx_cmd);
	ib_dealloc_device(&dev->ibdev);
}

/* Link-up handler: report port 1 as ACTIVE to the RDMA core. */
static int ocrdma_open(struct ocrdma_dev *dev)
{
	struct ib_event port_event;

	port_event.event = IB_EVENT_PORT_ACTIVE;
	port_event.element.port_num = 1;
	port_event.device = &dev->ibdev;
	ib_dispatch_event(&port_event);
	return 0;
}

/* Link-down handler: move every live QP to the ERROR state, raise
 * QP_FATAL for each, then report PORT_ERR for port 1.
 */
static int ocrdma_close(struct ocrdma_dev *dev)
{
	int i;
	struct ocrdma_qp *qp, **cur_qp;
	struct ib_event err_event;
	struct ib_qp_attr attrs;
	int attr_mask = IB_QP_STATE;

	attrs.qp_state = IB_QPS_ERR;
	mutex_lock(&dev->dev_lock);
	if (dev->qp_tbl) {
		cur_qp = dev->qp_tbl;
		for (i = 0; i < OCRDMA_MAX_QP; i++) {
			qp = cur_qp[i];
			if (qp) {
				/* change the QP state to ERROR */
				_ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);

				err_event.event = IB_EVENT_QP_FATAL;
				err_event.element.qp = &qp->ibqp;
				err_event.device = &dev->ibdev;
				ib_dispatch_event(&err_event);
			}
		}
	}
	mutex_unlock(&dev->dev_lock);

	err_event.event = IB_EVENT_PORT_ERR;
	err_event.element.port_num = 1;
	err_event.device = &dev->ibdev;
	ib_dispatch_event(&err_event);
	return 0;
}

/* event handling via NIC driver ensures that all the NIC specific
 * initialization done before RoCE driver notifies
 * event to stack.
 */
static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
{
	switch (event) {
	case BE_DEV_UP:
		ocrdma_open(dev);
		break;
	case BE_DEV_DOWN:
		ocrdma_close(dev);
		break;
	};
}

/* Callbacks registered with the Emulex NIC driver via
 * be_roce_register_driver().
 */
struct ocrdma_driver ocrdma_drv = {
	.name = "ocrdma_driver",
	.add = ocrdma_add,
	.remove = ocrdma_remove,
	.state_change_handler = ocrdma_event_handler,
};

static int __init ocrdma_init_module(void)
{
	int status;

	status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
	if (status)
		return status;
	status = be_roce_register_driver(&ocrdma_drv);
	if (status)
		/* Undo the notifier registration on failure. */
		unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier);
	return status;
}

static void __exit ocrdma_exit_module(void)
{
	be_roce_unregister_driver(&ocrdma_drv);
	unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier);
}

module_init(ocrdma_init_module);
module_exit(ocrdma_exit_module);
···11+/*******************************************************************22+ * This file is part of the Emulex RoCE Device Driver for *33+ * RoCE (RDMA over Converged Ethernet) adapters. *44+ * Copyright (C) 2008-2012 Emulex. All rights reserved. *55+ * EMULEX and SLI are trademarks of Emulex. *66+ * www.emulex.com *77+ * *88+ * This program is free software; you can redistribute it and/or *99+ * modify it under the terms of version 2 of the GNU General *1010+ * Public License as published by the Free Software Foundation. *1111+ * This program is distributed in the hope that it will be useful. *1212+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *1313+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *1414+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *1515+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *1616+ * TO BE LEGALLY INVALID. See the GNU General Public License for *1717+ * more details, a copy of which can be found in the file COPYING *1818+ * included with this package. 
*1919+ *2020+ * Contact Information:2121+ * linux-drivers@emulex.com2222+ *2323+ * Emulex2424+ * 3333 Susan Street2525+ * Costa Mesa, CA 926262626+ *******************************************************************/2727+2828+#include <linux/dma-mapping.h>2929+#include <rdma/ib_verbs.h>3030+#include <rdma/ib_user_verbs.h>3131+#include <rdma/iw_cm.h>3232+#include <rdma/ib_umem.h>3333+#include <rdma/ib_addr.h>3434+3535+#include "ocrdma.h"3636+#include "ocrdma_hw.h"3737+#include "ocrdma_verbs.h"3838+#include "ocrdma_abi.h"3939+4040+int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)4141+{4242+ if (index > 1)4343+ return -EINVAL;4444+4545+ *pkey = 0xffff;4646+ return 0;4747+}4848+4949+int ocrdma_query_gid(struct ib_device *ibdev, u8 port,5050+ int index, union ib_gid *sgid)5151+{5252+ struct ocrdma_dev *dev;5353+5454+ dev = get_ocrdma_dev(ibdev);5555+ memset(sgid, 0, sizeof(*sgid));5656+ if (index > OCRDMA_MAX_SGID)5757+ return -EINVAL;5858+5959+ memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));6060+6161+ return 0;6262+}6363+6464+int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)6565+{6666+ struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);6767+6868+ memset(attr, 0, sizeof *attr);6969+ memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],7070+ min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));7171+ ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);7272+ attr->max_mr_size = ~0ull;7373+ attr->page_size_cap = 0xffff000;7474+ attr->vendor_id = dev->nic_info.pdev->vendor;7575+ attr->vendor_part_id = dev->nic_info.pdev->device;7676+ attr->hw_ver = 0;7777+ attr->max_qp = dev->attr.max_qp;7878+ attr->max_ah = dev->attr.max_qp;7979+ attr->max_qp_wr = dev->attr.max_wqe;8080+8181+ attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |8282+ IB_DEVICE_RC_RNR_NAK_GEN |8383+ IB_DEVICE_SHUTDOWN_PORT |8484+ IB_DEVICE_SYS_IMAGE_GUID |8585+ IB_DEVICE_LOCAL_DMA_LKEY;8686+ attr->max_sge = dev->attr.max_send_sge;8787+ attr->max_sge_rd = 
dev->attr.max_send_sge;8888+ attr->max_cq = dev->attr.max_cq;8989+ attr->max_cqe = dev->attr.max_cqe;9090+ attr->max_mr = dev->attr.max_mr;9191+ attr->max_mw = 0;9292+ attr->max_pd = dev->attr.max_pd;9393+ attr->atomic_cap = 0;9494+ attr->max_fmr = 0;9595+ attr->max_map_per_fmr = 0;9696+ attr->max_qp_rd_atom =9797+ min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);9898+ attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;9999+ attr->max_srq = (dev->attr.max_qp - 1);100100+ attr->max_srq_sge = attr->max_sge;101101+ attr->max_srq_wr = dev->attr.max_rqe;102102+ attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;103103+ attr->max_fast_reg_page_list_len = 0;104104+ attr->max_pkeys = 1;105105+ return 0;106106+}107107+108108+int ocrdma_query_port(struct ib_device *ibdev,109109+ u8 port, struct ib_port_attr *props)110110+{111111+ enum ib_port_state port_state;112112+ struct ocrdma_dev *dev;113113+ struct net_device *netdev;114114+115115+ dev = get_ocrdma_dev(ibdev);116116+ if (port > 1) {117117+ ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,118118+ dev->id, port);119119+ return -EINVAL;120120+ }121121+ netdev = dev->nic_info.netdev;122122+ if (netif_running(netdev) && netif_oper_up(netdev)) {123123+ port_state = IB_PORT_ACTIVE;124124+ props->phys_state = 5;125125+ } else {126126+ port_state = IB_PORT_DOWN;127127+ props->phys_state = 3;128128+ }129129+ props->max_mtu = IB_MTU_4096;130130+ props->active_mtu = iboe_get_mtu(netdev->mtu);131131+ props->lid = 0;132132+ props->lmc = 0;133133+ props->sm_lid = 0;134134+ props->sm_sl = 0;135135+ props->state = port_state;136136+ props->port_cap_flags =137137+ IB_PORT_CM_SUP |138138+ IB_PORT_REINIT_SUP |139139+ IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;140140+ props->gid_tbl_len = OCRDMA_MAX_SGID;141141+ props->pkey_tbl_len = 1;142142+ props->bad_pkey_cntr = 0;143143+ props->qkey_viol_cntr = 0;144144+ props->active_width = IB_WIDTH_1X;145145+ props->active_speed = 4;146146+ props->max_msg_sz = 
0x80000000;147147+ props->max_vl_num = 4;148148+ return 0;149149+}150150+151151+int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,152152+ struct ib_port_modify *props)153153+{154154+ struct ocrdma_dev *dev;155155+156156+ dev = get_ocrdma_dev(ibdev);157157+ if (port > 1) {158158+ ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,159159+ dev->id, port);160160+ return -EINVAL;161161+ }162162+ return 0;163163+}164164+165165+static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,166166+ unsigned long len)167167+{168168+ struct ocrdma_mm *mm;169169+170170+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);171171+ if (mm == NULL)172172+ return -ENOMEM;173173+ mm->key.phy_addr = phy_addr;174174+ mm->key.len = len;175175+ INIT_LIST_HEAD(&mm->entry);176176+177177+ mutex_lock(&uctx->mm_list_lock);178178+ list_add_tail(&mm->entry, &uctx->mm_head);179179+ mutex_unlock(&uctx->mm_list_lock);180180+ return 0;181181+}182182+183183+static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,184184+ unsigned long len)185185+{186186+ struct ocrdma_mm *mm, *tmp;187187+188188+ mutex_lock(&uctx->mm_list_lock);189189+ list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {190190+ if (len != mm->key.len || phy_addr != mm->key.phy_addr)191191+ continue;192192+193193+ list_del(&mm->entry);194194+ kfree(mm);195195+ break;196196+ }197197+ mutex_unlock(&uctx->mm_list_lock);198198+}199199+200200+static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,201201+ unsigned long len)202202+{203203+ bool found = false;204204+ struct ocrdma_mm *mm;205205+206206+ mutex_lock(&uctx->mm_list_lock);207207+ list_for_each_entry(mm, &uctx->mm_head, entry) {208208+ if (len != mm->key.len || phy_addr != mm->key.phy_addr)209209+ continue;210210+211211+ found = true;212212+ break;213213+ }214214+ mutex_unlock(&uctx->mm_list_lock);215215+ return found;216216+}217217+218218+struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,219219+ struct ib_udata 
*udata)220220+{221221+ int status;222222+ struct ocrdma_ucontext *ctx;223223+ struct ocrdma_alloc_ucontext_resp resp;224224+ struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);225225+ struct pci_dev *pdev = dev->nic_info.pdev;226226+ u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);227227+228228+ if (!udata)229229+ return ERR_PTR(-EFAULT);230230+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);231231+ if (!ctx)232232+ return ERR_PTR(-ENOMEM);233233+ ctx->dev = dev;234234+ INIT_LIST_HEAD(&ctx->mm_head);235235+ mutex_init(&ctx->mm_list_lock);236236+237237+ ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,238238+ &ctx->ah_tbl.pa, GFP_KERNEL);239239+ if (!ctx->ah_tbl.va) {240240+ kfree(ctx);241241+ return ERR_PTR(-ENOMEM);242242+ }243243+ memset(ctx->ah_tbl.va, 0, map_len);244244+ ctx->ah_tbl.len = map_len;245245+246246+ resp.ah_tbl_len = ctx->ah_tbl.len;247247+ resp.ah_tbl_page = ctx->ah_tbl.pa;248248+249249+ status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);250250+ if (status)251251+ goto map_err;252252+ resp.dev_id = dev->id;253253+ resp.max_inline_data = dev->attr.max_inline_data;254254+ resp.wqe_size = dev->attr.wqe_size;255255+ resp.rqe_size = dev->attr.rqe_size;256256+ resp.dpp_wqe_size = dev->attr.wqe_size;257257+ resp.rsvd = 0;258258+259259+ memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));260260+ status = ib_copy_to_udata(udata, &resp, sizeof(resp));261261+ if (status)262262+ goto cpy_err;263263+ return &ctx->ibucontext;264264+265265+cpy_err:266266+ ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);267267+map_err:268268+ dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,269269+ ctx->ah_tbl.pa);270270+ kfree(ctx);271271+ return ERR_PTR(status);272272+}273273+274274+int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)275275+{276276+ struct ocrdma_mm *mm, *tmp;277277+ struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);278278+ struct pci_dev *pdev = uctx->dev->nic_info.pdev;279279+280280+ ocrdma_del_mmap(uctx, 
uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	/* Drop any remaining mmap registrations for this context. */
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
	kfree(uctx);
	return 0;
}

/* mmap handler: map doorbell pages, DPP space or queue memory into
 * user space depending on which registered range the offset falls in.
 */
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = ucontext->dev;
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status = 0;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	/* Only ranges previously registered via ocrdma_add_mmap() may be
	 * mapped by user space.
	 */
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	/* NOTE(review): the '<=' upper-bound comparisons below look off by
	 * one page -- confirm against the doorbell/DPP range layout.
	 */
	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
		(len <= dev->nic_info.db_page_size)) {
		/* doorbell mapping */
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
				   dev->nic_info.dpp_unmapped_len)) &&
		(len <= dev->nic_info.dpp_unmapped_len)) {
		/* dpp area mapping */
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		/* queue memory mapping */
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len,
					 vma->vm_page_prot);
	}
	return status;
}

/* Copy PD creation results (doorbell/DPP mmap keys) to user space and
 * register the corresponding mmap ranges with the user context.
 */
static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
				struct ib_ucontext *ib_ctx,
				struct
ib_udata *udata)334334+{335335+ int status;336336+ u64 db_page_addr;337337+ u64 dpp_page_addr;338338+ u32 db_page_size;339339+ struct ocrdma_alloc_pd_uresp rsp;340340+ struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);341341+342342+ rsp.id = pd->id;343343+ rsp.dpp_enabled = pd->dpp_enabled;344344+ db_page_addr = pd->dev->nic_info.unmapped_db +345345+ (pd->id * pd->dev->nic_info.db_page_size);346346+ db_page_size = pd->dev->nic_info.db_page_size;347347+348348+ status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);349349+ if (status)350350+ return status;351351+352352+ if (pd->dpp_enabled) {353353+ dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr +354354+ (pd->id * OCRDMA_DPP_PAGE_SIZE);355355+ status = ocrdma_add_mmap(uctx, dpp_page_addr,356356+ OCRDMA_DPP_PAGE_SIZE);357357+ if (status)358358+ goto dpp_map_err;359359+ rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);360360+ rsp.dpp_page_addr_lo = dpp_page_addr;361361+ }362362+363363+ status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));364364+ if (status)365365+ goto ucopy_err;366366+367367+ pd->uctx = uctx;368368+ return 0;369369+370370+ucopy_err:371371+ ocrdma_del_mmap(pd->uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE);372372+dpp_map_err:373373+ ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);374374+ return status;375375+}376376+377377+struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,378378+ struct ib_ucontext *context,379379+ struct ib_udata *udata)380380+{381381+ struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);382382+ struct ocrdma_pd *pd;383383+ int status;384384+385385+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);386386+ if (!pd)387387+ return ERR_PTR(-ENOMEM);388388+ pd->dev = dev;389389+ if (udata && context) {390390+ pd->dpp_enabled = (dev->nic_info.dev_family ==391391+ OCRDMA_GEN2_FAMILY) ? true : false;392392+ pd->num_dpp_qp =393393+ pd->dpp_enabled ? 
OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
	}
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		kfree(pd);
		return ERR_PTR(status);
	}
	atomic_set(&pd->use_cnt, 0);

	if (udata && context) {
		status = ocrdma_copy_pd_uresp(pd, context, udata);
		if (status)
			goto err;
	}
	return &pd->ibpd;

err:
	ocrdma_dealloc_pd(&pd->ibpd);
	return ERR_PTR(status);
}

/* Free a PD.  Fails with -EFAULT while any object still references it
 * (use_cnt != 0).  For user PDs, also drops the doorbell/DPP mmap
 * registrations created at allocation time.
 */
int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = pd->dev;
	int status;
	u64 usr_db;

	if (atomic_read(&pd->use_cnt)) {
		ocrdma_err("%s(%d) pd=0x%x is in use.\n",
			   __func__, dev->id, pd->id);
		status = -EFAULT;
		goto dealloc_err;
	}
	status = ocrdma_mbx_dealloc_pd(dev, pd);
	if (pd->uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
		    (pd->id * OCRDMA_DPP_PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db,
					OCRDMA_DPP_PAGE_SIZE);
		usr_db = dev->nic_info.unmapped_db +
		    (pd->id * dev->nic_info.db_page_size);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
	}
	kfree(pd);
dealloc_err:
	return status;
}

/* Allocate a non-FRMR lkey MR covering num_pbls PBLs with the given
 * access flags.  Returns a valid MR or an ERR_PTR (never NULL).
 */
static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
					   int acc, u32 num_pbls,
					   u32 addr_check)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = pd->dev;

	/* Remote write requires local write, per the IB verbs rules. */
	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		ocrdma_err("%s(%d) leaving err, invalid access rights\n",
			   __func__, dev->id);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
	mr->hwmr.dev = dev;
	mr->hwmr.fr_mr = 0;
mr->hwmr.local_rd = 1;463463+ mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;464464+ mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;465465+ mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;466466+ mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;467467+ mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;468468+ mr->hwmr.num_pbls = num_pbls;469469+470470+ status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pd->id, addr_check);471471+ if (status) {472472+ kfree(mr);473473+ return ERR_PTR(-ENOMEM);474474+ }475475+ mr->pd = pd;476476+ atomic_inc(&pd->use_cnt);477477+ mr->ibmr.lkey = mr->hwmr.lkey;478478+ if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)479479+ mr->ibmr.rkey = mr->hwmr.lkey;480480+ return mr;481481+}482482+483483+struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)484484+{485485+ struct ocrdma_mr *mr;486486+487487+ mr = ocrdma_alloc_lkey(ibpd, acc, 0, OCRDMA_ADDR_CHECK_DISABLE);488488+ if (!mr)489489+ return ERR_PTR(-ENOMEM);490490+491491+ return &mr->ibmr;492492+}493493+494494+static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,495495+ struct ocrdma_hw_mr *mr)496496+{497497+ struct pci_dev *pdev = dev->nic_info.pdev;498498+ int i = 0;499499+500500+ if (mr->pbl_table) {501501+ for (i = 0; i < mr->num_pbls; i++) {502502+ if (!mr->pbl_table[i].va)503503+ continue;504504+ dma_free_coherent(&pdev->dev, mr->pbl_size,505505+ mr->pbl_table[i].va,506506+ mr->pbl_table[i].pa);507507+ }508508+ kfree(mr->pbl_table);509509+ mr->pbl_table = NULL;510510+ }511511+}512512+513513+static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)514514+{515515+ u32 num_pbls = 0;516516+ u32 idx = 0;517517+ int status = 0;518518+ u32 pbl_size;519519+520520+ do {521521+ pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);522522+ if (pbl_size > MAX_OCRDMA_PBL_SIZE) {523523+ status = -EFAULT;524524+ break;525525+ }526526+ num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));527527+ num_pbls = num_pbls / (pbl_size / 
sizeof(u64));
		idx++;
	} while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

/* Allocate and zero one DMA-coherent buffer per PBL.  On failure every
 * buffer allocated so far is released via ocrdma_free_mr_pbl_tbl().
 */
static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
				mr->num_pbls, GFP_KERNEL);

	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		memset(va, 0, dma_len);
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}

/* Walk the pinned umem chunks and write one little-endian PBE per page
 * into the PBLs, advancing to the next PBL when the current one fills.
 */
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct ib_umem_chunk *chunk;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	shift = ilog2(umem->page_size);

	list_for_each_entry(chunk, &umem->chunk_list, list) {
		/* get all the dma regions from the chunk. */
		for (i = 0; i < chunk->nmap; i++) {
			pages = sg_dma_len(&chunk->page_list[i]) >> shift;
			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
				/* store the page address in pbe */
				pbe->pa_lo =
				    cpu_to_le32(sg_dma_address
						(&chunk->page_list[i]) +
						(umem->page_size * pg_cnt));
				pbe->pa_hi =
				    cpu_to_le32(upper_32_bits
						((sg_dma_address
						  (&chunk->page_list[i]) +
						  umem->page_size * pg_cnt)));
				pbe_cnt += 1;
				total_num_pbes += 1;
				pbe++;

				/* if done building pbes, issue the mbx cmd. */
				if (total_num_pbes == num_pbes)
					return;

				/* if the given pbl is full storing the pbes,
				 * move to next pbl.
				 */
				if (pbe_cnt ==
					(mr->hwmr.pbl_size / sizeof(u64))) {
					pbl_tbl++;
					pbe = (struct ocrdma_pbe *)pbl_tbl->va;
					pbe_cnt = 0;
				}
			}
		}
	}
}

/* Register a user memory region: pin the pages, build PBLs and issue
 * the MR registration mailbox command.
 */
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	struct pci_dev *pdev;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);
	dev = pd->dev;
	pdev = dev->nic_info.pdev;

	/* Remote write requires local write, per the IB verbs rules. */
	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->hwmr.dev = dev;
	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(mr, num_pbes);
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = mr->umem->page_size;
	mr->hwmr.fbo = mr->umem->offset;
mr->hwmr.va = usr_addr;654654+ mr->hwmr.len = len;655655+ mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;656656+ mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;657657+ mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;658658+ mr->hwmr.local_rd = 1;659659+ mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;660660+ status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);661661+ if (status)662662+ goto umem_err;663663+ build_user_pbes(dev, mr, num_pbes);664664+ status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);665665+ if (status)666666+ goto mbx_err;667667+ mr->pd = pd;668668+ atomic_inc(&pd->use_cnt);669669+ mr->ibmr.lkey = mr->hwmr.lkey;670670+ if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)671671+ mr->ibmr.rkey = mr->hwmr.lkey;672672+673673+ return &mr->ibmr;674674+675675+mbx_err:676676+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);677677+umem_err:678678+ kfree(mr);679679+ return ERR_PTR(status);680680+}681681+682682+int ocrdma_dereg_mr(struct ib_mr *ib_mr)683683+{684684+ struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);685685+ struct ocrdma_dev *dev = mr->hwmr.dev;686686+ int status;687687+688688+ status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);689689+690690+ if (mr->hwmr.fr_mr == 0)691691+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);692692+693693+ atomic_dec(&mr->pd->use_cnt);694694+ /* it could be user registered memory. 
*/695695+ if (mr->umem)696696+ ib_umem_release(mr->umem);697697+ kfree(mr);698698+ return status;699699+}700700+701701+static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,702702+ struct ib_ucontext *ib_ctx)703703+{704704+ int status;705705+ struct ocrdma_ucontext *uctx;706706+ struct ocrdma_create_cq_uresp uresp;707707+708708+ uresp.cq_id = cq->id;709709+ uresp.page_size = cq->len;710710+ uresp.num_pages = 1;711711+ uresp.max_hw_cqe = cq->max_hw_cqe;712712+ uresp.page_addr[0] = cq->pa;713713+ uresp.db_page_addr = cq->dev->nic_info.unmapped_db;714714+ uresp.db_page_size = cq->dev->nic_info.db_page_size;715715+ uresp.phase_change = cq->phase_change ? 1 : 0;716716+ status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));717717+ if (status) {718718+ ocrdma_err("%s(%d) copy error cqid=0x%x.\n",719719+ __func__, cq->dev->id, cq->id);720720+ goto err;721721+ }722722+ uctx = get_ocrdma_ucontext(ib_ctx);723723+ status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);724724+ if (status)725725+ goto err;726726+ status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);727727+ if (status) {728728+ ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);729729+ goto err;730730+ }731731+ cq->ucontext = uctx;732732+err:733733+ return status;734734+}735735+736736+struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,737737+ struct ib_ucontext *ib_ctx,738738+ struct ib_udata *udata)739739+{740740+ struct ocrdma_cq *cq;741741+ struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);742742+ int status;743743+ struct ocrdma_create_cq_ureq ureq;744744+745745+ if (udata) {746746+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))747747+ return ERR_PTR(-EFAULT);748748+ } else749749+ ureq.dpp_cq = 0;750750+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);751751+ if (!cq)752752+ return ERR_PTR(-ENOMEM);753753+754754+ spin_lock_init(&cq->cq_lock);755755+ spin_lock_init(&cq->comp_handler_lock);756756+ atomic_set(&cq->use_cnt, 
0);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);
	cq->dev = dev;

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (ib_ctx) {
		status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	cq->arm_needed = true;
	/* Publish the CQ for lookup by cq-id. */
	dev->cq_tbl[cq->id] = cq;

	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}

/* The HW queue is fixed-size; only accept a resize that fits within
 * the already-allocated max_hw_cqe.
 */
int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}

/* Destroy a CQ that is no longer referenced (use_cnt == 0); for user
 * CQs also drop its queue and doorbell mmap registrations.
 */
int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
	int status;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = cq->dev;

	if (atomic_read(&cq->use_cnt))
		return -EINVAL;

	status = ocrdma_mbx_destroy_cq(dev, cq);

	if (cq->ucontext) {
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq->len);
		ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
				dev->nic_info.db_page_size);
	}
	dev->cq_tbl[cq->id] = NULL;

	kfree(cq);
	return status;
}

/* Insert the QP into the id -> qp table if its slot is free. */
static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

/* Clear the QP's slot in the id -> qp table. */
static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}

/* Validate ib_qp_init_attr against device limits before QP creation. */
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type != IB_QPT_GSI &&
	    attrs->qp_type != IB_QPT_RC &&
	    attrs->qp_type != IB_QPT_UD) {
		ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n",
			   __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
		ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n",
			   __func__, dev->id, attrs->cap.max_send_wr);
		ocrdma_err("%s(%d) supported send_wr=0x%x\n",
			   __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	/* Receive queue limits only apply when not using an SRQ. */
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n",
			   __func__, dev->id, attrs->cap.max_recv_wr);
		ocrdma_err("%s(%d) supported recv_wr=0x%x\n",
			   __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		ocrdma_err("%s(%d) unsupported inline data size=0x%x"
			   " requested\n", __func__, dev->id,
			   attrs->cap.max_inline_data);
		ocrdma_err("%s(%d) supported inline data size=0x%x\n",
			   __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n",
			   __func__, dev->id, attrs->cap.max_send_sge);
		ocrdma_err("%s(%d) supported send_sge=0x%x\n",
			   __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n",
			   __func__, dev->id, attrs->cap.max_recv_sge);
		ocrdma_err("%s(%d) supported recv_sge=0x%x\n",
			   __func__, dev->id,
dev->attr.max_recv_sge);879879+ return -EINVAL;880880+ }881881+ /* unprivileged user space cannot create special QP */882882+ if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {883883+ ocrdma_err884884+ ("%s(%d) Userspace can't create special QPs of type=0x%x\n",885885+ __func__, dev->id, attrs->qp_type);886886+ return -EINVAL;887887+ }888888+ /* allow creating only one GSI type of QP */889889+ if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {890890+ ocrdma_err("%s(%d) GSI special QPs already created.\n",891891+ __func__, dev->id);892892+ return -EINVAL;893893+ }894894+ /* verify consumer QPs are not trying to use GSI QP's CQ */895895+ if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {896896+ if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||897897+ (dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq))) {898898+ ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n",899899+ __func__, dev->id);900900+ return -EINVAL;901901+ }902902+ }903903+ return 0;904904+}905905+906906+static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,907907+ struct ib_udata *udata, int dpp_offset,908908+ int dpp_credit_lmt, int srq)909909+{910910+ int status = 0;911911+ u64 usr_db;912912+ struct ocrdma_create_qp_uresp uresp;913913+ struct ocrdma_dev *dev = qp->dev;914914+ struct ocrdma_pd *pd = qp->pd;915915+916916+ memset(&uresp, 0, sizeof(uresp));917917+ usr_db = dev->nic_info.unmapped_db +918918+ (pd->id * dev->nic_info.db_page_size);919919+ uresp.qp_id = qp->id;920920+ uresp.sq_dbid = qp->sq.dbid;921921+ uresp.num_sq_pages = 1;922922+ uresp.sq_page_size = qp->sq.len;923923+ uresp.sq_page_addr[0] = qp->sq.pa;924924+ uresp.num_wqe_allocated = qp->sq.max_cnt;925925+ if (!srq) {926926+ uresp.rq_dbid = qp->rq.dbid;927927+ uresp.num_rq_pages = 1;928928+ uresp.rq_page_size = qp->rq.len;929929+ uresp.rq_page_addr[0] = qp->rq.pa;930930+ uresp.num_rqe_allocated = qp->rq.max_cnt;931931+ }932932+ uresp.db_page_addr = usr_db;933933+ uresp.db_page_size = 
dev->nic_info.db_page_size;
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		/* GEN2: RQ doorbell offset and shift depend on qp id */
		uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
		uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ?
		    OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET;
		uresp.db_shift = (qp->id < 128) ? 24 : 16;
	} else {
		uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}
	uresp.free_wqe_delta = qp->sq.free_delta;
	uresp.free_rqe_delta = qp->rq.free_delta;

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		ocrdma_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	/* make the SQ (and RQ when not SRQ-backed) pages mmap-able */
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	/* undo the SQ mapping added above */
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}

/* Compute the kernel-mapped SQ/RQ doorbell addresses for the QP.
 * GEN2 adapters select the RQ doorbell offset by qp id; older families
 * use fixed SQ/RQ offsets within the PD's doorbell page.
 */
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			((qp->id < 128) ?
			 OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET);
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

/* Allocate per-WQE and per-RQE wr_id bookkeeping tables (kernel QPs only).
 * On partial failure the caller's error path frees whichever table was
 * allocated, so no cleanup is done here.
 */
static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

/* Initialize the software QP state from the creation attributes. */
static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	/* inbound read/write always enabled */
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
}

/* Take references on the PD, both CQs and the optional SRQ used by the
 * new QP, and publish its qp number.
 */
static void ocrdma_set_qp_use_cnt(struct ocrdma_qp *qp, struct ocrdma_pd *pd)
{
	atomic_inc(&pd->use_cnt);
	atomic_inc(&qp->sq_cq->use_cnt);
	atomic_inc(&qp->rq_cq->use_cnt);
	if (qp->srq)
		atomic_inc(&qp->srq->use_cnt);
	qp->ibqp.qp_num = qp->id;
}

/* Remember the GSI QP's CQs so later QP creation keeps them private. */
static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct 
ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = pd->dev;
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	qp->dev = dev;
	ocrdma_set_qp_init_params(qp, pd, attrs);

	/* dev_lock serializes QP creation against other verbs on the device */
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
				      ureq.dpp_cq_id,
				      &dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user space QP's wr_id table are managed in library */
	if (udata == NULL) {
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
				  OCRDMA_QP_FAST_REG);
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	ocrdma_set_qp_use_cnt(qp, pd);
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	/* unwind in reverse order of setup; wr_id tables freed below */
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}

/* Core of modify_qp: run the software state machine (when IB_QP_STATE
 * is being changed) and then push the new attributes to the adapter.
 * Callers hold whatever locking they need; ocrdma_destroy_qp() also
 * uses this to force the QP into ERROR.
 */
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_machine(qp, attr->qp_state, &old_qps);
	/* if new and previous states are same hw doesn't need to
	 * know about it.
	 */
	if (status < 0)
		return status;
	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
	return status;
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	/* synchronize with multiple contexts trying to change/retrieve qps */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
		ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for "
			   "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
			   __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
			   old_qps, new_qps);
		goto param_err;
	}

	status 
= _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

/* Map an integer MTU to the IB enum; unknown values fall back to 1024. */
static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

/* Translate ocrdma capability flags to IB access flags. */
static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}

/* Query QP attributes: fetch hardware state via mailbox and unpack the
 * packed ocrdma_qp_params fields into ib_qp_attr/ib_qp_init_attr.
 */
int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = qp->dev;

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	/* NOTE(review): state is reported as a constant INIT rather than
	 * derived from qp->state or params — looks suspicious; confirm.
	 */
	qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
	qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
	/* NOTE(review): the shift is applied to the return value of
	 * ocrdma_mtu_int_to_enum(), not to the masked field before the
	 * call — verify against OCRDMA_QP_PARAMS_PATH_MTU_* definitions.
	 */
	qp_attr->path_mtu =
		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
				       OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				       OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
	       sizeof(params.dgid));
	qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
	    OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
	qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
					  OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
	/* NOTE(review): SQ_PSN mask combined with TCLASS shift — the mask
	 * name does not match the shift name; verify against ocrdma_sli.h.
	 */
	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
					      OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
					      OCRDMA_QP_PARAMS_TCLASS_SHIFT;

	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	qp_attr->ah_attr.port_num = 1;
	qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
			       OCRDMA_QP_PARAMS_SL_MASK) >>
			       OCRDMA_QP_PARAMS_SL_SHIFT;
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
	    OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	qp_attr->ah_attr.src_path_bits = 0;
	qp_attr->ah_attr.static_rate = 0;
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
		    OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
					OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
mbx_err:
	return status;
}

/* Flip the shadow-table occupancy bit for SRQ index idx; the bitfield
 * tracks which SRQ shadow entries are free (caller holds srq->q_lock).
 */
static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
{
	int i = idx / 32;
	unsigned int mask = (1 << (idx % 32));

	if (srq->idx_bit_fields[i] & mask)
		srq->idx_bit_fields[i] &= ~mask;
	else
		srq->idx_bit_fields[i] |= mask;
}

/* Number of free entries in a circular hardware queue, adjusted by
 * free_delta (entries reserved for the hardware).
 */
static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
	int free_cnt;
	if (q->head >= q->tail)
		free_cnt = (q->max_cnt - q->head) + q->tail;
	else
		free_cnt = q->tail - q->head;
	if (q->free_delta)
		free_cnt -= q->free_delta;
	return free_cnt;
}

static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
	return (qp->sq.tail == qp->sq.head &&
		ocrdma_hwq_free_cnt(&qp->sq) ? 1 : 0);
}

static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
	return (qp->rq.tail == qp->rq.head) ? 
1 : 0;
}

/* Address of the current head entry of a hardware queue. */
static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
	return q->va + (q->head * q->entry_size);
}

/* Address of an arbitrary entry of a hardware queue. */
static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
				      u32 idx)
{
	return q->va + (idx * q->entry_size);
}

/* Advance the producer index, wrapping via the max_wqe_idx mask. */
static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
	q->head = (q->head + 1) & q->max_wqe_idx;
}

/* Advance the consumer index, wrapping via the max_wqe_idx mask. */
static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
	q->tail = (q->tail + 1) & q->max_wqe_idx;
}

/* discard the cqe for a given QP */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
	unsigned long cq_flags;
	unsigned long flags;
	int discard_cnt = 0;
	u32 cur_getp, stop_getp;
	struct ocrdma_cqe *cqe;
	u32 qpn = 0;

	spin_lock_irqsave(&cq->cq_lock, cq_flags);

	/* traverse through the CQEs in the hw CQ,
	 * find the matching CQE for a given qp,
	 * mark the matching one discarded by clearing qpn.
	 * ring the doorbell in the poll_cq() as
	 * we don't complete out of order cqe.
	 */

	cur_getp = cq->getp;
	/* find upto when do we reap the cq. */
	stop_getp = cur_getp;
	do {
		/* stop early once this QP's queues are drained */
		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
			break;

		cqe = cq->va + cur_getp;
		/* if (a) done reaping whole hw cq, or
		 * (b) qp_xq becomes empty.
		 * then exit
		 */
		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
		/* if previously discarded cqe found, skip that too. */
		/* check for matching qp */
		if (qpn == 0 || qpn != qp->id)
			goto skip_cqe;

		/* mark cqe discarded so that it is not picked up later
		 * in the poll_cq().
		 */
		discard_cnt += 1;
		cqe->cmn.qpn = 0;
		if (is_cqe_for_sq(cqe))
			ocrdma_hwq_inc_tail(&qp->sq);
		else {
			if (qp->srq) {
				spin_lock_irqsave(&qp->srq->q_lock, flags);
				ocrdma_hwq_inc_tail(&qp->srq->rq);
				ocrdma_srq_toggle_bit(qp->srq, cur_getp);
				spin_unlock_irqrestore(&qp->srq->q_lock, flags);

			} else
				ocrdma_hwq_inc_tail(&qp->rq);
		}
skip_cqe:
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
	} while (cur_getp != stop_getp);
	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}

/* Unlink the QP from its CQs' flush lists (if queued there). */
static void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
	int found = false;
	unsigned long flags;
	struct ocrdma_dev *dev = qp->dev;
	/* sync with any active CQ poll */

	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (found)
		list_del(&qp->sq_entry);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (found)
			list_del(&qp->rq_entry);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

int ocrdma_destroy_qp(struct ib_qp *ibqp)
{
	int status;
	struct ocrdma_pd *pd;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	struct ib_qp_attr attrs;
	int attr_mask = IB_QP_STATE;
	unsigned long wq_flags = 0, rq_flags = 0;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	attrs.qp_state = IB_QPS_ERR;
	pd = qp->pd;

	/* change the QP state to ERROR */
	
_ocrdma_modify_qp(ibqp, &attrs, attr_mask);14451445+14461446+ /* ensure that CQEs for newly created QP (whose id may be same with14471447+ * one which just getting destroyed are same), dont get14481448+ * discarded until the old CQEs are discarded.14491449+ */14501450+ mutex_lock(&dev->dev_lock);14511451+ status = ocrdma_mbx_destroy_qp(dev, qp);14521452+14531453+ /*14541454+ * acquire CQ lock while destroy is in progress, in order to14551455+ * protect against proessing in-flight CQEs for this QP.14561456+ */14571457+ spin_lock_irqsave(&qp->sq_cq->cq_lock, wq_flags);14581458+ if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))14591459+ spin_lock_irqsave(&qp->rq_cq->cq_lock, rq_flags);14601460+14611461+ ocrdma_del_qpn_map(dev, qp);14621462+14631463+ if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))14641464+ spin_unlock_irqrestore(&qp->rq_cq->cq_lock, rq_flags);14651465+ spin_unlock_irqrestore(&qp->sq_cq->cq_lock, wq_flags);14661466+14671467+ if (!pd->uctx) {14681468+ ocrdma_discard_cqes(qp, qp->sq_cq);14691469+ ocrdma_discard_cqes(qp, qp->rq_cq);14701470+ }14711471+ mutex_unlock(&dev->dev_lock);14721472+14731473+ if (pd->uctx) {14741474+ ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len);14751475+ if (!qp->srq)14761476+ ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len);14771477+ }14781478+14791479+ ocrdma_del_flush_qp(qp);14801480+14811481+ atomic_dec(&qp->pd->use_cnt);14821482+ atomic_dec(&qp->sq_cq->use_cnt);14831483+ atomic_dec(&qp->rq_cq->use_cnt);14841484+ if (qp->srq)14851485+ atomic_dec(&qp->srq->use_cnt);14861486+ kfree(qp->wqe_wr_id_tbl);14871487+ kfree(qp->rqe_wr_id_tbl);14881488+ kfree(qp);14891489+ return status;14901490+}14911491+14921492+static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)14931493+{14941494+ int status;14951495+ struct ocrdma_create_srq_uresp uresp;14961496+14971497+ uresp.rq_dbid = srq->rq.dbid;14981498+ uresp.num_rq_pages = 1;14991499+ uresp.rq_page_addr[0] = srq->rq.pa;15001500+ uresp.rq_page_size = 
srq->rq.len;15011501+ uresp.db_page_addr = srq->dev->nic_info.unmapped_db +15021502+ (srq->pd->id * srq->dev->nic_info.db_page_size);15031503+ uresp.db_page_size = srq->dev->nic_info.db_page_size;15041504+ uresp.num_rqe_allocated = srq->rq.max_cnt;15051505+ uresp.free_rqe_delta = 1;15061506+ if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {15071507+ uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;15081508+ uresp.db_shift = 24;15091509+ } else {15101510+ uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;15111511+ uresp.db_shift = 16;15121512+ }15131513+15141514+ status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));15151515+ if (status)15161516+ return status;15171517+ status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],15181518+ uresp.rq_page_size);15191519+ if (status)15201520+ return status;15211521+ return status;15221522+}15231523+15241524+struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,15251525+ struct ib_srq_init_attr *init_attr,15261526+ struct ib_udata *udata)15271527+{15281528+ int status = -ENOMEM;15291529+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);15301530+ struct ocrdma_dev *dev = pd->dev;15311531+ struct ocrdma_srq *srq;15321532+15331533+ if (init_attr->attr.max_sge > dev->attr.max_recv_sge)15341534+ return ERR_PTR(-EINVAL);15351535+ if (init_attr->attr.max_wr > dev->attr.max_rqe)15361536+ return ERR_PTR(-EINVAL);15371537+15381538+ srq = kzalloc(sizeof(*srq), GFP_KERNEL);15391539+ if (!srq)15401540+ return ERR_PTR(status);15411541+15421542+ spin_lock_init(&srq->q_lock);15431543+ srq->dev = dev;15441544+ srq->pd = pd;15451545+ srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);15461546+ status = ocrdma_mbx_create_srq(srq, init_attr, pd);15471547+ if (status)15481548+ goto err;15491549+15501550+ if (udata == NULL) {15511551+ srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,15521552+ GFP_KERNEL);15531553+ if (srq->rqe_wr_id_tbl == NULL)15541554+ goto arm_err;15551555+15561556+ srq->bit_fields_len = 
(srq->rq.max_cnt / 32) +
		    (srq->rq.max_cnt % 32 ? 1 : 0);
		srq->idx_bit_fields =
		    kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
		if (srq->idx_bit_fields == NULL)
			goto arm_err;
		/* all bits set: every shadow-table entry starts out free */
		memset(srq->idx_bit_fields, 0xff,
		       srq->bit_fields_len * sizeof(u32));
	}

	if (init_attr->attr.srq_limit) {
		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
		if (status)
			goto arm_err;
	}

	atomic_set(&srq->use_cnt, 0);
	if (udata) {
		status = ocrdma_copy_srq_uresp(srq, udata);
		if (status)
			goto arm_err;
	}

	atomic_inc(&pd->use_cnt);
	return &srq->ibsrq;

arm_err:
	ocrdma_mbx_destroy_srq(dev, srq);
err:
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq->idx_bit_fields);
	kfree(srq);
	return ERR_PTR(status);
}

/* Modify SRQ attributes; resizing (IB_SRQ_MAX_WR) is not supported. */
int ocrdma_modify_srq(struct ib_srq *ibsrq,
		      struct ib_srq_attr *srq_attr,
		      enum ib_srq_attr_mask srq_attr_mask,
		      struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev;

	srq = get_ocrdma_srq(ibsrq);
	dev = srq->dev;
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		status = -EINVAL;
	else
		status = ocrdma_mbx_modify_srq(srq, srq_attr);
	return status;
}

/* Query SRQ attributes from the adapter via mailbox. */
int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	int status;
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev;

	srq = get_ocrdma_srq(ibsrq);
	dev = srq->dev;
	status = ocrdma_mbx_query_srq(srq, srq_attr);
	return status;
}

/* Destroy an SRQ.  Refused with -EAGAIN while QPs still reference it. */
int ocrdma_destroy_srq(struct ib_srq *ibsrq)
{
	int status;
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev;

	srq = get_ocrdma_srq(ibsrq);
	dev = srq->dev;
	if (atomic_read(&srq->use_cnt)) {
		ocrdma_err("%s(%d) err, srq=0x%x in use\n",
			   __func__, dev->id, srq->id);
		return -EAGAIN;
	}

	status = ocrdma_mbx_destroy_srq(dev, srq);

	if (srq->pd->uctx)
		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);

	atomic_dec(&srq->pd->use_cnt);
	kfree(srq->idx_bit_fields);
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq);
	return status;
}

/* unprivileged verbs and their support functions. */

/* Fill the UD extended header that follows the base WQE header. */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
				struct ocrdma_hdr_wqe *hdr,
				struct ib_send_wr *wr)
{
	struct ocrdma_ewqe_ud_hdr *ud_hdr =
		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
	struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);

	ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
	if (qp->qp_type == IB_QPT_GSI)
		ud_hdr->qkey = qp->qkey;
	else
		ud_hdr->qkey = wr->wr.ud.remote_qkey;
	ud_hdr->rsvd_ahid = ah->id;
}

/* Copy an ib_sge list into WQE SGEs, accumulating hdr->total_len.
 * With zero SGEs a single zeroed SGE is emitted instead.
 */
static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
			      struct ocrdma_sge *sge, int num_sge,
			      struct ib_sge *sg_list)
{
	int i;

	for (i = 0; i < num_sge; i++) {
		sge[i].lrkey = sg_list[i].lkey;
		sge[i].addr_lo = sg_list[i].addr;
		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
		sge[i].len = sg_list[i].length;
		hdr->total_len += sg_list[i].length;
	}
	if (num_sge == 0)
		memset(sge, 0, sizeof(*sge));
}

/* Build the data portion of a WQE: either inline payload (copied into
 * the WQE itself, limited to qp->max_inline_data) or an SGE list.
 * Encodes the final WQE size into hdr->cw.  Returns 0 or -EINVAL when
 * the inline payload is too large.
 */
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
				    struct ocrdma_hdr_wqe *hdr,
				    struct ocrdma_sge *sge,
				    struct ib_send_wr *wr, u32 wqe_size)
{
	if (wr->send_flags & IB_SEND_INLINE) {
		if (wr->sg_list[0].length > qp->max_inline_data) {
			ocrdma_err("%s() supported_len=0x%x,"
				   " unspported len req=0x%x\n", __func__,
				   qp->max_inline_data, wr->sg_list[0].length);
			return -EINVAL;
		}
		memcpy(sge,
		       (void *)(unsigned long)wr->sg_list[0].addr,
		       wr->sg_list[0].length);
		hdr->total_len = wr->sg_list[0].length;
		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
	} else {
		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
		if (wr->num_sge)
			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
		else
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	}
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	return 0;
}

/* Build a SEND WQE; UD/GSI QPs get the UD header between the base
 * header and the SGEs.
 */
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			     struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *sge;
	u32 wqe_size = sizeof(*hdr);

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		ocrdma_build_ud_hdr(qp, hdr, wr);
		sge = (struct ocrdma_sge *)(hdr + 2);
		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
	} else
		sge = (struct ocrdma_sge *)(hdr + 1);

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	return status;
}

/* Build an RDMA WRITE WQE: remote address SGE follows the header,
 * local data follows that.
 */
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);

	status = ocrdma_build_inline_sges(qp, hdr, 
sge, wr, wqe_size);17381738+ if (status)17391739+ return status;17401740+ ext_rw->addr_lo = wr->wr.rdma.remote_addr;17411741+ ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);17421742+ ext_rw->lrkey = wr->wr.rdma.rkey;17431743+ ext_rw->len = hdr->total_len;17441744+ return 0;17451745+}17461746+17471747+static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,17481748+ struct ib_send_wr *wr)17491749+{17501750+ struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);17511751+ struct ocrdma_sge *sge = ext_rw + 1;17521752+ u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +17531753+ sizeof(struct ocrdma_hdr_wqe);17541754+17551755+ ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);17561756+ hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);17571757+ hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);17581758+ hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);17591759+17601760+ ext_rw->addr_lo = wr->wr.rdma.remote_addr;17611761+ ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);17621762+ ext_rw->lrkey = wr->wr.rdma.rkey;17631763+ ext_rw->len = hdr->total_len;17641764+}17651765+17661766+static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)17671767+{17681768+ u32 val = qp->sq.dbid | (1 << 16);17691769+17701770+ iowrite32(val, qp->sq_db);17711771+}17721772+17731773+int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,17741774+ struct ib_send_wr **bad_wr)17751775+{17761776+ int status = 0;17771777+ struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);17781778+ struct ocrdma_hdr_wqe *hdr;17791779+ unsigned long flags;17801780+17811781+ spin_lock_irqsave(&qp->q_lock, flags);17821782+ if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {17831783+ spin_unlock_irqrestore(&qp->q_lock, flags);17841784+ return -EINVAL;17851785+ }17861786+17871787+ while (wr) {17881788+ if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||17891789+ wr->num_sge > qp->sq.max_sges) {17901790+ status = -ENOMEM;17911791+ 
break;17921792+ }17931793+ hdr = ocrdma_hwq_head(&qp->sq);17941794+ hdr->cw = 0;17951795+ if (wr->send_flags & IB_SEND_SIGNALED)17961796+ hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);17971797+ if (wr->send_flags & IB_SEND_FENCE)17981798+ hdr->cw |=17991799+ (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);18001800+ if (wr->send_flags & IB_SEND_SOLICITED)18011801+ hdr->cw |=18021802+ (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);18031803+ hdr->total_len = 0;18041804+ switch (wr->opcode) {18051805+ case IB_WR_SEND_WITH_IMM:18061806+ hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);18071807+ hdr->immdt = ntohl(wr->ex.imm_data);18081808+ case IB_WR_SEND:18091809+ hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);18101810+ ocrdma_build_send(qp, hdr, wr);18111811+ break;18121812+ case IB_WR_SEND_WITH_INV:18131813+ hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);18141814+ hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);18151815+ hdr->lkey = wr->ex.invalidate_rkey;18161816+ status = ocrdma_build_send(qp, hdr, wr);18171817+ break;18181818+ case IB_WR_RDMA_WRITE_WITH_IMM:18191819+ hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);18201820+ hdr->immdt = ntohl(wr->ex.imm_data);18211821+ case IB_WR_RDMA_WRITE:18221822+ hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);18231823+ status = ocrdma_build_write(qp, hdr, wr);18241824+ break;18251825+ case IB_WR_RDMA_READ_WITH_INV:18261826+ hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);18271827+ case IB_WR_RDMA_READ:18281828+ ocrdma_build_read(qp, hdr, wr);18291829+ break;18301830+ case IB_WR_LOCAL_INV:18311831+ hdr->cw |=18321832+ (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);18331833+ hdr->cw |= (sizeof(struct ocrdma_hdr_wqe) /18341834+ OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;18351835+ hdr->lkey = wr->ex.invalidate_rkey;18361836+ break;18371837+ default:18381838+ status = -EINVAL;18391839+ break;18401840+ }18411841+ if (status) {18421842+ *bad_wr = wr;18431843+ break;18441844+ 
}18451845+ if (wr->send_flags & IB_SEND_SIGNALED)18461846+ qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;18471847+ else18481848+ qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;18491849+ qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;18501850+ ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &18511851+ OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);18521852+ /* make sure wqe is written before adapter can access it */18531853+ wmb();18541854+ /* inform hw to start processing it */18551855+ ocrdma_ring_sq_db(qp);18561856+18571857+ /* update pointer, counter for next wr */18581858+ ocrdma_hwq_inc_head(&qp->sq);18591859+ wr = wr->next;18601860+ }18611861+ spin_unlock_irqrestore(&qp->q_lock, flags);18621862+ return status;18631863+}18641864+18651865+static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)18661866+{18671867+ u32 val = qp->rq.dbid | (1 << OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp));18681868+18691869+ iowrite32(val, qp->rq_db);18701870+}18711871+18721872+static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,18731873+ u16 tag)18741874+{18751875+ u32 wqe_size = 0;18761876+ struct ocrdma_sge *sge;18771877+ if (wr->num_sge)18781878+ wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);18791879+ else18801880+ wqe_size = sizeof(*sge) + sizeof(*rqe);18811881+18821882+ rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<18831883+ OCRDMA_WQE_SIZE_SHIFT);18841884+ rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);18851885+ rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);18861886+ rqe->total_len = 0;18871887+ rqe->rsvd_tag = tag;18881888+ sge = (struct ocrdma_sge *)(rqe + 1);18891889+ ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);18901890+ ocrdma_cpu_to_le32(rqe, wqe_size);18911891+}18921892+18931893+int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,18941894+ struct ib_recv_wr **bad_wr)18951895+{18961896+ int status = 0;18971897+ unsigned long flags;18981898+ struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);18991899+ struct 
ocrdma_hdr_wqe *rqe;19001900+19011901+ spin_lock_irqsave(&qp->q_lock, flags);19021902+ if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {19031903+ spin_unlock_irqrestore(&qp->q_lock, flags);19041904+ *bad_wr = wr;19051905+ return -EINVAL;19061906+ }19071907+ while (wr) {19081908+ if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||19091909+ wr->num_sge > qp->rq.max_sges) {19101910+ *bad_wr = wr;19111911+ status = -ENOMEM;19121912+ break;19131913+ }19141914+ rqe = ocrdma_hwq_head(&qp->rq);19151915+ ocrdma_build_rqe(rqe, wr, 0);19161916+19171917+ qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;19181918+ /* make sure rqe is written before adapter can access it */19191919+ wmb();19201920+19211921+ /* inform hw to start processing it */19221922+ ocrdma_ring_rq_db(qp);19231923+19241924+ /* update pointer, counter for next wr */19251925+ ocrdma_hwq_inc_head(&qp->rq);19261926+ wr = wr->next;19271927+ }19281928+ spin_unlock_irqrestore(&qp->q_lock, flags);19291929+ return status;19301930+}19311931+19321932+/* cqe for srq's rqe can potentially arrive out of order.19331933+ * index gives the entry in the shadow table where to store19341934+ * the wr_id. 
tag/index is returned in cqe to reference back19351935+ * for a given rqe.19361936+ */19371937+static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)19381938+{19391939+ int row = 0;19401940+ int indx = 0;19411941+19421942+ for (row = 0; row < srq->bit_fields_len; row++) {19431943+ if (srq->idx_bit_fields[row]) {19441944+ indx = ffs(srq->idx_bit_fields[row]);19451945+ indx = (row * 32) + (indx - 1);19461946+ if (indx >= srq->rq.max_cnt)19471947+ BUG();19481948+ ocrdma_srq_toggle_bit(srq, indx);19491949+ break;19501950+ }19511951+ }19521952+19531953+ if (row == srq->bit_fields_len)19541954+ BUG();19551955+ return indx;19561956+}19571957+19581958+static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)19591959+{19601960+ u32 val = srq->rq.dbid | (1 << 16);19611961+19621962+ iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);19631963+}19641964+19651965+int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,19661966+ struct ib_recv_wr **bad_wr)19671967+{19681968+ int status = 0;19691969+ unsigned long flags;19701970+ struct ocrdma_srq *srq;19711971+ struct ocrdma_hdr_wqe *rqe;19721972+ u16 tag;19731973+19741974+ srq = get_ocrdma_srq(ibsrq);19751975+19761976+ spin_lock_irqsave(&srq->q_lock, flags);19771977+ while (wr) {19781978+ if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||19791979+ wr->num_sge > srq->rq.max_sges) {19801980+ status = -ENOMEM;19811981+ *bad_wr = wr;19821982+ break;19831983+ }19841984+ tag = ocrdma_srq_get_idx(srq);19851985+ rqe = ocrdma_hwq_head(&srq->rq);19861986+ ocrdma_build_rqe(rqe, wr, tag);19871987+19881988+ srq->rqe_wr_id_tbl[tag] = wr->wr_id;19891989+ /* make sure rqe is written before adapter can perform DMA */19901990+ wmb();19911991+ /* inform hw to start processing it */19921992+ ocrdma_ring_srq_db(srq);19931993+ /* update pointer, counter for next wr */19941994+ ocrdma_hwq_inc_head(&srq->rq);19951995+ wr = wr->next;19961996+ }19971997+ spin_unlock_irqrestore(&srq->q_lock, flags);19981998+ return 
status;19991999+}20002000+20012001+static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)20022002+{20032003+ enum ib_wc_status ibwc_status = IB_WC_GENERAL_ERR;20042004+20052005+ switch (status) {20062006+ case OCRDMA_CQE_GENERAL_ERR:20072007+ ibwc_status = IB_WC_GENERAL_ERR;20082008+ break;20092009+ case OCRDMA_CQE_LOC_LEN_ERR:20102010+ ibwc_status = IB_WC_LOC_LEN_ERR;20112011+ break;20122012+ case OCRDMA_CQE_LOC_QP_OP_ERR:20132013+ ibwc_status = IB_WC_LOC_QP_OP_ERR;20142014+ break;20152015+ case OCRDMA_CQE_LOC_EEC_OP_ERR:20162016+ ibwc_status = IB_WC_LOC_EEC_OP_ERR;20172017+ break;20182018+ case OCRDMA_CQE_LOC_PROT_ERR:20192019+ ibwc_status = IB_WC_LOC_PROT_ERR;20202020+ break;20212021+ case OCRDMA_CQE_WR_FLUSH_ERR:20222022+ ibwc_status = IB_WC_WR_FLUSH_ERR;20232023+ break;20242024+ case OCRDMA_CQE_MW_BIND_ERR:20252025+ ibwc_status = IB_WC_MW_BIND_ERR;20262026+ break;20272027+ case OCRDMA_CQE_BAD_RESP_ERR:20282028+ ibwc_status = IB_WC_BAD_RESP_ERR;20292029+ break;20302030+ case OCRDMA_CQE_LOC_ACCESS_ERR:20312031+ ibwc_status = IB_WC_LOC_ACCESS_ERR;20322032+ break;20332033+ case OCRDMA_CQE_REM_INV_REQ_ERR:20342034+ ibwc_status = IB_WC_REM_INV_REQ_ERR;20352035+ break;20362036+ case OCRDMA_CQE_REM_ACCESS_ERR:20372037+ ibwc_status = IB_WC_REM_ACCESS_ERR;20382038+ break;20392039+ case OCRDMA_CQE_REM_OP_ERR:20402040+ ibwc_status = IB_WC_REM_OP_ERR;20412041+ break;20422042+ case OCRDMA_CQE_RETRY_EXC_ERR:20432043+ ibwc_status = IB_WC_RETRY_EXC_ERR;20442044+ break;20452045+ case OCRDMA_CQE_RNR_RETRY_EXC_ERR:20462046+ ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;20472047+ break;20482048+ case OCRDMA_CQE_LOC_RDD_VIOL_ERR:20492049+ ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;20502050+ break;20512051+ case OCRDMA_CQE_REM_INV_RD_REQ_ERR:20522052+ ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;20532053+ break;20542054+ case OCRDMA_CQE_REM_ABORT_ERR:20552055+ ibwc_status = IB_WC_REM_ABORT_ERR;20562056+ break;20572057+ case OCRDMA_CQE_INV_EECN_ERR:20582058+ ibwc_status = IB_WC_INV_EECN_ERR;20592059+ 
break;20602060+ case OCRDMA_CQE_INV_EEC_STATE_ERR:20612061+ ibwc_status = IB_WC_INV_EEC_STATE_ERR;20622062+ break;20632063+ case OCRDMA_CQE_FATAL_ERR:20642064+ ibwc_status = IB_WC_FATAL_ERR;20652065+ break;20662066+ case OCRDMA_CQE_RESP_TIMEOUT_ERR:20672067+ ibwc_status = IB_WC_RESP_TIMEOUT_ERR;20682068+ break;20692069+ default:20702070+ ibwc_status = IB_WC_GENERAL_ERR;20712071+ break;20722072+ };20732073+ return ibwc_status;20742074+}20752075+20762076+static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,20772077+ u32 wqe_idx)20782078+{20792079+ struct ocrdma_hdr_wqe *hdr;20802080+ struct ocrdma_sge *rw;20812081+ int opcode;20822082+20832083+ hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);20842084+20852085+ ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;20862086+ /* Undo the hdr->cw swap */20872087+ opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;20882088+ switch (opcode) {20892089+ case OCRDMA_WRITE:20902090+ ibwc->opcode = IB_WC_RDMA_WRITE;20912091+ break;20922092+ case OCRDMA_READ:20932093+ rw = (struct ocrdma_sge *)(hdr + 1);20942094+ ibwc->opcode = IB_WC_RDMA_READ;20952095+ ibwc->byte_len = rw->len;20962096+ break;20972097+ case OCRDMA_SEND:20982098+ ibwc->opcode = IB_WC_SEND;20992099+ break;21002100+ case OCRDMA_LKEY_INV:21012101+ ibwc->opcode = IB_WC_LOCAL_INV;21022102+ break;21032103+ default:21042104+ ibwc->status = IB_WC_GENERAL_ERR;21052105+ ocrdma_err("%s() invalid opcode received = 0x%x\n",21062106+ __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);21072107+ break;21082108+ };21092109+}21102110+21112111+static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,21122112+ struct ocrdma_cqe *cqe)21132113+{21142114+ if (is_cqe_for_sq(cqe)) {21152115+ cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(21162116+ cqe->flags_status_srcqpn) &21172117+ ~OCRDMA_CQE_STATUS_MASK);21182118+ cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(21192119+ cqe->flags_status_srcqpn) |21202120+ (OCRDMA_CQE_WR_FLUSH_ERR <<21212121+ 
OCRDMA_CQE_STATUS_SHIFT));21222122+ } else {21232123+ if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {21242124+ cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(21252125+ cqe->flags_status_srcqpn) &21262126+ ~OCRDMA_CQE_UD_STATUS_MASK);21272127+ cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(21282128+ cqe->flags_status_srcqpn) |21292129+ (OCRDMA_CQE_WR_FLUSH_ERR <<21302130+ OCRDMA_CQE_UD_STATUS_SHIFT));21312131+ } else {21322132+ cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(21332133+ cqe->flags_status_srcqpn) &21342134+ ~OCRDMA_CQE_STATUS_MASK);21352135+ cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(21362136+ cqe->flags_status_srcqpn) |21372137+ (OCRDMA_CQE_WR_FLUSH_ERR <<21382138+ OCRDMA_CQE_STATUS_SHIFT));21392139+ }21402140+ }21412141+}21422142+21432143+static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,21442144+ struct ocrdma_qp *qp, int status)21452145+{21462146+ bool expand = false;21472147+21482148+ ibwc->byte_len = 0;21492149+ ibwc->qp = &qp->ibqp;21502150+ ibwc->status = ocrdma_to_ibwc_err(status);21512151+21522152+ ocrdma_flush_qp(qp);21532153+ ocrdma_qp_state_machine(qp, IB_QPS_ERR, NULL);21542154+21552155+ /* if wqe/rqe pending for which cqe needs to be returned,21562156+ * trigger inflating it.21572157+ */21582158+ if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {21592159+ expand = true;21602160+ ocrdma_set_cqe_status_flushed(qp, cqe);21612161+ }21622162+ return expand;21632163+}21642164+21652165+static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,21662166+ struct ocrdma_qp *qp, int status)21672167+{21682168+ ibwc->opcode = IB_WC_RECV;21692169+ ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];21702170+ ocrdma_hwq_inc_tail(&qp->rq);21712171+21722172+ return ocrdma_update_err_cqe(ibwc, cqe, qp, status);21732173+}21742174+21752175+static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,21762176+ struct ocrdma_qp *qp, int status)21772177+{21782178+ 
ocrdma_update_wc(qp, ibwc, qp->sq.tail);21792179+ ocrdma_hwq_inc_tail(&qp->sq);21802180+21812181+ return ocrdma_update_err_cqe(ibwc, cqe, qp, status);21822182+}21832183+21842184+21852185+static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,21862186+ struct ocrdma_cqe *cqe, struct ib_wc *ibwc,21872187+ bool *polled, bool *stop)21882188+{21892189+ bool expand;21902190+ int status = (le32_to_cpu(cqe->flags_status_srcqpn) &21912191+ OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;21922192+21932193+ /* when hw sq is empty, but rq is not empty, so we continue21942194+ * to keep the cqe in order to get the cq event again.21952195+ */21962196+ if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {21972197+ /* when cq for rq and sq is same, it is safe to return21982198+ * flush cqe for RQEs.21992199+ */22002200+ if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {22012201+ *polled = true;22022202+ status = OCRDMA_CQE_WR_FLUSH_ERR;22032203+ expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);22042204+ } else {22052205+ /* stop processing further cqe as this cqe is used for22062206+ * triggering cq event on buddy cq of RQ.22072207+ * When QP is destroyed, this cqe will be removed22082208+ * from the cq's hardware q.22092209+ */22102210+ *polled = false;22112211+ *stop = true;22122212+ expand = false;22132213+ }22142214+ } else {22152215+ *polled = true;22162216+ expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);22172217+ }22182218+ return expand;22192219+}22202220+22212221+static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,22222222+ struct ocrdma_cqe *cqe,22232223+ struct ib_wc *ibwc, bool *polled)22242224+{22252225+ bool expand = false;22262226+ int tail = qp->sq.tail;22272227+ u32 wqe_idx;22282228+22292229+ if (!qp->wqe_wr_id_tbl[tail].signaled) {22302230+ expand = true; /* CQE cannot be consumed yet */22312231+ *polled = false; /* WC cannot be consumed yet */22322232+ } else {22332233+ ibwc->status = IB_WC_SUCCESS;22342234+ ibwc->wc_flags = 0;22352235+ ibwc->qp = 
&qp->ibqp;22362236+ ocrdma_update_wc(qp, ibwc, tail);22372237+ *polled = true;22382238+ wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;22392239+ if (tail != wqe_idx)22402240+ expand = true; /* Coalesced CQE can't be consumed yet */22412241+ }22422242+ ocrdma_hwq_inc_tail(&qp->sq);22432243+ return expand;22442244+}22452245+22462246+static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,22472247+ struct ib_wc *ibwc, bool *polled, bool *stop)22482248+{22492249+ int status;22502250+ bool expand;22512251+22522252+ status = (le32_to_cpu(cqe->flags_status_srcqpn) &22532253+ OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;22542254+22552255+ if (status == OCRDMA_CQE_SUCCESS)22562256+ expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);22572257+ else22582258+ expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);22592259+ return expand;22602260+}22612261+22622262+static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)22632263+{22642264+ int status;22652265+22662266+ status = (le32_to_cpu(cqe->flags_status_srcqpn) &22672267+ OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;22682268+ ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &22692269+ OCRDMA_CQE_SRCQP_MASK;22702270+ ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &22712271+ OCRDMA_CQE_PKEY_MASK;22722272+ ibwc->wc_flags = IB_WC_GRH;22732273+ ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>22742274+ OCRDMA_CQE_UD_XFER_LEN_SHIFT);22752275+ return status;22762276+}22772277+22782278+static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,22792279+ struct ocrdma_cqe *cqe,22802280+ struct ocrdma_qp *qp)22812281+{22822282+ unsigned long flags;22832283+ struct ocrdma_srq *srq;22842284+ u32 wqe_idx;22852285+22862286+ srq = get_ocrdma_srq(qp->ibqp.srq);22872287+ wqe_idx = le32_to_cpu(cqe->rq.buftag_qpn) >> OCRDMA_CQE_BUFTAG_SHIFT;22882288+ ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];22892289+ spin_lock_irqsave(&srq->q_lock, flags);22902290+ 
ocrdma_srq_toggle_bit(srq, wqe_idx);22912291+ spin_unlock_irqrestore(&srq->q_lock, flags);22922292+ ocrdma_hwq_inc_tail(&srq->rq);22932293+}22942294+22952295+static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,22962296+ struct ib_wc *ibwc, bool *polled, bool *stop,22972297+ int status)22982298+{22992299+ bool expand;23002300+23012301+ /* when hw_rq is empty, but wq is not empty, so continue23022302+ * to keep the cqe to get the cq event again.23032303+ */23042304+ if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {23052305+ if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {23062306+ *polled = true;23072307+ status = OCRDMA_CQE_WR_FLUSH_ERR;23082308+ expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);23092309+ } else {23102310+ *polled = false;23112311+ *stop = true;23122312+ expand = false;23132313+ }23142314+ } else23152315+ expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);23162316+ return expand;23172317+}23182318+23192319+static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,23202320+ struct ocrdma_cqe *cqe, struct ib_wc *ibwc)23212321+{23222322+ ibwc->opcode = IB_WC_RECV;23232323+ ibwc->qp = &qp->ibqp;23242324+ ibwc->status = IB_WC_SUCCESS;23252325+23262326+ if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)23272327+ ocrdma_update_ud_rcqe(ibwc, cqe);23282328+ else23292329+ ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);23302330+23312331+ if (is_cqe_imm(cqe)) {23322332+ ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));23332333+ ibwc->wc_flags |= IB_WC_WITH_IMM;23342334+ } else if (is_cqe_wr_imm(cqe)) {23352335+ ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;23362336+ ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));23372337+ ibwc->wc_flags |= IB_WC_WITH_IMM;23382338+ } else if (is_cqe_invalidated(cqe)) {23392339+ ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);23402340+ ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;23412341+ }23422342+ if (qp->ibqp.srq)23432343+ ocrdma_update_free_srq_cqe(ibwc, cqe, 
qp);23442344+ else {23452345+ ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];23462346+ ocrdma_hwq_inc_tail(&qp->rq);23472347+ }23482348+}23492349+23502350+static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,23512351+ struct ib_wc *ibwc, bool *polled, bool *stop)23522352+{23532353+ int status;23542354+ bool expand = false;23552355+23562356+ ibwc->wc_flags = 0;23572357+ if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)23582358+ status = (le32_to_cpu(cqe->flags_status_srcqpn) &23592359+ OCRDMA_CQE_UD_STATUS_MASK) >>23602360+ OCRDMA_CQE_UD_STATUS_SHIFT;23612361+ else23622362+ status = (le32_to_cpu(cqe->flags_status_srcqpn) &23632363+ OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;23642364+23652365+ if (status == OCRDMA_CQE_SUCCESS) {23662366+ *polled = true;23672367+ ocrdma_poll_success_rcqe(qp, cqe, ibwc);23682368+ } else {23692369+ expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,23702370+ status);23712371+ }23722372+ return expand;23732373+}23742374+23752375+static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,23762376+ u16 cur_getp)23772377+{23782378+ if (cq->phase_change) {23792379+ if (cur_getp == 0)23802380+ cq->phase = (~cq->phase & OCRDMA_CQE_VALID);23812381+ } else23822382+ /* clear valid bit */23832383+ cqe->flags_status_srcqpn = 0;23842384+}23852385+23862386+static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,23872387+ struct ib_wc *ibwc)23882388+{23892389+ u16 qpn = 0;23902390+ int i = 0;23912391+ bool expand = false;23922392+ int polled_hw_cqes = 0;23932393+ struct ocrdma_qp *qp = NULL;23942394+ struct ocrdma_dev *dev = cq->dev;23952395+ struct ocrdma_cqe *cqe;23962396+ u16 cur_getp; bool polled = false; bool stop = false;23972397+23982398+ cur_getp = cq->getp;23992399+ while (num_entries) {24002400+ cqe = cq->va + cur_getp;24012401+ /* check whether valid cqe or not */24022402+ if (!is_cqe_valid(cq, cqe))24032403+ break;24042404+ qpn = (le32_to_cpu(cqe->cmn.qpn) & 
OCRDMA_CQE_QPN_MASK);24052405+ /* ignore discarded cqe */24062406+ if (qpn == 0)24072407+ goto skip_cqe;24082408+ qp = dev->qp_tbl[qpn];24092409+ BUG_ON(qp == NULL);24102410+24112411+ if (is_cqe_for_sq(cqe)) {24122412+ expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,24132413+ &stop);24142414+ } else {24152415+ expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,24162416+ &stop);24172417+ }24182418+ if (expand)24192419+ goto expand_cqe;24202420+ if (stop)24212421+ goto stop_cqe;24222422+ /* clear qpn to avoid duplicate processing by discard_cqe() */24232423+ cqe->cmn.qpn = 0;24242424+skip_cqe:24252425+ polled_hw_cqes += 1;24262426+ cur_getp = (cur_getp + 1) % cq->max_hw_cqe;24272427+ ocrdma_change_cq_phase(cq, cqe, cur_getp);24282428+expand_cqe:24292429+ if (polled) {24302430+ num_entries -= 1;24312431+ i += 1;24322432+ ibwc = ibwc + 1;24332433+ polled = false;24342434+ }24352435+ }24362436+stop_cqe:24372437+ cq->getp = cur_getp;24382438+ if (polled_hw_cqes || expand || stop) {24392439+ ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,24402440+ polled_hw_cqes);24412441+ }24422442+ return i;24432443+}24442444+24452445+/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. 
*/24462446+static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,24472447+ struct ocrdma_qp *qp, struct ib_wc *ibwc)24482448+{24492449+ int err_cqes = 0;24502450+24512451+ while (num_entries) {24522452+ if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))24532453+ break;24542454+ if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {24552455+ ocrdma_update_wc(qp, ibwc, qp->sq.tail);24562456+ ocrdma_hwq_inc_tail(&qp->sq);24572457+ } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {24582458+ ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];24592459+ ocrdma_hwq_inc_tail(&qp->rq);24602460+ } else24612461+ return err_cqes;24622462+ ibwc->byte_len = 0;24632463+ ibwc->status = IB_WC_WR_FLUSH_ERR;24642464+ ibwc = ibwc + 1;24652465+ err_cqes += 1;24662466+ num_entries -= 1;24672467+ }24682468+ return err_cqes;24692469+}24702470+24712471+int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)24722472+{24732473+ int cqes_to_poll = num_entries;24742474+ struct ocrdma_cq *cq = NULL;24752475+ unsigned long flags;24762476+ struct ocrdma_dev *dev;24772477+ int num_os_cqe = 0, err_cqes = 0;24782478+ struct ocrdma_qp *qp;24792479+24802480+ cq = get_ocrdma_cq(ibcq);24812481+ dev = cq->dev;24822482+24832483+ /* poll cqes from adapter CQ */24842484+ spin_lock_irqsave(&cq->cq_lock, flags);24852485+ num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);24862486+ spin_unlock_irqrestore(&cq->cq_lock, flags);24872487+ cqes_to_poll -= num_os_cqe;24882488+24892489+ if (cqes_to_poll) {24902490+ wc = wc + num_os_cqe;24912491+ /* adapter returns single error cqe when qp moves to24922492+ * error state. 
So insert error cqes with wc_status as24932493+ * FLUSHED for pending WQEs and RQEs of QP's SQ and RQ24942494+ * respectively which uses this CQ.24952495+ */24962496+ spin_lock_irqsave(&dev->flush_q_lock, flags);24972497+ list_for_each_entry(qp, &cq->sq_head, sq_entry) {24982498+ if (cqes_to_poll == 0)24992499+ break;25002500+ err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);25012501+ cqes_to_poll -= err_cqes;25022502+ num_os_cqe += err_cqes;25032503+ wc = wc + err_cqes;25042504+ }25052505+ spin_unlock_irqrestore(&dev->flush_q_lock, flags);25062506+ }25072507+ return num_os_cqe;25082508+}25092509+25102510+int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)25112511+{25122512+ struct ocrdma_cq *cq;25132513+ unsigned long flags;25142514+ struct ocrdma_dev *dev;25152515+ u16 cq_id;25162516+ u16 cur_getp;25172517+ struct ocrdma_cqe *cqe;25182518+25192519+ cq = get_ocrdma_cq(ibcq);25202520+ cq_id = cq->id;25212521+ dev = cq->dev;25222522+25232523+ spin_lock_irqsave(&cq->cq_lock, flags);25242524+ if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)25252525+ cq->armed = true;25262526+ if (cq_flags & IB_CQ_SOLICITED)25272527+ cq->solicited = true;25282528+25292529+ cur_getp = cq->getp;25302530+ cqe = cq->va + cur_getp;25312531+25322532+ /* check whether any valid cqe exist or not, if not then safe to25332533+ * arm. If cqe is not yet consumed, then let it get consumed and then25342534+ * we arm it to avoid false interrupts.25352535+ */25362536+ if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {25372537+ cq->arm_needed = false;25382538+ ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);25392539+ }25402540+ spin_unlock_irqrestore(&cq->cq_lock, flags);25412541+ return 0;25422542+}
+94
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
···11+/*******************************************************************22+ * This file is part of the Emulex RoCE Device Driver for *33+ * RoCE (RDMA over Converged Ethernet) adapters. *44+ * Copyright (C) 2008-2012 Emulex. All rights reserved. *55+ * EMULEX and SLI are trademarks of Emulex. *66+ * www.emulex.com *77+ * *88+ * This program is free software; you can redistribute it and/or *99+ * modify it under the terms of version 2 of the GNU General *1010+ * Public License as published by the Free Software Foundation. *1111+ * This program is distributed in the hope that it will be useful. *1212+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *1313+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *1414+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *1515+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *1616+ * TO BE LEGALLY INVALID. See the GNU General Public License for *1717+ * more details, a copy of which can be found in the file COPYING *1818+ * included with this package. 
*1919+ *2020+ * Contact Information:2121+ * linux-drivers@emulex.com2222+ *2323+ * Emulex2424+ * 3333 Susan Street2525+ * Costa Mesa, CA 926262626+ *******************************************************************/2727+2828+#ifndef __OCRDMA_VERBS_H__2929+#define __OCRDMA_VERBS_H__3030+3131+#include <linux/version.h>3232+int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,3333+ struct ib_send_wr **bad_wr);3434+int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,3535+ struct ib_recv_wr **bad_wr);3636+3737+int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);3838+int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);3939+4040+int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props);4141+int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);4242+int ocrdma_modify_port(struct ib_device *, u8 port, int mask,4343+ struct ib_port_modify *props);4444+4545+void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);4646+int ocrdma_query_gid(struct ib_device *, u8 port,4747+ int index, union ib_gid *gid);4848+int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);4949+5050+struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *,5151+ struct ib_udata *);5252+int ocrdma_dealloc_ucontext(struct ib_ucontext *);5353+5454+int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);5555+5656+struct ib_pd *ocrdma_alloc_pd(struct ib_device *,5757+ struct ib_ucontext *, struct ib_udata *);5858+int ocrdma_dealloc_pd(struct ib_pd *pd);5959+6060+struct ib_cq *ocrdma_create_cq(struct ib_device *, int entries, int vector,6161+ struct ib_ucontext *, struct ib_udata *);6262+int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);6363+int ocrdma_destroy_cq(struct ib_cq *);6464+6565+struct ib_qp *ocrdma_create_qp(struct ib_pd *,6666+ struct ib_qp_init_attr *attrs,6767+ struct ib_udata *);6868+int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,6969+ int attr_mask);7070+int 
ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,7171+ int attr_mask, struct ib_udata *udata);7272+int ocrdma_query_qp(struct ib_qp *,7373+ struct ib_qp_attr *qp_attr,7474+ int qp_attr_mask, struct ib_qp_init_attr *);7575+int ocrdma_destroy_qp(struct ib_qp *);7676+7777+struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,7878+ struct ib_udata *);7979+int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,8080+ enum ib_srq_attr_mask, struct ib_udata *);8181+int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);8282+int ocrdma_destroy_srq(struct ib_srq *);8383+int ocrdma_post_srq_recv(struct ib_srq *, struct ib_recv_wr *,8484+ struct ib_recv_wr **bad_recv_wr);8585+8686+int ocrdma_dereg_mr(struct ib_mr *);8787+struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);8888+struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *,8989+ struct ib_phys_buf *buffer_list,9090+ int num_phys_buf, int acc, u64 *iova_start);9191+struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,9292+ u64 virt, int acc, struct ib_udata *);9393+9494+#endif /* __OCRDMA_VERBS_H__ */