Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
"Another small cycle. Mostly cleanups and bug fixes, quite a bit
assisted from bots. There are a few new syzkaller splats that haven't
been solved yet but they should get into the rcs in a few weeks, I
think.

Summary:

- Update drivers to use common helpers for GUIDs, pkeys, bitmaps,
memset_startat, and others

- General code cleanups from bots

- Simplify some of the rxe pool code in preparation for a larger
rework

- Clean out old stuff from hns, including all support for hip06
devices

- Fix a bug where GID table entries could be missed if the table had
holes in it

 - Rename paths and sessions in rtrs for clarity

 - Consolidate the RoCE source port selection code

- NDR speed support in mlx5"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (83 commits)
RDMA/irdma: Remove the redundant return
RDMA/rxe: Use the standard method to produce udp source port
RDMA/irdma: Make the source udp port vary
RDMA/hns: Replace get_udp_sport with rdma_get_udp_sport
RDMA/core: Calculate UDP source port based on flow label or lqpn/rqpn
IB/qib: Fix typos
RDMA/rtrs-clt: Rename rtrs_clt to rtrs_clt_sess
RDMA/rtrs-srv: Rename rtrs_srv to rtrs_srv_sess
RDMA/rtrs-clt: Rename rtrs_clt_sess to rtrs_clt_path
RDMA/rtrs-srv: Rename rtrs_srv_sess to rtrs_srv_path
RDMA/rtrs: Rename rtrs_sess to rtrs_path
RDMA/hns: Modify the hop num of HIP09 EQ to 1
IB/iser: Align coding style across driver
IB/iser: Remove un-needed casting to/from void pointer
IB/iser: Don't suppress send completions
IB/iser: Rename ib_ret local variable
IB/iser: Fix RNR errors
IB/iser: Remove deprecated pi_guard module param
IB/mlx5: Expose NDR speed through MAD
RDMA/cxgb4: Set queue pair state when being queried
...
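Several commits above ("RDMA/rxe: Use the standard method to produce udp source port", "RDMA/core: Calculate UDP source port based on flow label or lqpn/rqpn", plus the hns and irdma patches beside them) route RoCEv2 source-port selection through one core helper, rdma_get_udp_sport(fl, lqpn, rqpn): prefer the flow label when the caller has one, otherwise derive entropy from the QPN pair so both ends compute the same port. A minimal call-site sketch, with a hypothetical context struct:

#include <rdma/ib_verbs.h>

struct qp_ctx {			/* hypothetical driver context */
	u32 flow_label;		/* 0 when no flow label was negotiated */
	u32 lqpn;
	u32 rqpn;
};

static u16 qp_roce_sport(const struct qp_ctx *qp)
{
	/* Uses the flow label if non-zero, else hashes lqpn/rqpn into
	 * one; either way the core helper folds the result into the
	 * UDP source-port range. */
	return rdma_get_udp_sport(qp->flow_label, qp->lqpn, qp->rqpn);
}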

+2229 -8794
+2 -2
drivers/block/rnbd/rnbd-clt.c
··· 433 433 schedule_work(&iu->work); 434 434 } 435 435 436 - static int send_usr_msg(struct rtrs_clt *rtrs, int dir, 436 + static int send_usr_msg(struct rtrs_clt_sess *rtrs, int dir, 437 437 struct rnbd_iu *iu, struct kvec *vec, 438 438 size_t len, struct scatterlist *sg, unsigned int sg_len, 439 439 void (*conf)(struct work_struct *work), ··· 1010 1010 struct request *rq, 1011 1011 struct rnbd_iu *iu) 1012 1012 { 1013 - struct rtrs_clt *rtrs = dev->sess->rtrs; 1013 + struct rtrs_clt_sess *rtrs = dev->sess->rtrs; 1014 1014 struct rtrs_permit *permit = iu->permit; 1015 1015 struct rnbd_msg_io msg; 1016 1016 struct rtrs_clt_req_ops req_ops;
+1 -1
drivers/block/rnbd/rnbd-clt.h
··· 75 75 76 76 struct rnbd_clt_session { 77 77 struct list_head list; 78 - struct rtrs_clt *rtrs; 78 + struct rtrs_clt_sess *rtrs; 79 79 wait_queue_head_t rtrs_waitq; 80 80 bool rtrs_ready; 81 81 struct rnbd_cpu_qlist __percpu
+8 -8
drivers/block/rnbd/rnbd-srv.c
··· 263 263 kfree(srv_sess); 264 264 } 265 265 266 - static int create_sess(struct rtrs_srv *rtrs) 266 + static int create_sess(struct rtrs_srv_sess *rtrs) 267 267 { 268 268 struct rnbd_srv_session *srv_sess; 269 - char sessname[NAME_MAX]; 269 + char pathname[NAME_MAX]; 270 270 int err; 271 271 272 - err = rtrs_srv_get_sess_name(rtrs, sessname, sizeof(sessname)); 272 + err = rtrs_srv_get_path_name(rtrs, pathname, sizeof(pathname)); 273 273 if (err) { 274 - pr_err("rtrs_srv_get_sess_name(%s): %d\n", sessname, err); 274 + pr_err("rtrs_srv_get_path_name(%s): %d\n", pathname, err); 275 275 276 276 return err; 277 277 } ··· 284 284 offsetof(struct rnbd_dev_blk_io, bio), 285 285 BIOSET_NEED_BVECS); 286 286 if (err) { 287 - pr_err("Allocating srv_session for session %s failed\n", 288 - sessname); 287 + pr_err("Allocating srv_session for path %s failed\n", 288 + pathname); 289 289 kfree(srv_sess); 290 290 return err; 291 291 } ··· 298 298 mutex_unlock(&sess_lock); 299 299 300 300 srv_sess->rtrs = rtrs; 301 - strscpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname)); 301 + strscpy(srv_sess->sessname, pathname, sizeof(srv_sess->sessname)); 302 302 303 303 rtrs_srv_set_sess_priv(rtrs, srv_sess); 304 304 305 305 return 0; 306 306 } 307 307 308 - static int rnbd_srv_link_ev(struct rtrs_srv *rtrs, 308 + static int rnbd_srv_link_ev(struct rtrs_srv_sess *rtrs, 309 309 enum rtrs_srv_link_ev ev, void *priv) 310 310 { 311 311 struct rnbd_srv_session *srv_sess = priv;
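Beyond the rtrs_srv -> rtrs_srv_sess type rename, this hunk renames the local buffer to match the new rtrs_srv_get_path_name() helper; the copy into the fixed-size sessname field stays on strscpy(), which, unlike strncpy(), always NUL-terminates and reports truncation. A hedged reminder of the idiom (helper name hypothetical):

#include <linux/string.h>

static int store_pathname(char *dst, size_t dst_sz, const char *src)
{
	/* strscpy() returns the number of bytes copied, or -E2BIG if
	 * 'src' (plus its NUL) did not fit; 'dst' is always
	 * NUL-terminated when dst_sz > 0. */
	ssize_t n = strscpy(dst, src, dst_sz);

	return n < 0 ? -E2BIG : 0;
}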
+1 -1
drivers/block/rnbd/rnbd-srv.h
··· 20 20 struct rnbd_srv_session { 21 21 /* Entry inside global sess_list */ 22 22 struct list_head list; 23 - struct rtrs_srv *rtrs; 23 + struct rtrs_srv_sess *rtrs; 24 24 char sessname[NAME_MAX]; 25 25 int queue_depth; 26 26 struct bio_set sess_bio_set;
+9 -3
drivers/infiniband/core/cache.c
··· 956 956 { 957 957 struct ib_gid_table *table; 958 958 unsigned long flags; 959 - int res = -EINVAL; 959 + int res; 960 960 961 961 if (!rdma_is_port_valid(device, port_num)) 962 962 return -EINVAL; ··· 964 964 table = rdma_gid_table(device, port_num); 965 965 read_lock_irqsave(&table->rwlock, flags); 966 966 967 - if (index < 0 || index >= table->sz || 968 - !is_gid_entry_valid(table->data_vec[index])) 967 + if (index < 0 || index >= table->sz) { 968 + res = -EINVAL; 969 969 goto done; 970 + } 971 + 972 + if (!is_gid_entry_valid(table->data_vec[index])) { 973 + res = -ENOENT; 974 + goto done; 975 + } 970 976 971 977 memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid)); 972 978 res = 0;
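This is the GID-table-holes fix from the summary: rdma_query_gid() now distinguishes an out-of-range index (-EINVAL) from an empty slot (-ENOENT), so table walkers can step over holes instead of stopping at the first one, which is exactly what the cma.c and device.c hunks below switch to. A sketch of the resulting caller pattern (loop body illustrative; the bound comes from port_data as in those hunks):

#include <rdma/ib_verbs.h>

static int find_gid_index(struct ib_device *dev, u32 port,
			  const union ib_gid *dgid)
{
	union ib_gid gid;
	int i, ret;

	for (i = 0; i < dev->port_data[port].immutable.gid_tbl_len; i++) {
		ret = rdma_query_gid(dev, port, i, &gid);
		if (ret)
			continue;	/* -ENOENT: hole in the table */
		if (!memcmp(&gid, dgid, sizeof(gid)))
			return i;	/* match at this index */
	}
	return -ENOENT;
}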
+11 -7
drivers/infiniband/core/cma.c
··· 766 766 unsigned int p; 767 767 u16 pkey, index; 768 768 enum ib_port_state port_state; 769 + int ret; 769 770 int i; 770 771 771 772 cma_dev = NULL; ··· 785 784 786 785 if (ib_get_cached_port_state(cur_dev->device, p, &port_state)) 787 786 continue; 788 - for (i = 0; !rdma_query_gid(cur_dev->device, 789 - p, i, &gid); 790 - i++) { 787 + 788 + for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len; 789 + ++i) { 790 + ret = rdma_query_gid(cur_dev->device, p, i, 791 + &gid); 792 + if (ret) 793 + continue; 794 + 791 795 if (!memcmp(&gid, dgid, sizeof(gid))) { 792 796 cma_dev = cur_dev; 793 797 sgid = gid; ··· 4039 4033 4040 4034 memset(&req, 0, sizeof req); 4041 4035 offset = cma_user_data_offset(id_priv); 4042 - req.private_data_len = offset + conn_param->private_data_len; 4043 - if (req.private_data_len < conn_param->private_data_len) 4036 + if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) 4044 4037 return -EINVAL; 4045 4038 4046 4039 if (req.private_data_len) { ··· 4098 4093 4099 4094 memset(&req, 0, sizeof req); 4100 4095 offset = cma_user_data_offset(id_priv); 4101 - req.private_data_len = offset + conn_param->private_data_len; 4102 - if (req.private_data_len < conn_param->private_data_len) 4096 + if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) 4103 4097 return -EINVAL; 4104 4098 4105 4099 if (req.private_data_len) {
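Two changes here: the GID loop is bounded by gid_tbl_len and skips holes (matching the cache.c change above), and the open-coded wraparound test on private_data_len is replaced by check_add_overflow() from <linux/overflow.h>, which stores the sum and returns true when the addition wrapped. The idiom, sketched with generic types:

#include <linux/overflow.h>

static int total_private_len(unsigned int offset, unsigned int user_len,
			     unsigned int *total)
{
	/* check_add_overflow() writes offset + user_len into *total
	 * and returns true if the mathematical result did not fit. */
	if (check_add_overflow(offset, user_len, total))
		return -EINVAL;
	return 0;
}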
+2 -1
drivers/infiniband/core/device.c
··· 2461 2461 ++i) { 2462 2462 ret = rdma_query_gid(device, port, i, &tmp_gid); 2463 2463 if (ret) 2464 - return ret; 2464 + continue; 2465 + 2465 2466 if (!memcmp(&tmp_gid, gid, sizeof *gid)) { 2466 2467 *port_num = port; 2467 2468 if (index)
+2 -1
drivers/infiniband/core/sysfs.c
··· 433 433 &ib_port_attr_link_layer.attr, 434 434 NULL 435 435 }; 436 + ATTRIBUTE_GROUPS(port_default); 436 437 437 438 static ssize_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf) 438 439 { ··· 775 774 static struct kobj_type port_type = { 776 775 .release = ib_port_release, 777 776 .sysfs_ops = &port_sysfs_ops, 778 - .default_attrs = port_default_attrs 777 + .default_groups = port_default_groups, 779 778 }; 780 779 781 780 static struct kobj_type gid_attr_type = {
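kobj_type lost its .default_attrs field upstream in favor of .default_groups; the ATTRIBUTE_GROUPS() macro bridges the two by generating a NULL-terminated <name>_groups[] from an existing <name>_attrs[] list, as the hunk does for port_default. The pattern on a hypothetical attribute set:

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* foo_bar_attr and foo_release are placeholders for illustration. */
static struct attribute *foo_attrs[] = {
	&foo_bar_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);		/* emits foo_group and foo_groups[] */

static struct kobj_type foo_ktype = {
	.release	= foo_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= foo_groups,	/* was .default_attrs = foo_attrs */
};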
+1 -2
drivers/infiniband/core/umem_odp.c
··· 227 227 const struct mmu_interval_notifier_ops *ops) 228 228 { 229 229 struct ib_umem_odp *umem_odp; 230 - struct mm_struct *mm; 231 230 int ret; 232 231 233 232 if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND))) ··· 240 241 umem_odp->umem.length = size; 241 242 umem_odp->umem.address = addr; 242 243 umem_odp->umem.writable = ib_access_writable(access); 243 - umem_odp->umem.owning_mm = mm = current->mm; 244 + umem_odp->umem.owning_mm = current->mm; 244 245 umem_odp->notifier.ops = ops; 245 246 246 247 umem_odp->page_shift = PAGE_SHIFT;
-1
drivers/infiniband/core/uverbs_cmd.c
··· 1399 1399 attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR : 1400 1400 IB_SIGNAL_REQ_WR; 1401 1401 attr.qp_type = cmd->qp_type; 1402 - attr.create_flags = 0; 1403 1402 1404 1403 attr.cap.max_send_wr = cmd->max_send_wr; 1405 1404 attr.cap.max_recv_wr = cmd->max_recv_wr;
+4 -5
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 262 262 int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num, 263 263 u16 index, u16 *pkey) 264 264 { 265 - struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); 265 + if (index > 0) 266 + return -EINVAL; 266 267 267 - /* Ignore port_num */ 268 + *pkey = IB_DEFAULT_PKEY_FULL; 268 269 269 - memset(pkey, 0, sizeof(*pkey)); 270 - return bnxt_qplib_get_pkey(&rdev->qplib_res, 271 - &rdev->qplib_res.pkey_tbl, index, pkey); 270 + return 0; 272 271 } 273 272 274 273 int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
+1 -2
drivers/infiniband/hw/bnxt_re/main.c
··· 893 893 struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq, 894 894 qplib_srq); 895 895 struct ib_event ib_event; 896 - int rc = 0; 897 896 898 897 ib_event.device = &srq->rdev->ibdev; 899 898 ib_event.element.srq = &srq->ib_srq; ··· 906 907 (*srq->ib_srq.event_handler)(&ib_event, 907 908 srq->ib_srq.srq_context); 908 909 } 909 - return rc; 910 + return 0; 910 911 } 911 912 912 913 static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
+5 -6
drivers/infiniband/hw/bnxt_re/qplib_fp.c
··· 46 46 #include <linux/delay.h> 47 47 #include <linux/prefetch.h> 48 48 #include <linux/if_ether.h> 49 + #include <rdma/ib_mad.h> 49 50 50 51 #include "roce_hsi.h" 51 52 ··· 1233 1232 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1234 1233 struct cmdq_modify_qp req; 1235 1234 struct creq_modify_qp_resp resp; 1236 - u16 cmd_flags = 0, pkey; 1235 + u16 cmd_flags = 0; 1237 1236 u32 temp32[4]; 1238 1237 u32 bmask; 1239 1238 int rc; ··· 1256 1255 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS) 1257 1256 req.access = qp->access; 1258 1257 1259 - if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) { 1260 - if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl, 1261 - qp->pkey_index, &pkey)) 1262 - req.pkey = cpu_to_le16(pkey); 1263 - } 1258 + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) 1259 + req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL); 1260 + 1264 1261 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY) 1265 1262 req.qkey = cpu_to_le32(qp->qkey); 1266 1263
+4 -8
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
··· 555 555 556 556 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) 557 557 { 558 - kfree(rcfw->cmdq.cmdq_bitmap); 558 + bitmap_free(rcfw->cmdq.cmdq_bitmap); 559 559 kfree(rcfw->qp_tbl); 560 560 kfree(rcfw->crsqe_tbl); 561 561 bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq); ··· 572 572 struct bnxt_qplib_sg_info sginfo = {}; 573 573 struct bnxt_qplib_cmdq_ctx *cmdq; 574 574 struct bnxt_qplib_creq_ctx *creq; 575 - u32 bmap_size = 0; 576 575 577 576 rcfw->pdev = res->pdev; 578 577 cmdq = &rcfw->cmdq; ··· 612 613 if (!rcfw->crsqe_tbl) 613 614 goto fail; 614 615 615 - bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long); 616 - cmdq->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL); 616 + cmdq->cmdq_bitmap = bitmap_zalloc(rcfw->cmdq_depth, GFP_KERNEL); 617 617 if (!cmdq->cmdq_bitmap) 618 618 goto fail; 619 - 620 - cmdq->bmap_size = bmap_size; 621 619 622 620 /* Allocate one extra to hold the QP1 entries */ 623 621 rcfw->qp_tbl_size = qp_tbl_sz + 1; ··· 663 667 iounmap(cmdq->cmdq_mbox.reg.bar_reg); 664 668 iounmap(creq->creq_db.reg.bar_reg); 665 669 666 - indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size); 667 - if (indx != cmdq->bmap_size) 670 + indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth); 671 + if (indx != rcfw->cmdq_depth) 668 672 dev_err(&rcfw->pdev->dev, 669 673 "disabling RCFW with pending cmd-bit %lx\n", indx); 670 674
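The open-coded BITS_TO_LONGS() * sizeof(long) sizing becomes bitmap_zalloc()/bitmap_free(), which also removes the cached bmap_size byte count: callers track only the bit count (cmdq_depth) and pass it straight to the bitmap helpers. A sketch with hypothetical names:

#include <linux/bitmap.h>
#include <linux/gfp.h>
#include <linux/printk.h>

struct cmdq {			/* hypothetical container */
	unsigned long *bitmap;
	u32 depth;		/* in bits; the only size we keep */
};

static int cmdq_init(struct cmdq *cq, u32 depth)
{
	cq->bitmap = bitmap_zalloc(depth, GFP_KERNEL); /* sized in longs internally */
	if (!cq->bitmap)
		return -ENOMEM;
	cq->depth = depth;
	return 0;
}

static void cmdq_fini(struct cmdq *cq)
{
	unsigned long pending = find_first_bit(cq->bitmap, cq->depth);

	if (pending != cq->depth)	/* some command bit still set */
		pr_warn("tearing down with pending cmd-bit %lu\n", pending);
	bitmap_free(cq->bitmap);	/* pairs with bitmap_zalloc() */
}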
-1
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
··· 152 152 wait_queue_head_t waitq; 153 153 unsigned long flags; 154 154 unsigned long *cmdq_bitmap; 155 - u32 bmap_size; 156 155 u32 seq_num; 157 156 }; 158 157
-50
drivers/infiniband/hw/bnxt_re/qplib_res.c
··· 649 649 memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); 650 650 } 651 651 652 - static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res, 653 - struct bnxt_qplib_pkey_tbl *pkey_tbl) 654 - { 655 - if (!pkey_tbl->tbl) 656 - dev_dbg(&res->pdev->dev, "PKEY tbl not present\n"); 657 - else 658 - kfree(pkey_tbl->tbl); 659 - 660 - pkey_tbl->tbl = NULL; 661 - pkey_tbl->max = 0; 662 - pkey_tbl->active = 0; 663 - } 664 - 665 - static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res, 666 - struct bnxt_qplib_pkey_tbl *pkey_tbl, 667 - u16 max) 668 - { 669 - pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL); 670 - if (!pkey_tbl->tbl) 671 - return -ENOMEM; 672 - 673 - pkey_tbl->max = max; 674 - return 0; 675 - }; 676 - 677 652 /* PDs */ 678 653 int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd) 679 654 { ··· 818 843 return -ENOMEM; 819 844 } 820 845 821 - /* PKEYs */ 822 - static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl) 823 - { 824 - memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); 825 - pkey_tbl->active = 0; 826 - } 827 - 828 - static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res, 829 - struct bnxt_qplib_pkey_tbl *pkey_tbl) 830 - { 831 - u16 pkey = 0xFFFF; 832 - 833 - memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); 834 - 835 - /* pkey default = 0xFFFF */ 836 - bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false); 837 - } 838 - 839 846 /* Stats */ 840 847 static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, 841 848 struct bnxt_qplib_stats *stats) ··· 848 891 849 892 void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res) 850 893 { 851 - bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl); 852 894 bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl); 853 895 } 854 896 855 897 int bnxt_qplib_init_res(struct bnxt_qplib_res *res) 856 898 { 857 899 bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev); 858 - bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl); 859 900 860 901 return 0; 861 902 } 862 903 863 904 void bnxt_qplib_free_res(struct bnxt_qplib_res *res) 864 905 { 865 - bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl); 866 906 bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl); 867 907 bnxt_qplib_free_pd_tbl(&res->pd_tbl); 868 908 bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl); ··· 875 921 res->netdev = netdev; 876 922 877 923 rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid); 878 - if (rc) 879 - goto fail; 880 - 881 - rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey); 882 924 if (rc) 883 925 goto fail; 884 926
-7
drivers/infiniband/hw/bnxt_re/qplib_res.h
··· 185 185 u8 *vlan; 186 186 }; 187 187 188 - struct bnxt_qplib_pkey_tbl { 189 - u16 *tbl; 190 - u16 max; 191 - u16 active; 192 - }; 193 - 194 188 struct bnxt_qplib_dpi { 195 189 u32 dpi; 196 190 void __iomem *dbr; ··· 252 258 struct bnxt_qplib_rcfw *rcfw; 253 259 struct bnxt_qplib_pd_tbl pd_tbl; 254 260 struct bnxt_qplib_sgid_tbl sgid_tbl; 255 - struct bnxt_qplib_pkey_tbl pkey_tbl; 256 261 struct bnxt_qplib_dpi_tbl dpi_tbl; 257 262 bool prio; 258 263 bool is_vf;
+1 -98
drivers/infiniband/hw/bnxt_re/qplib_sp.c
··· 146 146 attr->max_srq = le16_to_cpu(sb->max_srq); 147 147 attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1; 148 148 attr->max_srq_sges = sb->max_srq_sge; 149 - attr->max_pkey = le32_to_cpu(sb->max_pkeys); 150 - /* 151 - * Some versions of FW reports more than 0xFFFF. 152 - * Restrict it for now to 0xFFFF to avoid 153 - * reporting trucated value 154 - */ 155 - if (attr->max_pkey > 0xFFFF) { 156 - /* ib_port_attr::pkey_tbl_len is u16 */ 157 - attr->max_pkey = 0xFFFF; 158 - } 159 - 149 + attr->max_pkey = 1; 160 150 attr->max_inline_data = le32_to_cpu(sb->max_inline_data); 161 151 attr->l2_db_size = (sb->l2_db_space_size + 1) * 162 152 (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); ··· 401 411 402 412 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 403 413 (void *)&resp, NULL, 0); 404 - return rc; 405 - } 406 - 407 - /* pkeys */ 408 - int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, 409 - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index, 410 - u16 *pkey) 411 - { 412 - if (index == 0xFFFF) { 413 - *pkey = 0xFFFF; 414 - return 0; 415 - } 416 - if (index >= pkey_tbl->max) { 417 - dev_err(&res->pdev->dev, 418 - "Index %d exceeded PKEY table max (%d)\n", 419 - index, pkey_tbl->max); 420 - return -EINVAL; 421 - } 422 - memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey)); 423 - return 0; 424 - } 425 - 426 - int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res, 427 - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, 428 - bool update) 429 - { 430 - int i, rc = 0; 431 - 432 - if (!pkey_tbl) { 433 - dev_err(&res->pdev->dev, "PKEY table not allocated\n"); 434 - return -EINVAL; 435 - } 436 - 437 - /* Do we need a pkey_lock here? */ 438 - if (!pkey_tbl->active) { 439 - dev_err(&res->pdev->dev, "PKEY table has no active entries\n"); 440 - return -ENOMEM; 441 - } 442 - for (i = 0; i < pkey_tbl->max; i++) { 443 - if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey))) 444 - break; 445 - } 446 - if (i == pkey_tbl->max) { 447 - dev_err(&res->pdev->dev, 448 - "PKEY 0x%04x not found in the pkey table\n", *pkey); 449 - return -ENOMEM; 450 - } 451 - memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey)); 452 - pkey_tbl->active--; 453 - 454 - /* unlock */ 455 - return rc; 456 - } 457 - 458 - int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res, 459 - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, 460 - bool update) 461 - { 462 - int i, free_idx, rc = 0; 463 - 464 - if (!pkey_tbl) { 465 - dev_err(&res->pdev->dev, "PKEY table not allocated\n"); 466 - return -EINVAL; 467 - } 468 - 469 - /* Do we need a pkey_lock here? */ 470 - if (pkey_tbl->active == pkey_tbl->max) { 471 - dev_err(&res->pdev->dev, "PKEY table is full\n"); 472 - return -ENOMEM; 473 - } 474 - free_idx = pkey_tbl->max; 475 - for (i = 0; i < pkey_tbl->max; i++) { 476 - if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey))) 477 - return -EALREADY; 478 - else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max) 479 - free_idx = i; 480 - } 481 - if (free_idx == pkey_tbl->max) { 482 - dev_err(&res->pdev->dev, 483 - "PKEY table is FULL but count is not MAX??\n"); 484 - return -ENOMEM; 485 - } 486 - /* Add PKEY to the pkey_tbl */ 487 - memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey)); 488 - pkey_tbl->active++; 489 - 490 - /* unlock */ 491 414 return rc; 492 415 } 493 416
-9
drivers/infiniband/hw/bnxt_re/qplib_sp.h
··· 255 255 int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, 256 256 struct bnxt_qplib_gid *gid, u16 gid_idx, 257 257 const u8 *smac); 258 - int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, 259 - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index, 260 - u16 *pkey); 261 - int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res, 262 - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, 263 - bool update); 264 - int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res, 265 - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, 266 - bool update); 267 258 int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, 268 259 struct bnxt_qplib_dev_attr *attr, bool vf); 269 260 int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
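All the bnxt_re pkey-table removals above rest on one fact: RoCE does no partition-key management, so every port exposes a single full-membership pkey, 0xFFFF, available as IB_DEFAULT_PKEY_FULL in <rdma/ib_mad.h>. Any RoCE pkey query thus reduces to a bounds check plus the constant, as ib_verbs.c now does; a minimal sketch of that verb shape:

#include <rdma/ib_mad.h>
#include <rdma/ib_verbs.h>

static int roce_query_pkey(struct ib_device *ibdev, u32 port_num,
			   u16 index, u16 *pkey)
{
	if (index > 0)		/* index 0 is the only valid slot */
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;	/* 0xFFFF, full membership */
	return 0;
}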
+3 -2
drivers/infiniband/hw/cxgb4/cm.c
··· 2471 2471 skb_get(skb); 2472 2472 rpl = cplhdr(skb); 2473 2473 if (!is_t4(adapter_type)) { 2474 - skb_trim(skb, roundup(sizeof(*rpl5), 16)); 2474 + BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16)); 2475 + skb_trim(skb, sizeof(*rpl5)); 2475 2476 rpl5 = (void *)rpl; 2476 2477 INIT_TP_WR(rpl5, ep->hwtid); 2477 2478 } else { ··· 2488 2487 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); 2489 2488 opt2 |= T5_ISS_F; 2490 2489 rpl5 = (void *)rpl; 2491 - memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); 2490 + memset_after(rpl5, 0, iss); 2492 2491 if (peer2peer) 2493 2492 isn += 4; 2494 2493 rpl5->iss = cpu_to_be32(isn);
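memset_after(obj, v, member), from <linux/string.h>, zeroes everything in *obj located after 'member', replacing the error-prone roundup(sizeof...) arithmetic; its sibling memset_startat() (used elsewhere in this series, per the summary) zeroes from 'member' to the end instead. Illustrated on a made-up struct:

#include <linux/string.h>
#include <linux/types.h>

struct demo_rpl {		/* hypothetical layout */
	u32 wr_hi;
	u32 wr_mid;
	u32 iss;
	u32 opt2;
	u32 pad[3];
};

static void demo_clear(struct demo_rpl *rpl)
{
	/* Zeroes opt2 and pad[], i.e. every byte after 'iss';
	 * wr_hi, wr_mid and iss itself are left untouched. */
	memset_after(rpl, 0, iss);

	/* memset_startat(rpl, 0, iss) would instead zero from 'iss'
	 * through the end of the struct. */
}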
+6 -11
drivers/infiniband/hw/cxgb4/id_table.c
··· 59 59 alloc->last = obj + 1; 60 60 if (alloc->last >= alloc->max) 61 61 alloc->last = 0; 62 - set_bit(obj, alloc->table); 62 + __set_bit(obj, alloc->table); 63 63 obj += alloc->start; 64 64 } else 65 65 obj = -1; ··· 75 75 obj -= alloc->start; 76 76 77 77 spin_lock_irqsave(&alloc->lock, flags); 78 - clear_bit(obj, alloc->table); 78 + __clear_bit(obj, alloc->table); 79 79 spin_unlock_irqrestore(&alloc->lock, flags); 80 80 } 81 81 82 82 int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num, 83 83 u32 reserved, u32 flags) 84 84 { 85 - int i; 86 - 87 85 alloc->start = start; 88 86 alloc->flags = flags; 89 87 if (flags & C4IW_ID_TABLE_F_RANDOM) 90 88 alloc->last = prandom_u32() % RANDOM_SKIP; 91 89 else 92 90 alloc->last = 0; 93 - alloc->max = num; 91 + alloc->max = num; 94 92 spin_lock_init(&alloc->lock); 95 - alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), 96 - GFP_KERNEL); 93 + alloc->table = bitmap_zalloc(num, GFP_KERNEL); 97 94 if (!alloc->table) 98 95 return -ENOMEM; 99 96 100 - bitmap_zero(alloc->table, num); 101 97 if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY)) 102 - for (i = 0; i < reserved; ++i) 103 - set_bit(i, alloc->table); 98 + bitmap_set(alloc->table, 0, reserved); 104 99 105 100 return 0; 106 101 } 107 102 108 103 void c4iw_id_table_free(struct c4iw_id_table *alloc) 109 104 { 110 - kfree(alloc->table); 105 + bitmap_free(alloc->table); 111 106 }
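Two cleanups in one: set_bit()/clear_bit() become their non-atomic __ variants because every table update already runs under alloc->lock (the spinlock serializes the read-modify-write, so atomic bitops only add cost), and the kmalloc_array + bitmap_zero + reservation loop collapses into bitmap_zalloc() plus one bitmap_set(). A simplified sketch of the allocation path under the lock:

#include <linux/bitmap.h>
#include <linux/spinlock.h>

static u32 id_alloc(struct c4iw_id_table *alloc)
{
	unsigned long flags;
	u32 obj;

	spin_lock_irqsave(&alloc->lock, flags);
	obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
	if (obj >= alloc->max)
		obj = find_first_zero_bit(alloc->table, alloc->max);
	if (obj < alloc->max) {
		alloc->last = obj + 1 >= alloc->max ? 0 : obj + 1;
		__set_bit(obj, alloc->table);	/* non-atomic: lock held */
		obj += alloc->start;
	} else {
		obj = -1;
	}
	spin_unlock_irqrestore(&alloc->lock, flags);
	return obj;
}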
+5 -3
drivers/infiniband/hw/cxgb4/provider.c
··· 41 41 #include <linux/ethtool.h> 42 42 #include <linux/rtnetlink.h> 43 43 #include <linux/inetdevice.h> 44 + #include <net/addrconf.h> 44 45 #include <linux/io.h> 45 46 46 47 #include <asm/irq.h> ··· 265 264 return -EINVAL; 266 265 267 266 dev = to_c4iw_dev(ibdev); 268 - memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); 267 + addrconf_addr_eui48((u8 *)&props->sys_image_guid, 268 + dev->rdev.lldi.ports[0]->dev_addr); 269 269 props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type); 270 270 props->fw_ver = dev->rdev.lldi.fw_vers; 271 271 props->device_cap_flags = dev->device_cap_flags; ··· 527 525 struct c4iw_dev *dev = ctx->dev; 528 526 529 527 pr_debug("c4iw_dev %p\n", dev); 530 - memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); 531 - memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); 528 + addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, 529 + dev->rdev.lldi.ports[0]->dev_addr); 532 530 dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW; 533 531 if (fastreg_support) 534 532 dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
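addrconf_addr_eui48(), from <net/addrconf.h>, expands a 6-byte MAC into the 8-byte EUI-64 form (3 OUI bytes, an inserted FF:FE pair, the remaining 3 bytes, with the universal/local bit flipped), so all eight bytes of the GUID are written; the old 6-byte memcpy left the last two bytes stale. Sketched usage (helper name hypothetical):

#include <net/addrconf.h>
#include <rdma/ib_verbs.h>

static void set_node_guid(struct ib_device *ibdev, const u8 *mac)
{
	/* Fills every byte of node_guid from the port MAC. */
	addrconf_addr_eui48((u8 *)&ibdev->node_guid, mac);
}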
+1
drivers/infiniband/hw/cxgb4/qp.c
··· 2460 2460 memset(attr, 0, sizeof(*attr)); 2461 2461 memset(init_attr, 0, sizeof(*init_attr)); 2462 2462 attr->qp_state = to_ib_qp_state(qhp->attr.state); 2463 + attr->cur_qp_state = to_ib_qp_state(qhp->attr.state); 2463 2464 init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; 2464 2465 init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; 2465 2466 init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
+3 -5
drivers/infiniband/hw/hfi1/user_sdma.c
··· 161 161 if (!pq->reqs) 162 162 goto pq_reqs_nomem; 163 163 164 - pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size), 165 - sizeof(*pq->req_in_use), 166 - GFP_KERNEL); 164 + pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL); 167 165 if (!pq->req_in_use) 168 166 goto pq_reqs_no_in_use; 169 167 ··· 208 210 cq_nomem: 209 211 kmem_cache_destroy(pq->txreq_cache); 210 212 pq_txreq_nomem: 211 - kfree(pq->req_in_use); 213 + bitmap_free(pq->req_in_use); 212 214 pq_reqs_no_in_use: 213 215 kfree(pq->reqs); 214 216 pq_reqs_nomem: ··· 255 257 pq->wait, 256 258 !atomic_read(&pq->n_reqs)); 257 259 kfree(pq->reqs); 258 - kfree(pq->req_in_use); 260 + bitmap_free(pq->req_in_use); 259 261 kmem_cache_destroy(pq->txreq_cache); 260 262 flush_pq_iowait(pq); 261 263 kfree(pq);
+2 -15
drivers/infiniband/hw/hns/Kconfig
··· 5 5 depends on ARM64 || (COMPILE_TEST && 64BIT) 6 6 depends on (HNS_DSAF && HNS_ENET) || HNS3 7 7 help 8 - This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine 9 - is used in Hisilicon Hip06 and more further ICT SoC based on 10 - platform device. 8 + This is a RoCE/RDMA driver for the Hisilicon RoCE engine. 11 9 12 - To compile HIP06 or HIP08 driver as module, choose M here. 13 - 14 - config INFINIBAND_HNS_HIP06 15 - bool "Hisilicon Hip06 Family RoCE support" 16 - depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET 17 - depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y) 18 - help 19 - RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and 20 - Hip07 SoC. These RoCE engines are platform devices. 21 - 22 - To compile this driver, choose Y here: if INFINIBAND_HNS is m, this 23 - module will be called hns-roce-hw-v1 10 + To compile HIP08 driver as module, choose M here. 24 11 25 12 config INFINIBAND_HNS_HIP08 26 13 bool "Hisilicon Hip08 Family RoCE support"
-5
drivers/infiniband/hw/hns/Makefile
··· 9 9 hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ 10 10 hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o 11 11 12 - ifdef CONFIG_INFINIBAND_HNS_HIP06 13 - hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs) 14 - obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o 15 - endif 16 - 17 12 ifdef CONFIG_INFINIBAND_HNS_HIP08 18 13 hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs) 19 14 obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
+2 -3
drivers/infiniband/hw/hns/hns_roce_ah.c
··· 30 30 * SOFTWARE. 31 31 */ 32 32 33 - #include <linux/platform_device.h> 34 33 #include <linux/pci.h> 35 34 #include <rdma/ib_addr.h> 36 35 #include <rdma/ib_cache.h> ··· 60 61 struct hns_roce_ah *ah = to_hr_ah(ibah); 61 62 int ret = 0; 62 63 63 - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && udata) 64 + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) 64 65 return -EOPNOTSUPP; 65 66 66 67 ah->av.port = rdma_ah_get_port_num(ah_attr); ··· 79 80 memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN); 80 81 81 82 /* HIP08 needs to record vlan info in Address Vector */ 82 - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) { 83 + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { 83 84 ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr, 84 85 &ah->av.vlan_id, NULL); 85 86 if (ret)
+1 -2
drivers/infiniband/hw/hns/hns_roce_alloc.c
··· 31 31 * SOFTWARE. 32 32 */ 33 33 34 - #include <linux/platform_device.h> 35 34 #include <linux/vmalloc.h> 36 - #include "hns_roce_device.h" 37 35 #include <rdma/ib_umem.h> 36 + #include "hns_roce_device.h" 38 37 39 38 void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf) 40 39 {
+5 -6
drivers/infiniband/hw/hns/hns_roce_cmd.c
··· 31 31 */ 32 32 33 33 #include <linux/dmapool.h> 34 - #include <linux/platform_device.h> 35 34 #include "hns_roce_common.h" 36 35 #include "hns_roce_device.h" 37 36 #include "hns_roce_cmd.h" ··· 60 61 CMD_POLL_TOKEN, 0); 61 62 if (ret) { 62 63 dev_err_ratelimited(hr_dev->dev, 63 - "failed to post mailbox %x in poll mode, ret = %d.\n", 64 + "failed to post mailbox 0x%x in poll mode, ret = %d.\n", 64 65 op, ret); 65 66 return ret; 66 67 } ··· 90 91 91 92 if (unlikely(token != context->token)) { 92 93 dev_err_ratelimited(hr_dev->dev, 93 - "[cmd] invalid ae token %x,context token is %x!\n", 94 + "[cmd] invalid ae token 0x%x, context token is 0x%x.\n", 94 95 token, context->token); 95 96 return; 96 97 } ··· 129 130 context->token, 1); 130 131 if (ret) { 131 132 dev_err_ratelimited(dev, 132 - "failed to post mailbox %x in event mode, ret = %d.\n", 133 + "failed to post mailbox 0x%x in event mode, ret = %d.\n", 133 134 op, ret); 134 135 goto out; 135 136 } 136 137 137 138 if (!wait_for_completion_timeout(&context->done, 138 139 msecs_to_jiffies(timeout))) { 139 - dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n", 140 + dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n", 140 141 context->token, op); 141 142 ret = -EBUSY; 142 143 goto out; ··· 144 145 145 146 ret = context->result; 146 147 if (ret) 147 - dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d\n", 148 + dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n", 148 149 context->token, op, ret); 149 150 150 151 out:
-202
drivers/infiniband/hw/hns/hns_roce_common.h
··· 104 104 105 105 #define hr_reg_read(ptr, field) _hr_reg_read(ptr, field) 106 106 107 - #define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3 108 - #define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4 109 - 110 - #define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5 111 - 112 - #define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6 113 - 114 - #define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10 115 - #define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \ 116 - (((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S) 117 - 118 - #define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16 119 - 120 - #define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0 121 - #define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \ 122 - (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S) 123 - 124 - #define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24 125 - #define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \ 126 - (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S) 127 - 128 - #define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0 129 - #define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \ 130 - (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S) 131 - 132 - #define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24 133 - #define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \ 134 - (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S) 135 - 136 - #define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0 137 - #define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \ 138 - (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S) 139 - 140 - #define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16 141 - #define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \ 142 - (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S) 143 - 144 - #define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0 145 - #define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \ 146 - (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S) 147 - 148 - #define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16 149 - #define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \ 150 - (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S) 151 - 152 - #define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0 153 - #define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \ 154 - (((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S) 155 - 156 - #define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0 157 - #define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \ 158 - (((1UL << 15) - 1) << \ 159 - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S) 160 - 161 - #define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16 162 - #define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \ 163 - (((1UL << 4) - 1) << \ 164 - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S) 165 - 166 - #define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20 167 - 168 - #define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21 169 - 170 - #define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0 171 - #define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \ 172 - (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S) 173 - 174 - #define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5 175 - #define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \ 176 - (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S) 177 - 178 - #define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0 179 - #define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \ 180 - (((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S) 181 - 182 - #define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5 183 - #define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \ 184 - (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S) 185 - 186 - #define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 
0 187 - #define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \ 188 - (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S) 189 - 190 - #define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8 191 - #define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \ 192 - (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S) 193 - 194 - #define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0 195 - #define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \ 196 - (((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S) 197 - 198 - #define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19 199 - 200 - #define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20 201 - #define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \ 202 - (((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S) 203 - 204 - #define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22 205 - #define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \ 206 - (((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S) 207 - 208 - #define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31 209 - 210 - #define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0 211 - #define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \ 212 - (((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S) 213 - 214 - #define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0 215 - #define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \ 216 - (((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S) 217 - 218 - #define ROCEE_MB6_ROCEE_MB_CMD_S 0 219 - #define ROCEE_MB6_ROCEE_MB_CMD_M \ 220 - (((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S) 221 - 222 - #define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8 223 - #define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \ 224 - (((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S) 225 - 226 - #define ROCEE_MB6_ROCEE_MB_EVENT_S 14 227 - 228 - #define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15 229 - 230 - #define ROCEE_MB6_ROCEE_MB_TOKEN_S 16 231 - #define ROCEE_MB6_ROCEE_MB_TOKEN_M \ 232 - (((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S) 233 - 234 - #define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0 235 - #define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \ 236 - (((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S) 237 - 238 - #define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24 239 - #define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \ 240 - (((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S) 241 - 242 - #define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28 243 - #define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \ 244 - (((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S) 245 - 246 - #define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31 247 - 248 - #define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0 249 - #define ROCEE_SMAC_H_ROCEE_SMAC_H_M \ 250 - (((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S) 251 - 252 - #define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16 253 - #define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \ 254 - (((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S) 255 - 256 - #define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0 257 - #define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \ 258 - (((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S) 259 - 260 - #define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8 261 - #define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \ 262 - (((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S) 263 - 264 - #define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17 265 - 266 - #define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0 267 - #define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \ 268 - (((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S) 269 - 270 - #define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16 271 - #define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \ 272 - (((1UL << 
16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S) 273 - 274 - #define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0 275 - #define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \ 276 - (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S) 277 - 278 - #define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16 279 - #define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1 280 - #define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0 281 - 282 - #define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0 283 - #define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1 284 - 285 - #define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0 286 - 287 - #define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0 288 - #define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \ 289 - (((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) 290 - 291 - #define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0 292 - #define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \ 293 - (((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) 294 - 295 - #define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0 296 - #define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \ 297 - (((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S) 298 - 299 - #define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0 300 - #define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M \ 301 - (((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S) 302 - 303 - #define ROCEE_SDB_CNT_CMP_BITS 16 304 - 305 - #define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20 306 - 307 - #define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0 308 - 309 107 /*************ROCEE_REG DEFINITION****************/ 310 108 #define ROCEE_VENDOR_ID_REG 0x0 311 109 #define ROCEE_VENDOR_PART_ID_REG 0x4
-13
drivers/infiniband/hw/hns/hns_roce_cq.c
··· 30 30 * SOFTWARE. 31 31 */ 32 32 33 - #include <linux/platform_device.h> 34 33 #include <rdma/ib_umem.h> 35 34 #include <rdma/uverbs_ioctl.h> 36 35 #include "hns_roce_device.h" ··· 405 406 goto err_cqn; 406 407 } 407 408 408 - /* 409 - * For the QP created by kernel space, tptr value should be initialized 410 - * to zero; For the QP created by user space, it will cause synchronous 411 - * problems if tptr is set to zero here, so we initialize it in user 412 - * space. 413 - */ 414 - if (!udata && hr_cq->tptr_addr) 415 - *hr_cq->tptr_addr = 0; 416 - 417 409 if (udata) { 418 410 resp.cqn = hr_cq->cqn; 419 411 ret = ib_copy_to_udata(udata, &resp, ··· 430 440 { 431 441 struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); 432 442 struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); 433 - 434 - if (hr_dev->hw->destroy_cq) 435 - hr_dev->hw->destroy_cq(ib_cq, udata); 436 443 437 444 free_cqc(hr_dev, hr_cq); 438 445 free_cqn(hr_dev, hr_cq->cqn);
-1
drivers/infiniband/hw/hns/hns_roce_db.c
··· 4 4 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. 5 5 */ 6 6 7 - #include <linux/platform_device.h> 8 7 #include <rdma/ib_umem.h> 9 8 #include "hns_roce_device.h" 10 9
+24 -84
drivers/infiniband/hw/hns/hns_roce_device.h
··· 36 36 #include <rdma/ib_verbs.h> 37 37 #include <rdma/hns-abi.h> 38 38 39 - #define DRV_NAME "hns_roce" 40 - 41 39 #define PCI_REVISION_ID_HIP08 0x21 42 40 #define PCI_REVISION_ID_HIP09 0x30 43 - 44 - #define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6') 45 41 46 42 #define HNS_ROCE_MAX_MSG_LEN 0x80000000 47 43 ··· 45 49 46 50 #define BA_BYTE_LEN 8 47 51 48 - /* Hardware specification only for v1 engine */ 49 52 #define HNS_ROCE_MIN_CQE_NUM 0x40 50 - #define HNS_ROCE_MIN_WQE_NUM 0x20 51 53 #define HNS_ROCE_MIN_SRQ_WQE_NUM 1 52 - 53 - /* Hardware specification only for v1 engine */ 54 - #define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7 55 - #define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000 56 - 57 - #define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20 58 - #define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \ 59 - (5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS) 60 - #define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2 61 - #define HNS_ROCE_MIN_CQE_CNT 16 62 - 63 - #define HNS_ROCE_RESERVED_SGE 1 64 54 65 55 #define HNS_ROCE_MAX_IRQ_NUM 128 66 56 ··· 84 102 #define HNS_ROCE_FRMR_MAX_PA 512 85 103 86 104 #define PKEY_ID 0xffff 87 - #define GUID_LEN 8 88 105 #define NODE_DESC_SIZE 64 89 106 #define DB_REG_OFFSET 0x1000 90 107 91 108 /* Configure to HW for PAGE_SIZE larger than 4KB */ 92 109 #define PG_SHIFT_OFFSET (PAGE_SHIFT - 12) 93 - 94 - #define PAGES_SHIFT_8 8 95 - #define PAGES_SHIFT_16 16 96 - #define PAGES_SHIFT_24 24 97 - #define PAGES_SHIFT_32 32 98 110 99 111 #define HNS_ROCE_IDX_QUE_ENTRY_SZ 4 100 112 #define SRQ_DB_REG 0x230 ··· 97 121 #define HNS_ROCE_CQ_BANK_NUM 4 98 122 99 123 #define CQ_BANKID_SHIFT 2 100 - 101 - /* The chip implementation of the consumer index is calculated 102 - * according to twice the actual EQ depth 103 - */ 104 - #define EQ_DEPTH_COEFF 2 105 124 106 125 enum { 107 126 SERV_TYPE_RC, ··· 153 182 HNS_ROCE_CAP_FLAG_FRMR = BIT(8), 154 183 HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9), 155 184 HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10), 185 + HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12), 156 186 HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14), 157 187 HNS_ROCE_CAP_FLAG_STASH = BIT(17), 158 188 }; ··· 199 227 200 228 enum hns_roce_mmap_type { 201 229 HNS_ROCE_MMAP_TYPE_DB = 1, 202 - HNS_ROCE_MMAP_TYPE_TPTR, 230 + HNS_ROCE_MMAP_TYPE_DWQE, 203 231 }; 204 232 205 233 struct hns_user_mmap_entry { ··· 214 242 struct list_head page_list; 215 243 struct mutex page_mutex; 216 244 struct hns_user_mmap_entry *db_mmap_entry; 217 - struct hns_user_mmap_entry *tptr_mmap_entry; 218 245 }; 219 246 220 247 struct hns_roce_pd { ··· 316 345 u32 pbl_buf_pg_sz; 317 346 }; 318 347 319 - /* Only support 4K page size for mr register */ 320 - #define MR_SIZE_4K 0 321 - 322 348 struct hns_roce_mr { 323 349 struct ib_mr ibmr; 324 350 u64 iova; /* MR's virtual original addr */ 325 351 u64 size; /* Address range of MR */ 326 352 u32 key; /* Key of MR */ 327 353 u32 pd; /* PD num of MR */ 328 - u32 access; /* Access permission of MR */ 354 + u32 access; /* Access permission of MR */ 329 355 int enabled; /* MR's active status */ 330 - int type; /* MR's register type */ 331 - u32 pbl_hop_num; /* multi-hop number */ 356 + int type; /* MR's register type */ 357 + u32 pbl_hop_num; /* multi-hop number */ 332 358 struct hns_roce_mtr pbl_mtr; 333 359 u32 npages; 334 360 dma_addr_t *page_list; ··· 342 374 u32 wqe_cnt; /* WQE num */ 343 375 u32 max_gs; 344 376 u32 rsv_sge; 345 - int offset; 346 - int wqe_shift; /* WQE size */ 377 + u32 offset; 378 + u32 wqe_shift; /* WQE size */ 347 379 u32 head; 348 380 u32 tail; 349 381 void __iomem *db_reg; 350 382 }; 351 383 352 384 
struct hns_roce_sge { 353 - unsigned int sge_cnt; /* SGE num */ 354 - int offset; 355 - int sge_shift; /* SGE size */ 385 + unsigned int sge_cnt; /* SGE num */ 386 + u32 offset; 387 + u32 sge_shift; /* SGE size */ 356 388 }; 357 389 358 390 struct hns_roce_buf_list { ··· 421 453 u32 cons_index; 422 454 u32 *set_ci_db; 423 455 void __iomem *db_reg; 424 - u16 *tptr_addr; 425 456 int arm_sn; 426 457 int cqe_size; 427 458 unsigned long cqn; ··· 435 468 436 469 struct hns_roce_idx_que { 437 470 struct hns_roce_mtr mtr; 438 - int entry_shift; 471 + u32 entry_shift; 439 472 unsigned long *bitmap; 440 473 u32 head; 441 474 u32 tail; ··· 447 480 u32 wqe_cnt; 448 481 int max_gs; 449 482 u32 rsv_sge; 450 - int wqe_shift; 483 + u32 wqe_shift; 451 484 u32 cqn; 452 485 u32 xrcdn; 453 486 void __iomem *db_reg; ··· 504 537 struct hns_roce_ida srq_ida; 505 538 struct xarray xa; 506 539 struct hns_roce_hem_table table; 507 - }; 508 - 509 - struct hns_roce_raq_table { 510 - struct hns_roce_buf_list *e_raq_buf; 511 540 }; 512 541 513 542 struct hns_roce_av { ··· 590 627 u32 queue_num; 591 628 }; 592 629 593 - enum { 594 - HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5), 595 - }; 596 - 597 630 struct hns_roce_qp { 598 631 struct ib_qp ibqp; 599 632 struct hns_roce_wq rq; ··· 609 650 u8 sl; 610 651 u8 resp_depth; 611 652 u8 state; 612 - u32 access_flags; 613 653 u32 atomic_rd_en; 614 - u32 pkey_index; 615 654 u32 qkey; 616 655 void (*event)(struct hns_roce_qp *qp, 617 656 enum hns_roce_event event_type); ··· 629 672 unsigned long flush_flag; 630 673 struct hns_roce_work flush_work; 631 674 struct hns_roce_rinl_buf rq_inl_buf; 632 - struct list_head node; /* all qps are on a list */ 633 - struct list_head rq_node; /* all recv qps are on a list */ 634 - struct list_head sq_node; /* all send qps are on a list */ 675 + struct list_head node; /* all qps are on a list */ 676 + struct list_head rq_node; /* all recv qps are on a list */ 677 + struct list_head sq_node; /* all send qps are on a list */ 678 + struct hns_user_mmap_entry *dwqe_mmap_entry; 635 679 }; 636 680 637 681 struct hns_roce_ib_iboe { ··· 640 682 struct net_device *netdevs[HNS_ROCE_MAX_PORTS]; 641 683 struct notifier_block nb; 642 684 u8 phy_port[HNS_ROCE_MAX_PORTS]; 643 - }; 644 - 645 - enum { 646 - HNS_ROCE_EQ_STAT_INVALID = 0, 647 - HNS_ROCE_EQ_STAT_VALID = 2, 648 685 }; 649 686 650 687 struct hns_roce_ceqe { ··· 673 720 int type_flag; /* Aeq:1 ceq:0 */ 674 721 int eqn; 675 722 u32 entries; 676 - u32 log_entries; 677 723 int eqe_size; 678 724 int irq; 679 - int log_page_size; 680 725 u32 cons_index; 681 - struct hns_roce_buf_list *buf_list; 682 726 int over_ignore; 683 727 int coalesce; 684 728 int arm_st; ··· 690 740 691 741 struct hns_roce_eq_table { 692 742 struct hns_roce_eq *eq; 693 - void __iomem **eqc_base; /* only for hw v1 */ 694 743 }; 695 744 696 745 enum cong_type { ··· 716 767 u32 reserved_qps; 717 768 int num_qpc_timer; 718 769 int num_cqc_timer; 719 - int num_srqs; 770 + u32 num_srqs; 720 771 u32 max_wqes; 721 772 u32 max_srq_wrs; 722 773 u32 max_srq_sges; ··· 730 781 u32 min_cqes; 731 782 u32 min_wqes; 732 783 u32 reserved_cqs; 733 - int reserved_srqs; 784 + u32 reserved_srqs; 734 785 int num_aeq_vectors; 735 786 int num_comp_vectors; 736 787 int num_other_vectors; ··· 804 855 u32 cqc_timer_ba_pg_sz; 805 856 u32 cqc_timer_buf_pg_sz; 806 857 u32 cqc_timer_hop_num; 807 - u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ 858 + u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ 808 859 u32 cqe_buf_pg_sz; 809 860 u32 cqe_hop_num; 810 861 
u32 srqwqe_ba_pg_sz; ··· 823 874 u32 gmv_hop_num; 824 875 u32 sl_num; 825 876 u32 llm_buf_pg_sz; 826 - u32 chunk_sz; /* chunk size in non multihop mode */ 877 + u32 chunk_sz; /* chunk size in non multihop mode */ 827 878 u64 flags; 828 879 u16 default_ceq_max_cnt; 829 880 u16 default_ceq_period; ··· 846 897 }; 847 898 848 899 struct hns_roce_hw { 849 - int (*reset)(struct hns_roce_dev *hr_dev, bool enable); 850 900 int (*cmq_init)(struct hns_roce_dev *hr_dev); 851 901 void (*cmq_exit)(struct hns_roce_dev *hr_dev); 852 902 int (*hw_profile)(struct hns_roce_dev *hr_dev); ··· 857 909 int (*poll_mbox_done)(struct hns_roce_dev *hr_dev, 858 910 unsigned int timeout); 859 911 bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy); 860 - int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index, 912 + int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index, 861 913 const union ib_gid *gid, const struct ib_gid_attr *attr); 862 914 int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, 863 915 const u8 *addr); 864 - void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port, 865 - enum ib_mtu mtu); 866 916 int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf, 867 - struct hns_roce_mr *mr, unsigned long mtpt_idx); 917 + struct hns_roce_mr *mr); 868 918 int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev, 869 919 struct hns_roce_mr *mr, int flags, 870 920 void *mb_buf); ··· 882 936 enum ib_qp_state new_state); 883 937 int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev, 884 938 struct hns_roce_qp *hr_qp); 885 - int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, 886 - struct ib_udata *udata); 887 - int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata); 888 939 int (*init_eq)(struct hns_roce_dev *hr_dev); 889 940 void (*cleanup_eq)(struct hns_roce_dev *hr_dev); 890 941 int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf); ··· 891 948 892 949 struct hns_roce_dev { 893 950 struct ib_device ib_dev; 894 - struct platform_device *pdev; 895 951 struct pci_dev *pci_dev; 896 952 struct device *dev; 897 953 struct hns_roce_uar priv_uar; 898 954 const char *irq_names[HNS_ROCE_MAX_IRQ_NUM]; 899 955 spinlock_t sm_lock; 900 - spinlock_t bt_cmd_lock; 901 956 bool active; 902 957 bool is_reset; 903 958 bool dis_db; ··· 942 1001 int loop_idc; 943 1002 u32 sdb_offset; 944 1003 u32 odb_offset; 945 - dma_addr_t tptr_dma_addr; /* only for hw v1 */ 946 - u32 tptr_size; /* only for hw v1 */ 947 1004 const struct hns_roce_hw *hw; 948 1005 void *priv; 949 1006 struct workqueue_struct *irq_workq; ··· 949 1010 u32 func_num; 950 1011 u32 is_vf; 951 1012 u32 cong_algo_tmpl_id; 1013 + u64 dwqe_page; 952 1014 }; 953 1015 954 1016 static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev) ··· 1098 1158 /* hns roce hw need current block and next block addr from mtt */ 1099 1159 #define MTT_MIN_COUNT 2 1100 1160 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, 1101 - int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); 1161 + u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); 1102 1162 int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, 1103 1163 struct hns_roce_buf_attr *buf_attr, 1104 1164 unsigned int page_shift, struct ib_udata *udata,
-1
drivers/infiniband/hw/hns/hns_roce_hem.c
··· 31 31 * SOFTWARE. 32 32 */ 33 33 34 - #include <linux/platform_device.h> 35 34 #include "hns_roce_device.h" 36 35 #include "hns_roce_hem.h" 37 36 #include "hns_roce_common.h"
-4675
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
··· 1 - /* 2 - * Copyright (c) 2016 Hisilicon Limited. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 31 - */ 32 - 33 - #include <linux/platform_device.h> 34 - #include <linux/acpi.h> 35 - #include <linux/etherdevice.h> 36 - #include <linux/interrupt.h> 37 - #include <linux/of.h> 38 - #include <linux/of_platform.h> 39 - #include <rdma/ib_umem.h> 40 - #include "hns_roce_common.h" 41 - #include "hns_roce_device.h" 42 - #include "hns_roce_cmd.h" 43 - #include "hns_roce_hem.h" 44 - #include "hns_roce_hw_v1.h" 45 - 46 - /** 47 - * hns_get_gid_index - Get gid index. 48 - * @hr_dev: pointer to structure hns_roce_dev. 
49 - * @port: port, value range: 0 ~ MAX 50 - * @gid_index: gid_index, value range: 0 ~ MAX 51 - * Description: 52 - * N ports shared gids, allocation method as follow: 53 - * GID[0][0], GID[1][0],.....GID[N - 1][0], 54 - * GID[0][0], GID[1][0],.....GID[N - 1][0], 55 - * And so on 56 - */ 57 - u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index) 58 - { 59 - return gid_index * hr_dev->caps.num_ports + port; 60 - } 61 - 62 - static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg) 63 - { 64 - dseg->lkey = cpu_to_le32(sg->lkey); 65 - dseg->addr = cpu_to_le64(sg->addr); 66 - dseg->len = cpu_to_le32(sg->length); 67 - } 68 - 69 - static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr, 70 - u32 rkey) 71 - { 72 - rseg->raddr = cpu_to_le64(remote_addr); 73 - rseg->rkey = cpu_to_le32(rkey); 74 - rseg->len = 0; 75 - } 76 - 77 - static int hns_roce_v1_post_send(struct ib_qp *ibqp, 78 - const struct ib_send_wr *wr, 79 - const struct ib_send_wr **bad_wr) 80 - { 81 - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 82 - struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah); 83 - struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL; 84 - struct hns_roce_wqe_ctrl_seg *ctrl = NULL; 85 - struct hns_roce_wqe_data_seg *dseg = NULL; 86 - struct hns_roce_qp *qp = to_hr_qp(ibqp); 87 - struct device *dev = &hr_dev->pdev->dev; 88 - struct hns_roce_sq_db sq_db = {}; 89 - int ps_opcode, i; 90 - unsigned long flags = 0; 91 - void *wqe = NULL; 92 - __le32 doorbell[2]; 93 - const u8 *smac; 94 - int ret = 0; 95 - int loopback; 96 - u32 wqe_idx; 97 - int nreq; 98 - 99 - if (unlikely(ibqp->qp_type != IB_QPT_GSI && 100 - ibqp->qp_type != IB_QPT_RC)) { 101 - dev_err(dev, "un-supported QP type\n"); 102 - *bad_wr = NULL; 103 - return -EOPNOTSUPP; 104 - } 105 - 106 - spin_lock_irqsave(&qp->sq.lock, flags); 107 - 108 - for (nreq = 0; wr; ++nreq, wr = wr->next) { 109 - if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 110 - ret = -ENOMEM; 111 - *bad_wr = wr; 112 - goto out; 113 - } 114 - 115 - wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1); 116 - 117 - if (unlikely(wr->num_sge > qp->sq.max_gs)) { 118 - dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n", 119 - wr->num_sge, qp->sq.max_gs); 120 - ret = -EINVAL; 121 - *bad_wr = wr; 122 - goto out; 123 - } 124 - 125 - wqe = hns_roce_get_send_wqe(qp, wqe_idx); 126 - qp->sq.wrid[wqe_idx] = wr->wr_id; 127 - 128 - /* Corresponding to the RC and RD type wqe process separately */ 129 - if (ibqp->qp_type == IB_QPT_GSI) { 130 - ud_sq_wqe = wqe; 131 - roce_set_field(ud_sq_wqe->dmac_h, 132 - UD_SEND_WQE_U32_4_DMAC_0_M, 133 - UD_SEND_WQE_U32_4_DMAC_0_S, 134 - ah->av.mac[0]); 135 - roce_set_field(ud_sq_wqe->dmac_h, 136 - UD_SEND_WQE_U32_4_DMAC_1_M, 137 - UD_SEND_WQE_U32_4_DMAC_1_S, 138 - ah->av.mac[1]); 139 - roce_set_field(ud_sq_wqe->dmac_h, 140 - UD_SEND_WQE_U32_4_DMAC_2_M, 141 - UD_SEND_WQE_U32_4_DMAC_2_S, 142 - ah->av.mac[2]); 143 - roce_set_field(ud_sq_wqe->dmac_h, 144 - UD_SEND_WQE_U32_4_DMAC_3_M, 145 - UD_SEND_WQE_U32_4_DMAC_3_S, 146 - ah->av.mac[3]); 147 - 148 - roce_set_field(ud_sq_wqe->u32_8, 149 - UD_SEND_WQE_U32_8_DMAC_4_M, 150 - UD_SEND_WQE_U32_8_DMAC_4_S, 151 - ah->av.mac[4]); 152 - roce_set_field(ud_sq_wqe->u32_8, 153 - UD_SEND_WQE_U32_8_DMAC_5_M, 154 - UD_SEND_WQE_U32_8_DMAC_5_S, 155 - ah->av.mac[5]); 156 - 157 - smac = (const u8 *)hr_dev->dev_addr[qp->port]; 158 - loopback = ether_addr_equal_unaligned(ah->av.mac, 159 - smac) ? 
1 : 0; 160 - roce_set_bit(ud_sq_wqe->u32_8, 161 - UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S, 162 - loopback); 163 - 164 - roce_set_field(ud_sq_wqe->u32_8, 165 - UD_SEND_WQE_U32_8_OPERATION_TYPE_M, 166 - UD_SEND_WQE_U32_8_OPERATION_TYPE_S, 167 - HNS_ROCE_WQE_OPCODE_SEND); 168 - roce_set_field(ud_sq_wqe->u32_8, 169 - UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M, 170 - UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S, 171 - 2); 172 - roce_set_bit(ud_sq_wqe->u32_8, 173 - UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S, 174 - 1); 175 - 176 - ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ? 177 - cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) | 178 - (wr->send_flags & IB_SEND_SOLICITED ? 179 - cpu_to_le32(HNS_ROCE_WQE_SE) : 0) | 180 - ((wr->opcode == IB_WR_SEND_WITH_IMM) ? 181 - cpu_to_le32(HNS_ROCE_WQE_IMM) : 0); 182 - 183 - roce_set_field(ud_sq_wqe->u32_16, 184 - UD_SEND_WQE_U32_16_DEST_QP_M, 185 - UD_SEND_WQE_U32_16_DEST_QP_S, 186 - ud_wr(wr)->remote_qpn); 187 - roce_set_field(ud_sq_wqe->u32_16, 188 - UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M, 189 - UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S, 190 - ah->av.stat_rate); 191 - 192 - roce_set_field(ud_sq_wqe->u32_36, 193 - UD_SEND_WQE_U32_36_FLOW_LABEL_M, 194 - UD_SEND_WQE_U32_36_FLOW_LABEL_S, 195 - ah->av.flowlabel); 196 - roce_set_field(ud_sq_wqe->u32_36, 197 - UD_SEND_WQE_U32_36_PRIORITY_M, 198 - UD_SEND_WQE_U32_36_PRIORITY_S, 199 - ah->av.sl); 200 - roce_set_field(ud_sq_wqe->u32_36, 201 - UD_SEND_WQE_U32_36_SGID_INDEX_M, 202 - UD_SEND_WQE_U32_36_SGID_INDEX_S, 203 - hns_get_gid_index(hr_dev, qp->phy_port, 204 - ah->av.gid_index)); 205 - 206 - roce_set_field(ud_sq_wqe->u32_40, 207 - UD_SEND_WQE_U32_40_HOP_LIMIT_M, 208 - UD_SEND_WQE_U32_40_HOP_LIMIT_S, 209 - ah->av.hop_limit); 210 - roce_set_field(ud_sq_wqe->u32_40, 211 - UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M, 212 - UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, 213 - ah->av.tclass); 214 - 215 - memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN); 216 - 217 - ud_sq_wqe->va0_l = 218 - cpu_to_le32((u32)wr->sg_list[0].addr); 219 - ud_sq_wqe->va0_h = 220 - cpu_to_le32((wr->sg_list[0].addr) >> 32); 221 - ud_sq_wqe->l_key0 = 222 - cpu_to_le32(wr->sg_list[0].lkey); 223 - 224 - ud_sq_wqe->va1_l = 225 - cpu_to_le32((u32)wr->sg_list[1].addr); 226 - ud_sq_wqe->va1_h = 227 - cpu_to_le32((wr->sg_list[1].addr) >> 32); 228 - ud_sq_wqe->l_key1 = 229 - cpu_to_le32(wr->sg_list[1].lkey); 230 - } else if (ibqp->qp_type == IB_QPT_RC) { 231 - u32 tmp_len = 0; 232 - 233 - ctrl = wqe; 234 - memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg)); 235 - for (i = 0; i < wr->num_sge; i++) 236 - tmp_len += wr->sg_list[i].length; 237 - 238 - ctrl->msg_length = 239 - cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len); 240 - 241 - ctrl->sgl_pa_h = 0; 242 - ctrl->flag = 0; 243 - 244 - switch (wr->opcode) { 245 - case IB_WR_SEND_WITH_IMM: 246 - case IB_WR_RDMA_WRITE_WITH_IMM: 247 - ctrl->imm_data = wr->ex.imm_data; 248 - break; 249 - case IB_WR_SEND_WITH_INV: 250 - ctrl->inv_key = 251 - cpu_to_le32(wr->ex.invalidate_rkey); 252 - break; 253 - default: 254 - ctrl->imm_data = 0; 255 - break; 256 - } 257 - 258 - /* Ctrl field, ctrl set type: sig, solic, imm, fence */ 259 - /* SO wait for conforming application scenarios */ 260 - ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ? 261 - cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) | 262 - (wr->send_flags & IB_SEND_SOLICITED ? 263 - cpu_to_le32(HNS_ROCE_WQE_SE) : 0) | 264 - ((wr->opcode == IB_WR_SEND_WITH_IMM || 265 - wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ? 
266 - cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) | 267 - (wr->send_flags & IB_SEND_FENCE ? 268 - (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0); 269 - 270 - wqe += sizeof(struct hns_roce_wqe_ctrl_seg); 271 - 272 - switch (wr->opcode) { 273 - case IB_WR_RDMA_READ: 274 - ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ; 275 - set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, 276 - rdma_wr(wr)->rkey); 277 - break; 278 - case IB_WR_RDMA_WRITE: 279 - case IB_WR_RDMA_WRITE_WITH_IMM: 280 - ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE; 281 - set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, 282 - rdma_wr(wr)->rkey); 283 - break; 284 - case IB_WR_SEND: 285 - case IB_WR_SEND_WITH_INV: 286 - case IB_WR_SEND_WITH_IMM: 287 - ps_opcode = HNS_ROCE_WQE_OPCODE_SEND; 288 - break; 289 - case IB_WR_LOCAL_INV: 290 - case IB_WR_ATOMIC_CMP_AND_SWP: 291 - case IB_WR_ATOMIC_FETCH_AND_ADD: 292 - case IB_WR_LSO: 293 - default: 294 - ps_opcode = HNS_ROCE_WQE_OPCODE_MASK; 295 - break; 296 - } 297 - ctrl->flag |= cpu_to_le32(ps_opcode); 298 - wqe += sizeof(struct hns_roce_wqe_raddr_seg); 299 - 300 - dseg = wqe; 301 - if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { 302 - if (le32_to_cpu(ctrl->msg_length) > 303 - hr_dev->caps.max_sq_inline) { 304 - ret = -EINVAL; 305 - *bad_wr = wr; 306 - dev_err(dev, "inline len(1-%d)=%d, illegal", 307 - le32_to_cpu(ctrl->msg_length), 308 - hr_dev->caps.max_sq_inline); 309 - goto out; 310 - } 311 - for (i = 0; i < wr->num_sge; i++) { 312 - memcpy(wqe, ((void *) (uintptr_t) 313 - wr->sg_list[i].addr), 314 - wr->sg_list[i].length); 315 - wqe += wr->sg_list[i].length; 316 - } 317 - ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE); 318 - } else { 319 - /* sqe num is two */ 320 - for (i = 0; i < wr->num_sge; i++) 321 - set_data_seg(dseg + i, wr->sg_list + i); 322 - 323 - ctrl->flag |= cpu_to_le32(wr->num_sge << 324 - HNS_ROCE_WQE_SGE_NUM_BIT); 325 - } 326 - } 327 - } 328 - 329 - out: 330 - /* Set DB return */ 331 - if (likely(nreq)) { 332 - qp->sq.head += nreq; 333 - 334 - roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M, 335 - SQ_DOORBELL_U32_4_SQ_HEAD_S, 336 - (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1))); 337 - roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M, 338 - SQ_DOORBELL_U32_4_SL_S, qp->sl); 339 - roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M, 340 - SQ_DOORBELL_U32_4_PORT_S, qp->phy_port); 341 - roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M, 342 - SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn); 343 - roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1); 344 - 345 - doorbell[0] = sq_db.u32_4; 346 - doorbell[1] = sq_db.u32_8; 347 - 348 - hns_roce_write64_k(doorbell, qp->sq.db_reg); 349 - } 350 - 351 - spin_unlock_irqrestore(&qp->sq.lock, flags); 352 - 353 - return ret; 354 - } 355 - 356 - static int hns_roce_v1_post_recv(struct ib_qp *ibqp, 357 - const struct ib_recv_wr *wr, 358 - const struct ib_recv_wr **bad_wr) 359 - { 360 - struct hns_roce_rq_wqe_ctrl *ctrl = NULL; 361 - struct hns_roce_wqe_data_seg *scat = NULL; 362 - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 363 - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 364 - struct device *dev = &hr_dev->pdev->dev; 365 - struct hns_roce_rq_db rq_db = {}; 366 - __le32 doorbell[2] = {0}; 367 - unsigned long flags = 0; 368 - unsigned int wqe_idx; 369 - int ret = 0; 370 - int nreq; 371 - int i; 372 - u32 reg_val; 373 - 374 - spin_lock_irqsave(&hr_qp->rq.lock, flags); 375 - 376 - for (nreq = 0; wr; ++nreq, wr = wr->next) { 377 - if (hns_roce_wq_overflow(&hr_qp->rq, nreq, 378 - hr_qp->ibqp.recv_cq)) { 379 - ret = -ENOMEM; 380 - *bad_wr = wr; 381 - 
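
The SQ doorbell built at the end of this hunk is two 32-bit words assembled with roce_set_field()/roce_set_bit() and then written to the device as one 64-bit unit. A sketch of the clear-then-OR field helper, with invented mask and shift constants:

#include <stdint.h>
#include <stdio.h>

/* The shift/mask values below are made up for illustration; only the
 * clear-then-OR pattern mirrors the removed roce_set_field(). */
#define DB_SQ_HEAD_S  0
#define DB_SQ_HEAD_M  (0xffffu << DB_SQ_HEAD_S)
#define DB_HW_SYNC_S  31

static void set_field(uint32_t *w, uint32_t mask, int shift, uint32_t val)
{
        *w = (*w & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t db[2] = { 0, 0 };

        set_field(&db[0], DB_SQ_HEAD_M, DB_SQ_HEAD_S, 0x1234);
        db[1] |= 1u << DB_HW_SYNC_S;            /* roce_set_bit() analogue */

        printf("db = { 0x%08x, 0x%08x }\n", db[0], db[1]);
        return 0;
}
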
goto out; 382 - } 383 - 384 - wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); 385 - 386 - if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { 387 - dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n", 388 - wr->num_sge, hr_qp->rq.max_gs); 389 - ret = -EINVAL; 390 - *bad_wr = wr; 391 - goto out; 392 - } 393 - 394 - ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx); 395 - 396 - roce_set_field(ctrl->rwqe_byte_12, 397 - RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M, 398 - RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S, 399 - wr->num_sge); 400 - 401 - scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1); 402 - 403 - for (i = 0; i < wr->num_sge; i++) 404 - set_data_seg(scat + i, wr->sg_list + i); 405 - 406 - hr_qp->rq.wrid[wqe_idx] = wr->wr_id; 407 - } 408 - 409 - out: 410 - if (likely(nreq)) { 411 - hr_qp->rq.head += nreq; 412 - 413 - if (ibqp->qp_type == IB_QPT_GSI) { 414 - __le32 tmp; 415 - 416 - /* SW update GSI rq header */ 417 - reg_val = roce_read(to_hr_dev(ibqp->device), 418 - ROCEE_QP1C_CFG3_0_REG + 419 - QP1C_CFGN_OFFSET * hr_qp->phy_port); 420 - tmp = cpu_to_le32(reg_val); 421 - roce_set_field(tmp, 422 - ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M, 423 - ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S, 424 - hr_qp->rq.head); 425 - reg_val = le32_to_cpu(tmp); 426 - roce_write(to_hr_dev(ibqp->device), 427 - ROCEE_QP1C_CFG3_0_REG + 428 - QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val); 429 - } else { 430 - roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M, 431 - RQ_DOORBELL_U32_4_RQ_HEAD_S, 432 - hr_qp->rq.head); 433 - roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M, 434 - RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); 435 - roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M, 436 - RQ_DOORBELL_U32_8_CMD_S, 1); 437 - roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S, 438 - 1); 439 - 440 - doorbell[0] = rq_db.u32_4; 441 - doorbell[1] = rq_db.u32_8; 442 - 443 - hns_roce_write64_k(doorbell, hr_qp->rq.db_reg); 444 - } 445 - } 446 - spin_unlock_irqrestore(&hr_qp->rq.lock, flags); 447 - 448 - return ret; 449 - } 450 - 451 - static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev, 452 - int sdb_mode, int odb_mode) 453 - { 454 - __le32 tmp; 455 - u32 val; 456 - 457 - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); 458 - tmp = cpu_to_le32(val); 459 - roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode); 460 - roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode); 461 - val = le32_to_cpu(tmp); 462 - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); 463 - } 464 - 465 - static int hns_roce_v1_set_hem(struct hns_roce_dev *hr_dev, 466 - struct hns_roce_hem_table *table, int obj, 467 - int step_idx) 468 - { 469 - spinlock_t *lock = &hr_dev->bt_cmd_lock; 470 - struct device *dev = hr_dev->dev; 471 - struct hns_roce_hem_iter iter; 472 - void __iomem *bt_cmd; 473 - __le32 bt_cmd_val[2]; 474 - __le32 bt_cmd_h = 0; 475 - unsigned long flags; 476 - __le32 bt_cmd_l; 477 - int ret = 0; 478 - u64 bt_ba; 479 - long end; 480 - 481 - /* Find the HEM(Hardware Entry Memory) entry */ 482 - unsigned long i = obj / (table->table_chunk_size / table->obj_size); 483 - 484 - switch (table->type) { 485 - case HEM_TYPE_QPC: 486 - case HEM_TYPE_MTPT: 487 - case HEM_TYPE_CQC: 488 - case HEM_TYPE_SRQC: 489 - roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, 490 - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); 491 - break; 492 - default: 493 - return ret; 494 - } 495 - 496 - roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, 497 - ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); 498 - roce_set_bit(bt_cmd_h, 
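
Both the send and receive queues here are power-of-two rings, so the slot for the next WQE comes from a mask rather than a modulo. A tiny sketch with an assumed depth of 8:

#include <stdio.h>

int main(void)
{
        unsigned int wqe_cnt = 8;               /* must be a power of two */
        unsigned int head = 6, nreq = 3;

        unsigned int wqe_idx = (head + nreq) & (wqe_cnt - 1);
        printf("wqe_idx = %u\n", wqe_idx);      /* 9 wraps to 1 */
        return 0;
}
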
ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); 499 - roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); 500 - 501 - /* Currently iter only a chunk */ 502 - for (hns_roce_hem_first(table->hem[i], &iter); 503 - !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) { 504 - bt_ba = hns_roce_hem_addr(&iter) >> HNS_HW_PAGE_SHIFT; 505 - 506 - spin_lock_irqsave(lock, flags); 507 - 508 - bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; 509 - 510 - end = HW_SYNC_TIMEOUT_MSECS; 511 - while (end > 0) { 512 - if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)) 513 - break; 514 - 515 - mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); 516 - end -= HW_SYNC_SLEEP_TIME_INTERVAL; 517 - } 518 - 519 - if (end <= 0) { 520 - dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); 521 - spin_unlock_irqrestore(lock, flags); 522 - return -EBUSY; 523 - } 524 - 525 - bt_cmd_l = cpu_to_le32(bt_ba); 526 - roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, 527 - ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 528 - upper_32_bits(bt_ba)); 529 - 530 - bt_cmd_val[0] = bt_cmd_l; 531 - bt_cmd_val[1] = bt_cmd_h; 532 - hns_roce_write64_k(bt_cmd_val, 533 - hr_dev->reg_base + ROCEE_BT_CMD_L_REG); 534 - spin_unlock_irqrestore(lock, flags); 535 - } 536 - 537 - return ret; 538 - } 539 - 540 - static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode, 541 - u32 odb_mode) 542 - { 543 - __le32 tmp; 544 - u32 val; 545 - 546 - /* Configure SDB/ODB extend mode */ 547 - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); 548 - tmp = cpu_to_le32(val); 549 - roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode); 550 - roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode); 551 - val = le32_to_cpu(tmp); 552 - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); 553 - } 554 - 555 - static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept, 556 - u32 sdb_alful) 557 - { 558 - __le32 tmp; 559 - u32 val; 560 - 561 - /* Configure SDB */ 562 - val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG); 563 - tmp = cpu_to_le32(val); 564 - roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M, 565 - ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful); 566 - roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M, 567 - ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept); 568 - val = le32_to_cpu(tmp); 569 - roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val); 570 - } 571 - 572 - static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept, 573 - u32 odb_alful) 574 - { 575 - __le32 tmp; 576 - u32 val; 577 - 578 - /* Configure ODB */ 579 - val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG); 580 - tmp = cpu_to_le32(val); 581 - roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M, 582 - ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful); 583 - roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M, 584 - ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept); 585 - val = le32_to_cpu(tmp); 586 - roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val); 587 - } 588 - 589 - static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept, 590 - u32 ext_sdb_alful) 591 - { 592 - struct hns_roce_v1_priv *priv = hr_dev->priv; 593 - struct hns_roce_db_table *db = &priv->db_table; 594 - struct device *dev = &hr_dev->pdev->dev; 595 - dma_addr_t sdb_dma_addr; 596 - __le32 tmp; 597 - u32 val; 598 - 599 - /* Configure extend SDB threshold */ 600 - roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept); 601 - roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful); 602 - 603 - /* Configure extend SDB base addr */ 604 - sdb_dma_addr = 
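
Before issuing a BT command, hns_roce_v1_set_hem() busy-waits on the hardware sync bit with a fixed millisecond budget and fails with -EBUSY when the budget runs out. The shape of that loop in standalone C, with hw_busy() standing in for the readl() test:

#include <stdio.h>

static int polls;

/* Stand-in for readl(bt_cmd) >> BT_CMD_SYNC_SHIFT: pretend the hardware
 * deasserts the sync bit on the third poll. */
static int hw_busy(void)
{
        return ++polls < 3;
}

int main(void)
{
        long end = 10;          /* stand-in for HW_SYNC_TIMEOUT_MSECS */
        long step = 1;          /* stand-in for HW_SYNC_SLEEP_TIME_INTERVAL */

        while (end > 0) {
                if (!hw_busy())
                        break;
                end -= step;    /* the real loop mdelay()s here */
        }

        printf(end > 0 ? "ready\n" : "timeout, -EBUSY\n");
        return 0;
}
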
db->ext_db->sdb_buf_list->map; 605 - roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12)); 606 - 607 - /* Configure extend SDB depth */ 608 - val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG); 609 - tmp = cpu_to_le32(val); 610 - roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M, 611 - ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S, 612 - db->ext_db->esdb_dep); 613 - /* 614 - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of 615 - * using 4K page, and shift more 32 because of 616 - * calculating the high 32 bit value evaluated to hardware. 617 - */ 618 - roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M, 619 - ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44); 620 - val = le32_to_cpu(tmp); 621 - roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val); 622 - 623 - dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep); 624 - dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n", 625 - ext_sdb_alept, ext_sdb_alful); 626 - } 627 - 628 - static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept, 629 - u32 ext_odb_alful) 630 - { 631 - struct hns_roce_v1_priv *priv = hr_dev->priv; 632 - struct hns_roce_db_table *db = &priv->db_table; 633 - struct device *dev = &hr_dev->pdev->dev; 634 - dma_addr_t odb_dma_addr; 635 - __le32 tmp; 636 - u32 val; 637 - 638 - /* Configure extend ODB threshold */ 639 - roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept); 640 - roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful); 641 - 642 - /* Configure extend ODB base addr */ 643 - odb_dma_addr = db->ext_db->odb_buf_list->map; 644 - roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12)); 645 - 646 - /* Configure extend ODB depth */ 647 - val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG); 648 - tmp = cpu_to_le32(val); 649 - roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M, 650 - ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S, 651 - db->ext_db->eodb_dep); 652 - roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M, 653 - ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S, 654 - db->ext_db->eodb_dep); 655 - val = le32_to_cpu(tmp); 656 - roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val); 657 - 658 - dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep); 659 - dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n", 660 - ext_odb_alept, ext_odb_alful); 661 - } 662 - 663 - static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod, 664 - u32 odb_ext_mod) 665 - { 666 - struct hns_roce_v1_priv *priv = hr_dev->priv; 667 - struct hns_roce_db_table *db = &priv->db_table; 668 - struct device *dev = &hr_dev->pdev->dev; 669 - dma_addr_t sdb_dma_addr; 670 - dma_addr_t odb_dma_addr; 671 - int ret = 0; 672 - 673 - db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL); 674 - if (!db->ext_db) 675 - return -ENOMEM; 676 - 677 - if (sdb_ext_mod) { 678 - db->ext_db->sdb_buf_list = kmalloc( 679 - sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL); 680 - if (!db->ext_db->sdb_buf_list) { 681 - ret = -ENOMEM; 682 - goto ext_sdb_buf_fail_out; 683 - } 684 - 685 - db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev, 686 - HNS_ROCE_V1_EXT_SDB_SIZE, 687 - &sdb_dma_addr, GFP_KERNEL); 688 - if (!db->ext_db->sdb_buf_list->buf) { 689 - ret = -ENOMEM; 690 - goto alloc_sq_db_buf_fail; 691 - } 692 - db->ext_db->sdb_buf_list->map = sdb_dma_addr; 693 - 694 - db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH); 695 - hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT, 696 - HNS_ROCE_V1_EXT_SDB_ALFUL); 697 - } else 698 - hns_roce_set_sdb(hr_dev, 
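
The "44 = 32 + 12" comment above describes how a 4K-aligned DMA address is handed to the hardware: the low register takes addr >> 12 (the address in 4K units) and the high field takes addr >> 44, the bits left over above the low 32. A sketch with an illustrative address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative 4K-aligned DMA address, not a real mapping. */
        uint64_t addr = 0x0000123456789000ull;

        uint32_t lo = (uint32_t)(addr >> 12);   /* 4K-unit address */
        uint32_t hi = (uint32_t)(addr >> 44);   /* bits above the low 32 */

        printf("lo = 0x%08x, hi = 0x%x\n", lo, hi);
        return 0;
}
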
HNS_ROCE_V1_SDB_ALEPT, 699 - HNS_ROCE_V1_SDB_ALFUL); 700 - 701 - if (odb_ext_mod) { 702 - db->ext_db->odb_buf_list = kmalloc( 703 - sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL); 704 - if (!db->ext_db->odb_buf_list) { 705 - ret = -ENOMEM; 706 - goto ext_odb_buf_fail_out; 707 - } 708 - 709 - db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev, 710 - HNS_ROCE_V1_EXT_ODB_SIZE, 711 - &odb_dma_addr, GFP_KERNEL); 712 - if (!db->ext_db->odb_buf_list->buf) { 713 - ret = -ENOMEM; 714 - goto alloc_otr_db_buf_fail; 715 - } 716 - db->ext_db->odb_buf_list->map = odb_dma_addr; 717 - 718 - db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH); 719 - hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT, 720 - HNS_ROCE_V1_EXT_ODB_ALFUL); 721 - } else 722 - hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT, 723 - HNS_ROCE_V1_ODB_ALFUL); 724 - 725 - hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod); 726 - 727 - return 0; 728 - 729 - alloc_otr_db_buf_fail: 730 - kfree(db->ext_db->odb_buf_list); 731 - 732 - ext_odb_buf_fail_out: 733 - if (sdb_ext_mod) { 734 - dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE, 735 - db->ext_db->sdb_buf_list->buf, 736 - db->ext_db->sdb_buf_list->map); 737 - } 738 - 739 - alloc_sq_db_buf_fail: 740 - if (sdb_ext_mod) 741 - kfree(db->ext_db->sdb_buf_list); 742 - 743 - ext_sdb_buf_fail_out: 744 - kfree(db->ext_db); 745 - return ret; 746 - } 747 - 748 - static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev, 749 - struct ib_pd *pd) 750 - { 751 - struct device *dev = &hr_dev->pdev->dev; 752 - struct ib_qp_init_attr init_attr; 753 - struct ib_qp *qp; 754 - 755 - memset(&init_attr, 0, sizeof(struct ib_qp_init_attr)); 756 - init_attr.qp_type = IB_QPT_RC; 757 - init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 758 - init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM; 759 - init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM; 760 - 761 - qp = ib_create_qp(pd, &init_attr); 762 - if (IS_ERR(qp)) { 763 - dev_err(dev, "Create loop qp for mr free failed!"); 764 - return NULL; 765 - } 766 - 767 - return to_hr_qp(qp); 768 - } 769 - 770 - static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) 771 - { 772 - struct hns_roce_v1_priv *priv = hr_dev->priv; 773 - struct hns_roce_free_mr *free_mr = &priv->free_mr; 774 - struct hns_roce_caps *caps = &hr_dev->caps; 775 - struct ib_device *ibdev = &hr_dev->ib_dev; 776 - struct device *dev = &hr_dev->pdev->dev; 777 - struct ib_cq_init_attr cq_init_attr; 778 - struct ib_qp_attr attr = { 0 }; 779 - struct hns_roce_qp *hr_qp; 780 - struct ib_cq *cq; 781 - struct ib_pd *pd; 782 - union ib_gid dgid; 783 - __be64 subnet_prefix; 784 - int attr_mask = 0; 785 - int ret; 786 - int i, j; 787 - u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 }; 788 - u8 phy_port; 789 - u32 port = 0; 790 - u8 sl; 791 - 792 - /* Reserved cq for loop qp */ 793 - cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2; 794 - cq_init_attr.comp_vector = 0; 795 - 796 - cq = rdma_zalloc_drv_obj(ibdev, ib_cq); 797 - if (!cq) 798 - return -ENOMEM; 799 - 800 - ret = hns_roce_create_cq(cq, &cq_init_attr, NULL); 801 - if (ret) { 802 - dev_err(dev, "Create cq for reserved loop qp failed!"); 803 - goto alloc_cq_failed; 804 - } 805 - free_mr->mr_free_cq = to_hr_cq(cq); 806 - free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev; 807 - free_mr->mr_free_cq->ib_cq.uobject = NULL; 808 - free_mr->mr_free_cq->ib_cq.comp_handler = NULL; 809 - free_mr->mr_free_cq->ib_cq.event_handler = NULL; 810 - free_mr->mr_free_cq->ib_cq.cq_context = NULL; 811 - atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0); 812 
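
hns_roce_db_ext_init() unwinds with the usual kernel goto ladder: each failure jumps to a label that frees what was acquired before it, in reverse order, so every exit path releases exactly what it owns. The same shape in standalone C, with malloc() standing in for the DMA allocations:

#include <stdio.h>
#include <stdlib.h>

static int acquire_two(void **a, void **b)
{
        *a = malloc(16);
        if (!*a)
                return -1;

        *b = malloc(16);
        if (!*b)
                goto free_a;            /* undo only the first step */

        return 0;

free_a:
        free(*a);
        return -1;
}

int main(void)
{
        void *a, *b;

        if (acquire_two(&a, &b))
                return 1;

        puts("acquired");
        free(b);
        free(a);
        return 0;
}
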
- 813 - pd = rdma_zalloc_drv_obj(ibdev, ib_pd); 814 - if (!pd) { 815 - ret = -ENOMEM; 816 - goto alloc_mem_failed; 817 - } 818 - 819 - pd->device = ibdev; 820 - ret = hns_roce_alloc_pd(pd, NULL); 821 - if (ret) 822 - goto alloc_pd_failed; 823 - 824 - free_mr->mr_free_pd = to_hr_pd(pd); 825 - free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev; 826 - free_mr->mr_free_pd->ibpd.uobject = NULL; 827 - free_mr->mr_free_pd->ibpd.__internal_mr = NULL; 828 - atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0); 829 - 830 - attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE; 831 - attr.pkey_index = 0; 832 - attr.min_rnr_timer = 0; 833 - /* Disable read ability */ 834 - attr.max_dest_rd_atomic = 0; 835 - attr.max_rd_atomic = 0; 836 - /* Use arbitrary values as rq_psn and sq_psn */ 837 - attr.rq_psn = 0x0808; 838 - attr.sq_psn = 0x0808; 839 - attr.retry_cnt = 7; 840 - attr.rnr_retry = 7; 841 - attr.timeout = 0x12; 842 - attr.path_mtu = IB_MTU_256; 843 - attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; 844 - rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0); 845 - rdma_ah_set_static_rate(&attr.ah_attr, 3); 846 - 847 - subnet_prefix = cpu_to_be64(0xfe80000000000000LL); 848 - for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { 849 - phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) : 850 - (i % HNS_ROCE_MAX_PORTS); 851 - sl = i / HNS_ROCE_MAX_PORTS; 852 - 853 - for (j = 0; j < caps->num_ports; j++) { 854 - if (hr_dev->iboe.phy_port[j] == phy_port) { 855 - queue_en[i] = 1; 856 - port = j; 857 - break; 858 - } 859 - } 860 - 861 - if (!queue_en[i]) 862 - continue; 863 - 864 - free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); 865 - if (!free_mr->mr_free_qp[i]) { 866 - dev_err(dev, "Create loop qp failed!\n"); 867 - ret = -ENOMEM; 868 - goto create_lp_qp_failed; 869 - } 870 - hr_qp = free_mr->mr_free_qp[i]; 871 - 872 - hr_qp->port = port; 873 - hr_qp->phy_port = phy_port; 874 - hr_qp->ibqp.qp_type = IB_QPT_RC; 875 - hr_qp->ibqp.device = &hr_dev->ib_dev; 876 - hr_qp->ibqp.uobject = NULL; 877 - atomic_set(&hr_qp->ibqp.usecnt, 0); 878 - hr_qp->ibqp.pd = pd; 879 - hr_qp->ibqp.recv_cq = cq; 880 - hr_qp->ibqp.send_cq = cq; 881 - 882 - rdma_ah_set_port_num(&attr.ah_attr, port + 1); 883 - rdma_ah_set_sl(&attr.ah_attr, sl); 884 - attr.port_num = port + 1; 885 - 886 - attr.dest_qp_num = hr_qp->qpn; 887 - memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr), 888 - hr_dev->dev_addr[port], 889 - ETH_ALEN); 890 - 891 - memcpy(&dgid.raw, &subnet_prefix, sizeof(u64)); 892 - memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3); 893 - memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3); 894 - dgid.raw[11] = 0xff; 895 - dgid.raw[12] = 0xfe; 896 - dgid.raw[8] ^= 2; 897 - rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw); 898 - 899 - ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask, 900 - IB_QPS_RESET, IB_QPS_INIT); 901 - if (ret) { 902 - dev_err(dev, "modify qp failed(%d)!\n", ret); 903 - goto create_lp_qp_failed; 904 - } 905 - 906 - ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN, 907 - IB_QPS_INIT, IB_QPS_RTR); 908 - if (ret) { 909 - dev_err(dev, "modify qp failed(%d)!\n", ret); 910 - goto create_lp_qp_failed; 911 - } 912 - 913 - ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask, 914 - IB_QPS_RTR, IB_QPS_RTS); 915 - if (ret) { 916 - dev_err(dev, "modify qp failed(%d)!\n", ret); 917 - goto create_lp_qp_failed; 918 - } 919 - } 920 - 921 - return 0; 922 - 923 - create_lp_qp_failed: 924 - for (i -= 1; i >= 0; i--) { 925 - hr_qp = free_mr->mr_free_qp[i]; 926 - if (ib_destroy_qp(&hr_qp->ibqp)) 927 - dev_err(dev, "Destroy qp %d 
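
The dgid built for each loopback QP above is a link-local fe80::/64 address with a modified EUI-64 interface ID derived from the port MAC: ff:fe spliced into the middle and the universal/local bit flipped. A sketch with an example MAC:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint8_t gid[16] = { 0xfe, 0x80 };       /* bytes 2..7 stay zero */

        gid[8]  = mac[0] ^ 2;                   /* flip the U/L bit */
        gid[9]  = mac[1];
        gid[10] = mac[2];
        gid[11] = 0xff;                         /* EUI-64 filler */
        gid[12] = 0xfe;
        gid[13] = mac[3];
        gid[14] = mac[4];
        gid[15] = mac[5];

        for (int i = 0; i < 16; i++)
                printf("%02x%s", gid[i], i == 15 ? "\n" : "");
        return 0;
}
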
for mr free failed!\n", i); 928 - } 929 - 930 - hns_roce_dealloc_pd(pd, NULL); 931 - 932 - alloc_pd_failed: 933 - kfree(pd); 934 - 935 - alloc_mem_failed: 936 - hns_roce_destroy_cq(cq, NULL); 937 - alloc_cq_failed: 938 - kfree(cq); 939 - return ret; 940 - } 941 - 942 - static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev) 943 - { 944 - struct hns_roce_v1_priv *priv = hr_dev->priv; 945 - struct hns_roce_free_mr *free_mr = &priv->free_mr; 946 - struct device *dev = &hr_dev->pdev->dev; 947 - struct hns_roce_qp *hr_qp; 948 - int ret; 949 - int i; 950 - 951 - for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { 952 - hr_qp = free_mr->mr_free_qp[i]; 953 - if (!hr_qp) 954 - continue; 955 - 956 - ret = ib_destroy_qp(&hr_qp->ibqp); 957 - if (ret) 958 - dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n", 959 - i, ret); 960 - } 961 - 962 - hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL); 963 - kfree(&free_mr->mr_free_cq->ib_cq); 964 - hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL); 965 - kfree(&free_mr->mr_free_pd->ibpd); 966 - } 967 - 968 - static int hns_roce_db_init(struct hns_roce_dev *hr_dev) 969 - { 970 - struct hns_roce_v1_priv *priv = hr_dev->priv; 971 - struct hns_roce_db_table *db = &priv->db_table; 972 - struct device *dev = &hr_dev->pdev->dev; 973 - u32 sdb_ext_mod; 974 - u32 odb_ext_mod; 975 - u32 sdb_evt_mod; 976 - u32 odb_evt_mod; 977 - int ret; 978 - 979 - memset(db, 0, sizeof(*db)); 980 - 981 - /* Default DB mode */ 982 - sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE; 983 - odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE; 984 - sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE; 985 - odb_evt_mod = HNS_ROCE_ODB_POLL_MODE; 986 - 987 - db->sdb_ext_mod = sdb_ext_mod; 988 - db->odb_ext_mod = odb_ext_mod; 989 - 990 - /* Init extend DB */ 991 - ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod); 992 - if (ret) { 993 - dev_err(dev, "Failed in extend DB configuration.\n"); 994 - return ret; 995 - } 996 - 997 - hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod); 998 - 999 - return 0; 1000 - } 1001 - 1002 - static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work) 1003 - { 1004 - struct hns_roce_recreate_lp_qp_work *lp_qp_work; 1005 - struct hns_roce_dev *hr_dev; 1006 - 1007 - lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work, 1008 - work); 1009 - hr_dev = to_hr_dev(lp_qp_work->ib_dev); 1010 - 1011 - hns_roce_v1_release_lp_qp(hr_dev); 1012 - 1013 - if (hns_roce_v1_rsv_lp_qp(hr_dev)) 1014 - dev_err(&hr_dev->pdev->dev, "create reserver qp failed\n"); 1015 - 1016 - if (lp_qp_work->comp_flag) 1017 - complete(lp_qp_work->comp); 1018 - 1019 - kfree(lp_qp_work); 1020 - } 1021 - 1022 - static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev) 1023 - { 1024 - long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS; 1025 - struct hns_roce_v1_priv *priv = hr_dev->priv; 1026 - struct hns_roce_free_mr *free_mr = &priv->free_mr; 1027 - struct hns_roce_recreate_lp_qp_work *lp_qp_work; 1028 - struct device *dev = &hr_dev->pdev->dev; 1029 - struct completion comp; 1030 - 1031 - lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work), 1032 - GFP_KERNEL); 1033 - if (!lp_qp_work) 1034 - return -ENOMEM; 1035 - 1036 - INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn); 1037 - 1038 - lp_qp_work->ib_dev = &(hr_dev->ib_dev); 1039 - lp_qp_work->comp = &comp; 1040 - lp_qp_work->comp_flag = 1; 1041 - 1042 - init_completion(lp_qp_work->comp); 1043 - 1044 - queue_work(free_mr->free_mr_wq, &(lp_qp_work->work)); 1045 - 1046 - while (end > 0) { 1047 
- if (try_wait_for_completion(&comp)) 1048 - return 0; 1049 - msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE); 1050 - end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE; 1051 - } 1052 - 1053 - lp_qp_work->comp_flag = 0; 1054 - if (try_wait_for_completion(&comp)) 1055 - return 0; 1056 - 1057 - dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n"); 1058 - return -ETIMEDOUT; 1059 - } 1060 - 1061 - static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp) 1062 - { 1063 - struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); 1064 - struct device *dev = &hr_dev->pdev->dev; 1065 - struct ib_send_wr send_wr; 1066 - const struct ib_send_wr *bad_wr; 1067 - int ret; 1068 - 1069 - memset(&send_wr, 0, sizeof(send_wr)); 1070 - send_wr.next = NULL; 1071 - send_wr.num_sge = 0; 1072 - send_wr.send_flags = 0; 1073 - send_wr.sg_list = NULL; 1074 - send_wr.wr_id = (unsigned long long)&send_wr; 1075 - send_wr.opcode = IB_WR_RDMA_WRITE; 1076 - 1077 - ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr); 1078 - if (ret) { 1079 - dev_err(dev, "Post write wqe for mr free failed(%d)!", ret); 1080 - return ret; 1081 - } 1082 - 1083 - return 0; 1084 - } 1085 - 1086 - static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) 1087 - { 1088 - unsigned long end = 1089 - msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies; 1090 - struct hns_roce_mr_free_work *mr_work = 1091 - container_of(work, struct hns_roce_mr_free_work, work); 1092 - struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev); 1093 - struct hns_roce_v1_priv *priv = hr_dev->priv; 1094 - struct hns_roce_free_mr *free_mr = &priv->free_mr; 1095 - struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq; 1096 - struct hns_roce_mr *hr_mr = mr_work->mr; 1097 - struct device *dev = &hr_dev->pdev->dev; 1098 - struct ib_wc wc[HNS_ROCE_V1_RESV_QP]; 1099 - struct hns_roce_qp *hr_qp; 1100 - int ne = 0; 1101 - int ret; 1102 - int i; 1103 - 1104 - for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { 1105 - hr_qp = free_mr->mr_free_qp[i]; 1106 - if (!hr_qp) 1107 - continue; 1108 - ne++; 1109 - 1110 - ret = hns_roce_v1_send_lp_wqe(hr_qp); 1111 - if (ret) { 1112 - dev_err(dev, 1113 - "Send wqe (qp:0x%lx) for mr free failed(%d)!\n", 1114 - hr_qp->qpn, ret); 1115 - goto free_work; 1116 - } 1117 - } 1118 - 1119 - if (!ne) { 1120 - dev_err(dev, "Reserved loop qp is absent!\n"); 1121 - goto free_work; 1122 - } 1123 - 1124 - do { 1125 - ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); 1126 - if (ret < 0 && hr_qp) { 1127 - dev_err(dev, 1128 - "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n", 1129 - hr_qp->qpn, ret, hr_mr->key, ne); 1130 - goto free_work; 1131 - } 1132 - ne -= ret; 1133 - usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000, 1134 - (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000); 1135 - } while (ne && time_before_eq(jiffies, end)); 1136 - 1137 - if (ne != 0) 1138 - dev_err(dev, 1139 - "Poll cqe for mr 0x%x free timeout! 
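
Both the recreate-loopback-QP path above and dereg_mr below wait on an on-stack completion by polling try_wait_for_completion() against a millisecond budget, with one last check after the budget expires. The loop's shape in standalone C, with work_done() as a stand-in:

#include <stdio.h>

static int polls;

/* Stand-in for try_wait_for_completion(): pretend the worker finishes on
 * the fourth poll. */
static int work_done(void)
{
        return ++polls >= 4;
}

int main(void)
{
        long end = 20000;       /* millisecond budget, illustrative */
        long step = 20;         /* per-iteration sleep, illustrative */

        while (end > 0) {
                if (work_done()) {
                        puts("completed");
                        return 0;
                }
                end -= step;    /* the real loop msleep()s here */
        }

        /* one last chance after the budget, mirroring the removed code */
        puts(work_done() ? "completed late" : "timed out, -ETIMEDOUT");
        return 0;
}
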
Remain %d cqe\n", 1140 - hr_mr->key, ne); 1141 - 1142 - free_work: 1143 - if (mr_work->comp_flag) 1144 - complete(mr_work->comp); 1145 - kfree(mr_work); 1146 - } 1147 - 1148 - static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, 1149 - struct hns_roce_mr *mr, struct ib_udata *udata) 1150 - { 1151 - struct hns_roce_v1_priv *priv = hr_dev->priv; 1152 - struct hns_roce_free_mr *free_mr = &priv->free_mr; 1153 - long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS; 1154 - struct device *dev = &hr_dev->pdev->dev; 1155 - struct hns_roce_mr_free_work *mr_work; 1156 - unsigned long start = jiffies; 1157 - struct completion comp; 1158 - int ret = 0; 1159 - 1160 - if (mr->enabled) { 1161 - if (hns_roce_hw_destroy_mpt(hr_dev, NULL, 1162 - key_to_hw_index(mr->key) & 1163 - (hr_dev->caps.num_mtpts - 1))) 1164 - dev_warn(dev, "DESTROY_MPT failed!\n"); 1165 - } 1166 - 1167 - mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL); 1168 - if (!mr_work) { 1169 - ret = -ENOMEM; 1170 - goto free_mr; 1171 - } 1172 - 1173 - INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn); 1174 - 1175 - mr_work->ib_dev = &(hr_dev->ib_dev); 1176 - mr_work->comp = &comp; 1177 - mr_work->comp_flag = 1; 1178 - mr_work->mr = (void *)mr; 1179 - init_completion(mr_work->comp); 1180 - 1181 - queue_work(free_mr->free_mr_wq, &(mr_work->work)); 1182 - 1183 - while (end > 0) { 1184 - if (try_wait_for_completion(&comp)) 1185 - goto free_mr; 1186 - msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); 1187 - end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE; 1188 - } 1189 - 1190 - mr_work->comp_flag = 0; 1191 - if (try_wait_for_completion(&comp)) 1192 - goto free_mr; 1193 - 1194 - dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key); 1195 - ret = -ETIMEDOUT; 1196 - 1197 - free_mr: 1198 - dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n", 1199 - mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start)); 1200 - 1201 - ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)key_to_hw_index(mr->key)); 1202 - hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr); 1203 - kfree(mr); 1204 - 1205 - return ret; 1206 - } 1207 - 1208 - static void hns_roce_db_free(struct hns_roce_dev *hr_dev) 1209 - { 1210 - struct hns_roce_v1_priv *priv = hr_dev->priv; 1211 - struct hns_roce_db_table *db = &priv->db_table; 1212 - struct device *dev = &hr_dev->pdev->dev; 1213 - 1214 - if (db->sdb_ext_mod) { 1215 - dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE, 1216 - db->ext_db->sdb_buf_list->buf, 1217 - db->ext_db->sdb_buf_list->map); 1218 - kfree(db->ext_db->sdb_buf_list); 1219 - } 1220 - 1221 - if (db->odb_ext_mod) { 1222 - dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE, 1223 - db->ext_db->odb_buf_list->buf, 1224 - db->ext_db->odb_buf_list->map); 1225 - kfree(db->ext_db->odb_buf_list); 1226 - } 1227 - 1228 - kfree(db->ext_db); 1229 - } 1230 - 1231 - static int hns_roce_raq_init(struct hns_roce_dev *hr_dev) 1232 - { 1233 - struct hns_roce_v1_priv *priv = hr_dev->priv; 1234 - struct hns_roce_raq_table *raq = &priv->raq_table; 1235 - struct device *dev = &hr_dev->pdev->dev; 1236 - dma_addr_t addr; 1237 - int raq_shift; 1238 - __le32 tmp; 1239 - u32 val; 1240 - int ret; 1241 - 1242 - raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL); 1243 - if (!raq->e_raq_buf) 1244 - return -ENOMEM; 1245 - 1246 - raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, 1247 - &addr, GFP_KERNEL); 1248 - if (!raq->e_raq_buf->buf) { 1249 - ret = -ENOMEM; 1250 - goto err_dma_alloc_raq; 1251 - } 1252 - raq->e_raq_buf->map = addr; 1253 - 1254 - /* Configure raq extended address. 
48bit 4K align */ 1255 - roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12); 1256 - 1257 - /* Configure raq_shift */ 1258 - raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY); 1259 - val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG); 1260 - tmp = cpu_to_le32(val); 1261 - roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M, 1262 - ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift); 1263 - /* 1264 - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of 1265 - * using 4K page, and shift more 32 because of 1266 - * calculating the high 32 bit value evaluated to hardware. 1267 - */ 1268 - roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M, 1269 - ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S, 1270 - raq->e_raq_buf->map >> 44); 1271 - val = le32_to_cpu(tmp); 1272 - roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val); 1273 - dev_dbg(dev, "Configure raq_shift 0x%x.\n", val); 1274 - 1275 - /* Configure raq threshold */ 1276 - val = roce_read(hr_dev, ROCEE_RAQ_WL_REG); 1277 - tmp = cpu_to_le32(val); 1278 - roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M, 1279 - ROCEE_RAQ_WL_ROCEE_RAQ_WL_S, 1280 - HNS_ROCE_V1_EXT_RAQ_WF); 1281 - val = le32_to_cpu(tmp); 1282 - roce_write(hr_dev, ROCEE_RAQ_WL_REG, val); 1283 - dev_dbg(dev, "Configure raq_wl 0x%x.\n", val); 1284 - 1285 - /* Enable extend raq */ 1286 - val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG); 1287 - tmp = cpu_to_le32(val); 1288 - roce_set_field(tmp, 1289 - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M, 1290 - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S, 1291 - POL_TIME_INTERVAL_VAL); 1292 - roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1); 1293 - roce_set_field(tmp, 1294 - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M, 1295 - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S, 1296 - 2); 1297 - roce_set_bit(tmp, 1298 - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1); 1299 - val = le32_to_cpu(tmp); 1300 - roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val); 1301 - dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val); 1302 - 1303 - /* Enable raq drop */ 1304 - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); 1305 - tmp = cpu_to_le32(val); 1306 - roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1); 1307 - val = le32_to_cpu(tmp); 1308 - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); 1309 - dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val); 1310 - 1311 - return 0; 1312 - 1313 - err_dma_alloc_raq: 1314 - kfree(raq->e_raq_buf); 1315 - return ret; 1316 - } 1317 - 1318 - static void hns_roce_raq_free(struct hns_roce_dev *hr_dev) 1319 - { 1320 - struct hns_roce_v1_priv *priv = hr_dev->priv; 1321 - struct hns_roce_raq_table *raq = &priv->raq_table; 1322 - struct device *dev = &hr_dev->pdev->dev; 1323 - 1324 - dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf, 1325 - raq->e_raq_buf->map); 1326 - kfree(raq->e_raq_buf); 1327 - } 1328 - 1329 - static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag) 1330 - { 1331 - __le32 tmp; 1332 - u32 val; 1333 - 1334 - if (enable_flag) { 1335 - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); 1336 - /* Open all ports */ 1337 - tmp = cpu_to_le32(val); 1338 - roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M, 1339 - ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 1340 - ALL_PORT_VAL_OPEN); 1341 - val = le32_to_cpu(tmp); 1342 - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); 1343 - } else { 1344 - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); 1345 - /* Close all ports */ 1346 - tmp = cpu_to_le32(val); 1347 - roce_set_field(tmp, 
ROCEE_GLB_CFG_ROCEE_PORT_ST_M, 1348 - ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0); 1349 - val = le32_to_cpu(tmp); 1350 - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); 1351 - } 1352 - } 1353 - 1354 - static int hns_roce_bt_init(struct hns_roce_dev *hr_dev) 1355 - { 1356 - struct hns_roce_v1_priv *priv = hr_dev->priv; 1357 - struct device *dev = &hr_dev->pdev->dev; 1358 - int ret; 1359 - 1360 - priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev, 1361 - HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map, 1362 - GFP_KERNEL); 1363 - if (!priv->bt_table.qpc_buf.buf) 1364 - return -ENOMEM; 1365 - 1366 - priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev, 1367 - HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map, 1368 - GFP_KERNEL); 1369 - if (!priv->bt_table.mtpt_buf.buf) { 1370 - ret = -ENOMEM; 1371 - goto err_failed_alloc_mtpt_buf; 1372 - } 1373 - 1374 - priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev, 1375 - HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map, 1376 - GFP_KERNEL); 1377 - if (!priv->bt_table.cqc_buf.buf) { 1378 - ret = -ENOMEM; 1379 - goto err_failed_alloc_cqc_buf; 1380 - } 1381 - 1382 - return 0; 1383 - 1384 - err_failed_alloc_cqc_buf: 1385 - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, 1386 - priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); 1387 - 1388 - err_failed_alloc_mtpt_buf: 1389 - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, 1390 - priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); 1391 - 1392 - return ret; 1393 - } 1394 - 1395 - static void hns_roce_bt_free(struct hns_roce_dev *hr_dev) 1396 - { 1397 - struct hns_roce_v1_priv *priv = hr_dev->priv; 1398 - struct device *dev = &hr_dev->pdev->dev; 1399 - 1400 - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, 1401 - priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map); 1402 - 1403 - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, 1404 - priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); 1405 - 1406 - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, 1407 - priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); 1408 - } 1409 - 1410 - static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev) 1411 - { 1412 - struct hns_roce_v1_priv *priv = hr_dev->priv; 1413 - struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; 1414 - struct device *dev = &hr_dev->pdev->dev; 1415 - 1416 - /* 1417 - * This buffer will be used for CQ's tptr(tail pointer), also 1418 - * named ci(customer index). Every CQ will use 2 bytes to save 1419 - * cqe ci in hip06. Hardware will read this area to get new ci 1420 - * when the queue is almost full. 
1421 - */ 1422 - tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE, 1423 - &tptr_buf->map, GFP_KERNEL); 1424 - if (!tptr_buf->buf) 1425 - return -ENOMEM; 1426 - 1427 - hr_dev->tptr_dma_addr = tptr_buf->map; 1428 - hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE; 1429 - 1430 - return 0; 1431 - } 1432 - 1433 - static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev) 1434 - { 1435 - struct hns_roce_v1_priv *priv = hr_dev->priv; 1436 - struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; 1437 - struct device *dev = &hr_dev->pdev->dev; 1438 - 1439 - dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE, 1440 - tptr_buf->buf, tptr_buf->map); 1441 - } 1442 - 1443 - static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev) 1444 - { 1445 - struct hns_roce_v1_priv *priv = hr_dev->priv; 1446 - struct hns_roce_free_mr *free_mr = &priv->free_mr; 1447 - struct device *dev = &hr_dev->pdev->dev; 1448 - int ret; 1449 - 1450 - free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr"); 1451 - if (!free_mr->free_mr_wq) { 1452 - dev_err(dev, "Create free mr workqueue failed!\n"); 1453 - return -ENOMEM; 1454 - } 1455 - 1456 - ret = hns_roce_v1_rsv_lp_qp(hr_dev); 1457 - if (ret) { 1458 - dev_err(dev, "Reserved loop qp failed(%d)!\n", ret); 1459 - destroy_workqueue(free_mr->free_mr_wq); 1460 - } 1461 - 1462 - return ret; 1463 - } 1464 - 1465 - static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev) 1466 - { 1467 - struct hns_roce_v1_priv *priv = hr_dev->priv; 1468 - struct hns_roce_free_mr *free_mr = &priv->free_mr; 1469 - 1470 - destroy_workqueue(free_mr->free_mr_wq); 1471 - 1472 - hns_roce_v1_release_lp_qp(hr_dev); 1473 - } 1474 - 1475 - /** 1476 - * hns_roce_v1_reset - reset RoCE 1477 - * @hr_dev: RoCE device struct pointer 1478 - * @dereset: true -- drop reset, false -- reset 1479 - * return 0 - success , negative --fail 1480 - */ 1481 - static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset) 1482 - { 1483 - struct device_node *dsaf_node; 1484 - struct device *dev = &hr_dev->pdev->dev; 1485 - struct device_node *np = dev->of_node; 1486 - struct fwnode_handle *fwnode; 1487 - int ret; 1488 - 1489 - /* check if this is DT/ACPI case */ 1490 - if (dev_of_node(dev)) { 1491 - dsaf_node = of_parse_phandle(np, "dsaf-handle", 0); 1492 - if (!dsaf_node) { 1493 - dev_err(dev, "could not find dsaf-handle\n"); 1494 - return -EINVAL; 1495 - } 1496 - fwnode = &dsaf_node->fwnode; 1497 - } else if (is_acpi_device_node(dev->fwnode)) { 1498 - struct fwnode_reference_args args; 1499 - 1500 - ret = acpi_node_get_property_reference(dev->fwnode, 1501 - "dsaf-handle", 0, &args); 1502 - if (ret) { 1503 - dev_err(dev, "could not find dsaf-handle\n"); 1504 - return ret; 1505 - } 1506 - fwnode = args.fwnode; 1507 - } else { 1508 - dev_err(dev, "cannot read data from DT or ACPI\n"); 1509 - return -ENXIO; 1510 - } 1511 - 1512 - ret = hns_dsaf_roce_reset(fwnode, false); 1513 - if (ret) 1514 - return ret; 1515 - 1516 - if (dereset) { 1517 - msleep(SLEEP_TIME_INTERVAL); 1518 - ret = hns_dsaf_roce_reset(fwnode, true); 1519 - } 1520 - 1521 - return ret; 1522 - } 1523 - 1524 - static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev) 1525 - { 1526 - struct hns_roce_caps *caps = &hr_dev->caps; 1527 - int i; 1528 - 1529 - hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG); 1530 - hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG); 1531 - hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) | 1532 - ((u64)roce_read(hr_dev, 1533 - 
ROCEE_SYS_IMAGE_GUID_H_REG) << 32); 1534 - hr_dev->hw_rev = HNS_ROCE_HW_VER1; 1535 - 1536 - caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM; 1537 - caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM; 1538 - caps->min_wqes = HNS_ROCE_MIN_WQE_NUM; 1539 - caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM; 1540 - caps->min_cqes = HNS_ROCE_MIN_CQE_NUM; 1541 - caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM; 1542 - caps->max_sq_sg = HNS_ROCE_V1_SG_NUM; 1543 - caps->max_rq_sg = HNS_ROCE_V1_SG_NUM; 1544 - caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE; 1545 - caps->num_uars = HNS_ROCE_V1_UAR_NUM; 1546 - caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM; 1547 - caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM; 1548 - caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM; 1549 - caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM; 1550 - caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM; 1551 - caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS; 1552 - caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM; 1553 - caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA; 1554 - caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA; 1555 - caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ; 1556 - caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ; 1557 - caps->qpc_sz = HNS_ROCE_V1_QPC_SIZE; 1558 - caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE; 1559 - caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE; 1560 - caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE; 1561 - caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE; 1562 - caps->cqe_sz = HNS_ROCE_V1_CQE_SIZE; 1563 - caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT; 1564 - caps->reserved_lkey = 0; 1565 - caps->reserved_pds = 0; 1566 - caps->reserved_mrws = 1; 1567 - caps->reserved_uars = 0; 1568 - caps->reserved_cqs = 0; 1569 - caps->reserved_qps = 12; /* 2 SQP per port, six ports total 12 */ 1570 - caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE; 1571 - 1572 - for (i = 0; i < caps->num_ports; i++) 1573 - caps->pkey_table_len[i] = 1; 1574 - 1575 - for (i = 0; i < caps->num_ports; i++) { 1576 - /* Six ports shared 16 GID in v1 engine */ 1577 - if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports)) 1578 - caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM / 1579 - caps->num_ports; 1580 - else 1581 - caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM / 1582 - caps->num_ports + 1; 1583 - } 1584 - 1585 - caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM; 1586 - caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM; 1587 - caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG); 1588 - caps->max_mtu = IB_MTU_2048; 1589 - 1590 - return 0; 1591 - } 1592 - 1593 - static int hns_roce_v1_init(struct hns_roce_dev *hr_dev) 1594 - { 1595 - int ret; 1596 - u32 val; 1597 - __le32 tmp; 1598 - struct device *dev = &hr_dev->pdev->dev; 1599 - 1600 - /* DMAE user config */ 1601 - val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG); 1602 - tmp = cpu_to_le32(val); 1603 - roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M, 1604 - ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf); 1605 - roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M, 1606 - ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S, 1607 - 1 << PAGES_SHIFT_16); 1608 - val = le32_to_cpu(tmp); 1609 - roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val); 1610 - 1611 - val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG); 1612 - tmp = cpu_to_le32(val); 1613 - roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M, 1614 - ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf); 1615 - roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M, 1616 - 
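
The profile code just shown divides hip06's 16 shared GID entries across the six ports, giving the first GID_NUM % num_ports ports one extra slot. Worked out in standalone C:

#include <stdio.h>

int main(void)
{
        unsigned int gid_num = 16, num_ports = 6, total = 0;

        for (unsigned int i = 0; i < num_ports; i++) {
                unsigned int len = gid_num / num_ports;

                if (i < gid_num % num_ports)
                        len++;                  /* remainder ports get +1 */
                printf("port %u: %u GID entries\n", i, len);
                total += len;
        }
        printf("total: %u\n", total);           /* 3+3+3+3+2+2 = 16 */
        return 0;
}
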
ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S, 1617 - 1 << PAGES_SHIFT_16); 1618 - 1619 - ret = hns_roce_db_init(hr_dev); 1620 - if (ret) { 1621 - dev_err(dev, "doorbell init failed!\n"); 1622 - return ret; 1623 - } 1624 - 1625 - ret = hns_roce_raq_init(hr_dev); 1626 - if (ret) { 1627 - dev_err(dev, "raq init failed!\n"); 1628 - goto error_failed_raq_init; 1629 - } 1630 - 1631 - ret = hns_roce_bt_init(hr_dev); 1632 - if (ret) { 1633 - dev_err(dev, "bt init failed!\n"); 1634 - goto error_failed_bt_init; 1635 - } 1636 - 1637 - ret = hns_roce_tptr_init(hr_dev); 1638 - if (ret) { 1639 - dev_err(dev, "tptr init failed!\n"); 1640 - goto error_failed_tptr_init; 1641 - } 1642 - 1643 - ret = hns_roce_free_mr_init(hr_dev); 1644 - if (ret) { 1645 - dev_err(dev, "free mr init failed!\n"); 1646 - goto error_failed_free_mr_init; 1647 - } 1648 - 1649 - hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP); 1650 - 1651 - return 0; 1652 - 1653 - error_failed_free_mr_init: 1654 - hns_roce_tptr_free(hr_dev); 1655 - 1656 - error_failed_tptr_init: 1657 - hns_roce_bt_free(hr_dev); 1658 - 1659 - error_failed_bt_init: 1660 - hns_roce_raq_free(hr_dev); 1661 - 1662 - error_failed_raq_init: 1663 - hns_roce_db_free(hr_dev); 1664 - return ret; 1665 - } 1666 - 1667 - static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev) 1668 - { 1669 - hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN); 1670 - hns_roce_free_mr_free(hr_dev); 1671 - hns_roce_tptr_free(hr_dev); 1672 - hns_roce_bt_free(hr_dev); 1673 - hns_roce_raq_free(hr_dev); 1674 - hns_roce_db_free(hr_dev); 1675 - } 1676 - 1677 - static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev) 1678 - { 1679 - u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG); 1680 - 1681 - return (!!(status & (1 << HCR_GO_BIT))); 1682 - } 1683 - 1684 - static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, 1685 - u64 out_param, u32 in_modifier, u8 op_modifier, 1686 - u16 op, u16 token, int event) 1687 - { 1688 - u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG); 1689 - unsigned long end; 1690 - u32 val = 0; 1691 - __le32 tmp; 1692 - 1693 - end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies; 1694 - while (hns_roce_v1_cmd_pending(hr_dev)) { 1695 - if (time_after(jiffies, end)) { 1696 - dev_err(hr_dev->dev, "jiffies=%d end=%d\n", 1697 - (int)jiffies, (int)end); 1698 - return -EAGAIN; 1699 - } 1700 - cond_resched(); 1701 - } 1702 - 1703 - tmp = cpu_to_le32(val); 1704 - roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S, 1705 - op); 1706 - roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M, 1707 - ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier); 1708 - roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event); 1709 - roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1); 1710 - roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M, 1711 - ROCEE_MB6_ROCEE_MB_TOKEN_S, token); 1712 - 1713 - val = le32_to_cpu(tmp); 1714 - writeq(in_param, hcr + 0); 1715 - writeq(out_param, hcr + 2); 1716 - writel(in_modifier, hcr + 4); 1717 - /* Memory barrier */ 1718 - wmb(); 1719 - 1720 - writel(val, hcr + 5); 1721 - 1722 - return 0; 1723 - } 1724 - 1725 - static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev, 1726 - unsigned int timeout) 1727 - { 1728 - u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG; 1729 - unsigned long end; 1730 - u32 status = 0; 1731 - 1732 - end = msecs_to_jiffies(timeout) + jiffies; 1733 - while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end)) 1734 - cond_resched(); 1735 - 1736 - if (hns_roce_v1_cmd_pending(hr_dev)) { 1737 - 
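
hns_roce_v1_post_mbox() in this hunk writes the mailbox arguments, issues wmb(), and only then writes the word carrying the HW_RUN/go bit, so the device can never observe the command as runnable before its arguments land. A loose user-space analogue, with a C11 release fence standing in for the MMIO write barrier and plain variables for the registers:

#include <stdatomic.h>
#include <stdio.h>

static unsigned long long in_param, out_param;  /* mailbox payload */
static atomic_uint go_word;                     /* word holding the go bit */

int main(void)
{
        in_param = 0x1122334455667788ull;
        out_param = 0;

        atomic_thread_fence(memory_order_release);      /* wmb() stand-in */
        atomic_store_explicit(&go_word, 1, memory_order_relaxed);

        printf("posted, go_word=%u\n",
               atomic_load_explicit(&go_word, memory_order_relaxed));
        return 0;
}
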
dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n"); 1738 - return -ETIMEDOUT; 1739 - } 1740 - 1741 - status = le32_to_cpu((__force __le32) 1742 - __raw_readl(hcr + HCR_STATUS_OFFSET)); 1743 - if ((status & STATUS_MASK) != 0x1) { 1744 - dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status); 1745 - return -EBUSY; 1746 - } 1747 - 1748 - return 0; 1749 - } 1750 - 1751 - static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port, 1752 - int gid_index, const union ib_gid *gid, 1753 - const struct ib_gid_attr *attr) 1754 - { 1755 - unsigned long flags; 1756 - u32 *p = NULL; 1757 - u8 gid_idx; 1758 - 1759 - gid_idx = hns_get_gid_index(hr_dev, port, gid_index); 1760 - 1761 - spin_lock_irqsave(&hr_dev->iboe.lock, flags); 1762 - 1763 - p = (u32 *)&gid->raw[0]; 1764 - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG + 1765 - (HNS_ROCE_V1_GID_NUM * gid_idx)); 1766 - 1767 - p = (u32 *)&gid->raw[4]; 1768 - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG + 1769 - (HNS_ROCE_V1_GID_NUM * gid_idx)); 1770 - 1771 - p = (u32 *)&gid->raw[8]; 1772 - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG + 1773 - (HNS_ROCE_V1_GID_NUM * gid_idx)); 1774 - 1775 - p = (u32 *)&gid->raw[0xc]; 1776 - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG + 1777 - (HNS_ROCE_V1_GID_NUM * gid_idx)); 1778 - 1779 - spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); 1780 - 1781 - return 0; 1782 - } 1783 - 1784 - static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, 1785 - const u8 *addr) 1786 - { 1787 - u32 reg_smac_l; 1788 - u16 reg_smac_h; 1789 - __le32 tmp; 1790 - u16 *p_h; 1791 - u32 *p; 1792 - u32 val; 1793 - 1794 - /* 1795 - * When mac changed, loopback may fail 1796 - * because of smac not equal to dmac. 1797 - * We Need to release and create reserved qp again. 
1798 - */ 1799 - if (hr_dev->hw->dereg_mr) { 1800 - int ret; 1801 - 1802 - ret = hns_roce_v1_recreate_lp_qp(hr_dev); 1803 - if (ret && ret != -ETIMEDOUT) 1804 - return ret; 1805 - } 1806 - 1807 - p = (u32 *)(&addr[0]); 1808 - reg_smac_l = *p; 1809 - roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG + 1810 - PHY_PORT_OFFSET * phy_port); 1811 - 1812 - val = roce_read(hr_dev, 1813 - ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET); 1814 - tmp = cpu_to_le32(val); 1815 - p_h = (u16 *)(&addr[4]); 1816 - reg_smac_h = *p_h; 1817 - roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M, 1818 - ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h); 1819 - val = le32_to_cpu(tmp); 1820 - roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET, 1821 - val); 1822 - 1823 - return 0; 1824 - } 1825 - 1826 - static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port, 1827 - enum ib_mtu mtu) 1828 - { 1829 - __le32 tmp; 1830 - u32 val; 1831 - 1832 - val = roce_read(hr_dev, 1833 - ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET); 1834 - tmp = cpu_to_le32(val); 1835 - roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M, 1836 - ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu); 1837 - val = le32_to_cpu(tmp); 1838 - roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET, 1839 - val); 1840 - } 1841 - 1842 - static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf, 1843 - struct hns_roce_mr *mr, 1844 - unsigned long mtpt_idx) 1845 - { 1846 - u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 }; 1847 - struct ib_device *ibdev = &hr_dev->ib_dev; 1848 - struct hns_roce_v1_mpt_entry *mpt_entry; 1849 - dma_addr_t pbl_ba; 1850 - int count; 1851 - int i; 1852 - 1853 - /* MPT filled into mailbox buf */ 1854 - mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf; 1855 - memset(mpt_entry, 0, sizeof(*mpt_entry)); 1856 - 1857 - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M, 1858 - MPT_BYTE_4_KEY_STATE_S, KEY_VALID); 1859 - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M, 1860 - MPT_BYTE_4_KEY_S, mr->key); 1861 - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M, 1862 - MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K); 1863 - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0); 1864 - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S, 1865 - (mr->access & IB_ACCESS_MW_BIND ? 1 : 0)); 1866 - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0); 1867 - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M, 1868 - MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type); 1869 - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0); 1870 - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S, 1871 - (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); 1872 - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S, 1873 - (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0)); 1874 - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S, 1875 - (mr->access & IB_ACCESS_REMOTE_READ ? 
1 : 0)); 1876 - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S, 1877 - 0); 1878 - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0); 1879 - 1880 - roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M, 1881 - MPT_BYTE_12_PBL_ADDR_H_S, 0); 1882 - roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M, 1883 - MPT_BYTE_12_MW_BIND_COUNTER_S, 0); 1884 - 1885 - mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova); 1886 - mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32)); 1887 - mpt_entry->length = cpu_to_le32((u32)mr->size); 1888 - 1889 - roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M, 1890 - MPT_BYTE_28_PD_S, mr->pd); 1891 - roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M, 1892 - MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx); 1893 - roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M, 1894 - MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT); 1895 - 1896 - /* DMA memory register */ 1897 - if (mr->type == MR_TYPE_DMA) 1898 - return 0; 1899 - 1900 - count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, 1901 - ARRAY_SIZE(pages), &pbl_ba); 1902 - if (count < 1) { 1903 - ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count); 1904 - return -ENOBUFS; 1905 - } 1906 - 1907 - /* Register user mr */ 1908 - for (i = 0; i < count; i++) { 1909 - switch (i) { 1910 - case 0: 1911 - mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i])); 1912 - roce_set_field(mpt_entry->mpt_byte_36, 1913 - MPT_BYTE_36_PA0_H_M, 1914 - MPT_BYTE_36_PA0_H_S, 1915 - (u32)(pages[i] >> PAGES_SHIFT_32)); 1916 - break; 1917 - case 1: 1918 - roce_set_field(mpt_entry->mpt_byte_36, 1919 - MPT_BYTE_36_PA1_L_M, 1920 - MPT_BYTE_36_PA1_L_S, (u32)(pages[i])); 1921 - roce_set_field(mpt_entry->mpt_byte_40, 1922 - MPT_BYTE_40_PA1_H_M, 1923 - MPT_BYTE_40_PA1_H_S, 1924 - (u32)(pages[i] >> PAGES_SHIFT_24)); 1925 - break; 1926 - case 2: 1927 - roce_set_field(mpt_entry->mpt_byte_40, 1928 - MPT_BYTE_40_PA2_L_M, 1929 - MPT_BYTE_40_PA2_L_S, (u32)(pages[i])); 1930 - roce_set_field(mpt_entry->mpt_byte_44, 1931 - MPT_BYTE_44_PA2_H_M, 1932 - MPT_BYTE_44_PA2_H_S, 1933 - (u32)(pages[i] >> PAGES_SHIFT_16)); 1934 - break; 1935 - case 3: 1936 - roce_set_field(mpt_entry->mpt_byte_44, 1937 - MPT_BYTE_44_PA3_L_M, 1938 - MPT_BYTE_44_PA3_L_S, (u32)(pages[i])); 1939 - roce_set_field(mpt_entry->mpt_byte_48, 1940 - MPT_BYTE_48_PA3_H_M, 1941 - MPT_BYTE_48_PA3_H_S, 1942 - (u32)(pages[i] >> PAGES_SHIFT_8)); 1943 - break; 1944 - case 4: 1945 - mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i])); 1946 - roce_set_field(mpt_entry->mpt_byte_56, 1947 - MPT_BYTE_56_PA4_H_M, 1948 - MPT_BYTE_56_PA4_H_S, 1949 - (u32)(pages[i] >> PAGES_SHIFT_32)); 1950 - break; 1951 - case 5: 1952 - roce_set_field(mpt_entry->mpt_byte_56, 1953 - MPT_BYTE_56_PA5_L_M, 1954 - MPT_BYTE_56_PA5_L_S, (u32)(pages[i])); 1955 - roce_set_field(mpt_entry->mpt_byte_60, 1956 - MPT_BYTE_60_PA5_H_M, 1957 - MPT_BYTE_60_PA5_H_S, 1958 - (u32)(pages[i] >> PAGES_SHIFT_24)); 1959 - break; 1960 - case 6: 1961 - roce_set_field(mpt_entry->mpt_byte_60, 1962 - MPT_BYTE_60_PA6_L_M, 1963 - MPT_BYTE_60_PA6_L_S, (u32)(pages[i])); 1964 - roce_set_field(mpt_entry->mpt_byte_64, 1965 - MPT_BYTE_64_PA6_H_M, 1966 - MPT_BYTE_64_PA6_H_S, 1967 - (u32)(pages[i] >> PAGES_SHIFT_16)); 1968 - break; 1969 - default: 1970 - break; 1971 - } 1972 - } 1973 - 1974 - mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba); 1975 - roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M, 1976 - MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba)); 1977 - 1978 
- return 0; 1979 - } 1980 - 1981 - static void *get_cqe(struct hns_roce_cq *hr_cq, int n) 1982 - { 1983 - return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE); 1984 - } 1985 - 1986 - static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n) 1987 - { 1988 - struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe); 1989 - 1990 - /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */ 1991 - return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^ 1992 - !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL; 1993 - } 1994 - 1995 - static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq) 1996 - { 1997 - return get_sw_cqe(hr_cq, hr_cq->cons_index); 1998 - } 1999 - 2000 - static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index) 2001 - { 2002 - __le32 doorbell[2]; 2003 - 2004 - doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1)); 2005 - doorbell[1] = 0; 2006 - roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); 2007 - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, 2008 - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); 2009 - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M, 2010 - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0); 2011 - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M, 2012 - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn); 2013 - 2014 - hns_roce_write64_k(doorbell, hr_cq->db_reg); 2015 - } 2016 - 2017 - static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, 2018 - struct hns_roce_srq *srq) 2019 - { 2020 - struct hns_roce_cqe *cqe, *dest; 2021 - u32 prod_index; 2022 - int nfreed = 0; 2023 - u8 owner_bit; 2024 - 2025 - for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index); 2026 - ++prod_index) { 2027 - if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe) 2028 - break; 2029 - } 2030 - 2031 - /* 2032 - * Now backwards through the CQ, removing CQ entries 2033 - * that match our QP by overwriting them with next entries. 
2034 -
2035 - while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2036 - cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2037 - if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2038 - CQE_BYTE_16_LOCAL_QPN_S) &
2039 - HNS_ROCE_CQE_QPN_MASK) == qpn) {
2040 - /* The v1 engine does not support SRQ */
2041 - ++nfreed;
2042 - } else if (nfreed) {
2043 - dest = get_cqe(hr_cq, (prod_index + nfreed) &
2044 - hr_cq->ib_cq.cqe);
2045 - owner_bit = roce_get_bit(dest->cqe_byte_4,
2046 - CQE_BYTE_4_OWNER_S);
2047 - memcpy(dest, cqe, sizeof(*cqe));
2048 - roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
2049 - owner_bit);
2050 - }
2051 - }
2052 -
2053 - if (nfreed) {
2054 - hr_cq->cons_index += nfreed;
2055 - hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
2056 - }
2057 - }
2058 -
2059 - static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2060 - struct hns_roce_srq *srq)
2061 - {
2062 - spin_lock_irq(&hr_cq->lock);
2063 - __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
2064 - spin_unlock_irq(&hr_cq->lock);
2065 - }
2066 -
2067 - static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
2068 - struct hns_roce_cq *hr_cq, void *mb_buf,
2069 - u64 *mtts, dma_addr_t dma_handle)
2070 - {
2071 - struct hns_roce_v1_priv *priv = hr_dev->priv;
2072 - struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
2073 - struct hns_roce_cq_context *cq_context = mb_buf;
2074 - dma_addr_t tptr_dma_addr;
2075 - int offset;
2076 -
2077 - memset(cq_context, 0, sizeof(*cq_context));
2078 -
2079 - /* Get the tptr for this CQ. */
2080 - offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
2081 - tptr_dma_addr = tptr_buf->map + offset;
2082 - hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
2083 -
2084 - /* Register cq_context members */
2085 - roce_set_field(cq_context->cqc_byte_4,
2086 - CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
2087 - CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
2088 - roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
2089 - CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
2090 -
2091 - cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);
2092 -
2093 - roce_set_field(cq_context->cqc_byte_12,
2094 - CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
2095 - CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
2096 - ((u64)dma_handle >> 32));
2097 - roce_set_field(cq_context->cqc_byte_12,
2098 - CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
2099 - CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
2100 - ilog2(hr_cq->cq_depth));
2101 - roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
2102 - CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
2103 -
2104 - cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
2105 -
2106 - roce_set_field(cq_context->cqc_byte_20,
2107 - CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
2108 - CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
2109 - /* Dedicated hardware; set directly to 0 */
2110 - roce_set_field(cq_context->cqc_byte_20,
2111 - CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
2112 - CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
2113 - /*
2114 - * 44 = 32 + 12: the address is shifted right by 12 because the
2115 - * hardware works on 4K pages, and by a further 32 to obtain the
2116 - * high 32 bits that are written to the context. */
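/*
 * [Editor's note] The "44 = 32 + 12" comment above in one helper: for a
 * 4K-aligned DMA address the hardware stores address bits [43:12] in a low
 * word and bits [63:44] in a high field. Sketch with illustrative names:
 */
#include <stdint.h>

static inline void split_4k_dma_addr(uint64_t dma, uint32_t *lo, uint32_t *hi)
{
        *lo = (uint32_t)(dma >> 12); /* page frame number, bits 12..43 */
        *hi = (uint32_t)(dma >> 44); /* 44 = 12 (page shift) + 32 (low word) */
}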
2117 -
2118 - roce_set_field(cq_context->cqc_byte_20,
2119 - CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
2120 - CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
2121 - tptr_dma_addr >> 44);
2122 -
2123 - cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));
2124 -
2125 - roce_set_field(cq_context->cqc_byte_32,
2126 - CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
2127 - CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
2128 - roce_set_bit(cq_context->cqc_byte_32,
2129 - CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
2130 - roce_set_bit(cq_context->cqc_byte_32,
2131 - CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
2132 - roce_set_bit(cq_context->cqc_byte_32,
2133 - CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
2134 - roce_set_bit(cq_context->cqc_byte_32,
2135 - CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
2136 - 0);
2137 - /* The initial value of cq's ci is 0 */
2138 - roce_set_field(cq_context->cqc_byte_32,
2139 - CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
2140 - CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
2141 - }
2142 -
2143 - static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
2144 - enum ib_cq_notify_flags flags)
2145 - {
2146 - struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2147 - u32 notification_flag;
2148 - __le32 doorbell[2] = {};
2149 -
2150 - notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
2151 - IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
2152 - /*
2153 - * flags = 0: Notification Flag = 1, next completion
2154 - * flags = 1: Notification Flag = 0, solicited completion
2155 - */
2156 - doorbell[0] =
2157 - cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2158 - roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2159 - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2160 - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2161 - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2162 - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
2163 - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2164 - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
2165 - hr_cq->cqn | notification_flag);
2166 -
2167 - hns_roce_write64_k(doorbell, hr_cq->db_reg);
2168 -
2169 - return 0;
2170 - }
2171 -
2172 - static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
2173 - struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2174 - {
2175 - int qpn;
2176 - int is_send;
2177 - u16 wqe_ctr;
2178 - u32 status;
2179 - u32 opcode;
2180 - struct hns_roce_cqe *cqe;
2181 - struct hns_roce_qp *hr_qp;
2182 - struct hns_roce_wq *wq;
2183 - struct hns_roce_wqe_ctrl_seg *sq_wqe;
2184 - struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2185 - struct device *dev = &hr_dev->pdev->dev;
2186 -
2187 - /* Find the CQE according to the consumer index */
2188 - cqe = next_cqe_sw(hr_cq);
2189 - if (!cqe)
2190 - return -EAGAIN;
2191 -
2192 - ++hr_cq->cons_index;
2193 - /* Memory barrier */
2194 - rmb();
2195 - /* 0->SQ, 1->RQ */
2196 - is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
2197 -
2198 - /* The local_qpn field in a UD CQE is always 1, so the real qpn must be computed */
2199 - if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2200 - CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
2201 - qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
2202 - CQE_BYTE_20_PORT_NUM_S) +
2203 - roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2204 - CQE_BYTE_16_LOCAL_QPN_S) *
2205 - HNS_ROCE_MAX_PORTS;
2206 - } else {
2207 - qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2208 - CQE_BYTE_16_LOCAL_QPN_S);
2209 - }
2210 -
2211 - if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
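/*
 * [Editor's note] For QP0/QP1 completions the CQE's local_qpn field only
 * carries 0 or 1, so poll_one() above rebuilds the real QP number from the
 * port. The arithmetic in isolation (sketch; names are not from the driver):
 */
static inline int rebuild_sqp_qpn(int local_qpn, int port_num, int max_ports)
{
        /* per-port special QPs are laid out as local_qpn * max_ports + port */
        return port_num + local_qpn * max_ports;
}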
2212 - hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2213 - if (unlikely(!hr_qp)) {
2214 - dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
2215 - hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
2216 - return -EINVAL;
2217 - }
2218 -
2219 - *cur_qp = hr_qp;
2220 - }
2221 -
2222 - wc->qp = &(*cur_qp)->ibqp;
2223 - wc->vendor_err = 0;
2224 -
2225 - status = roce_get_field(cqe->cqe_byte_4,
2226 - CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
2227 - CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
2228 - HNS_ROCE_CQE_STATUS_MASK;
2229 - switch (status) {
2230 - case HNS_ROCE_CQE_SUCCESS:
2231 - wc->status = IB_WC_SUCCESS;
2232 - break;
2233 - case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
2234 - wc->status = IB_WC_LOC_LEN_ERR;
2235 - break;
2236 - case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
2237 - wc->status = IB_WC_LOC_QP_OP_ERR;
2238 - break;
2239 - case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
2240 - wc->status = IB_WC_LOC_PROT_ERR;
2241 - break;
2242 - case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
2243 - wc->status = IB_WC_WR_FLUSH_ERR;
2244 - break;
2245 - case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
2246 - wc->status = IB_WC_MW_BIND_ERR;
2247 - break;
2248 - case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
2249 - wc->status = IB_WC_BAD_RESP_ERR;
2250 - break;
2251 - case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
2252 - wc->status = IB_WC_LOC_ACCESS_ERR;
2253 - break;
2254 - case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
2255 - wc->status = IB_WC_REM_INV_REQ_ERR;
2256 - break;
2257 - case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
2258 - wc->status = IB_WC_REM_ACCESS_ERR;
2259 - break;
2260 - case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
2261 - wc->status = IB_WC_REM_OP_ERR;
2262 - break;
2263 - case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
2264 - wc->status = IB_WC_RETRY_EXC_ERR;
2265 - break;
2266 - case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
2267 - wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2268 - break;
2269 - default:
2270 - wc->status = IB_WC_GENERAL_ERR;
2271 - break;
2272 - }
2273 -
2274 - /* For an error CQE status, return immediately */
2275 - if (wc->status != IB_WC_SUCCESS)
2276 - return 0;
2277 -
2278 - if (is_send) {
2279 - /* The CQE corresponds to the SQ */
2280 - sq_wqe = hns_roce_get_send_wqe(*cur_qp,
2281 - roce_get_field(cqe->cqe_byte_4,
2282 - CQE_BYTE_4_WQE_INDEX_M,
2283 - CQE_BYTE_4_WQE_INDEX_S) &
2284 - ((*cur_qp)->sq.wqe_cnt-1));
2285 - switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
2286 - case HNS_ROCE_WQE_OPCODE_SEND:
2287 - wc->opcode = IB_WC_SEND;
2288 - break;
2289 - case HNS_ROCE_WQE_OPCODE_RDMA_READ:
2290 - wc->opcode = IB_WC_RDMA_READ;
2291 - wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2292 - break;
2293 - case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
2294 - wc->opcode = IB_WC_RDMA_WRITE;
2295 - break;
2296 - case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
2297 - wc->opcode = IB_WC_LOCAL_INV;
2298 - break;
2299 - case HNS_ROCE_WQE_OPCODE_UD_SEND:
2300 - wc->opcode = IB_WC_SEND;
2301 - break;
2302 - default:
2303 - wc->status = IB_WC_GENERAL_ERR;
2304 - break;
2305 - }
2306 - wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
2307 - IB_WC_WITH_IMM : 0);
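/*
 * [Editor's note] poll_one() caches the QP of the previous CQE in *cur_qp so
 * that back-to-back completions on one QP skip the lookup. Generic shape of
 * that pattern (sketch; callback and types are illustrative, not the
 * driver's):
 */
#include <stdint.h>

struct qp_cache {
        uint32_t qpn;
        void *qp;
};

static void *qp_lookup_cached(struct qp_cache *c, uint32_t qpn,
                              void *(*lookup)(uint32_t qpn))
{
        if (!c->qp || c->qpn != qpn) {
                c->qp = lookup(qpn);
                c->qpn = qpn;
        }
        return c->qp;
}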
2308 -
2309 - wq = &(*cur_qp)->sq;
2310 - if ((*cur_qp)->sq_signal_bits) {
2311 - /*
2312 - * If sq_signal_bits is set, some completions are
2313 - * suppressed, so first advance the tail pointer to
2314 - * the WQE that this CQE corresponds to.
2315 - */
2316 - wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
2317 - CQE_BYTE_4_WQE_INDEX_M,
2318 - CQE_BYTE_4_WQE_INDEX_S);
2319 - wq->tail += (wqe_ctr - (u16)wq->tail) &
2320 - (wq->wqe_cnt - 1);
2321 - }
2322 - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2323 - ++wq->tail;
2324 - } else {
2325 - /* The CQE corresponds to the RQ */
2326 - wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2327 - opcode = roce_get_field(cqe->cqe_byte_4,
2328 - CQE_BYTE_4_OPERATION_TYPE_M,
2329 - CQE_BYTE_4_OPERATION_TYPE_S) &
2330 - HNS_ROCE_CQE_OPCODE_MASK;
2331 - switch (opcode) {
2332 - case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
2333 - wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2334 - wc->wc_flags = IB_WC_WITH_IMM;
2335 - wc->ex.imm_data =
2336 - cpu_to_be32(le32_to_cpu(cqe->immediate_data));
2337 - break;
2338 - case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
2339 - if (roce_get_bit(cqe->cqe_byte_4,
2340 - CQE_BYTE_4_IMM_INDICATOR_S)) {
2341 - wc->opcode = IB_WC_RECV;
2342 - wc->wc_flags = IB_WC_WITH_IMM;
2343 - wc->ex.imm_data = cpu_to_be32(
2344 - le32_to_cpu(cqe->immediate_data));
2345 - } else {
2346 - wc->opcode = IB_WC_RECV;
2347 - wc->wc_flags = 0;
2348 - }
2349 - break;
2350 - default:
2351 - wc->status = IB_WC_GENERAL_ERR;
2352 - break;
2353 - }
2354 -
2355 - /* Update the tail pointer and record the wr_id */
2356 - wq = &(*cur_qp)->rq;
2357 - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2358 - ++wq->tail;
2359 - wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
2360 - CQE_BYTE_20_SL_S);
2361 - wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
2362 - CQE_BYTE_20_REMOTE_QPN_M,
2363 - CQE_BYTE_20_REMOTE_QPN_S);
2364 - wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
2365 - CQE_BYTE_20_GRH_PRESENT_S) ?
2366 - IB_WC_GRH : 0); 2367 - wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28, 2368 - CQE_BYTE_28_P_KEY_IDX_M, 2369 - CQE_BYTE_28_P_KEY_IDX_S); 2370 - } 2371 - 2372 - return 0; 2373 - } 2374 - 2375 - int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) 2376 - { 2377 - struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); 2378 - struct hns_roce_qp *cur_qp = NULL; 2379 - unsigned long flags; 2380 - int npolled; 2381 - int ret; 2382 - 2383 - spin_lock_irqsave(&hr_cq->lock, flags); 2384 - 2385 - for (npolled = 0; npolled < num_entries; ++npolled) { 2386 - ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled); 2387 - if (ret) 2388 - break; 2389 - } 2390 - 2391 - if (npolled) { 2392 - *hr_cq->tptr_addr = hr_cq->cons_index & 2393 - ((hr_cq->cq_depth << 1) - 1); 2394 - 2395 - hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); 2396 - } 2397 - 2398 - spin_unlock_irqrestore(&hr_cq->lock, flags); 2399 - 2400 - if (ret == 0 || ret == -EAGAIN) 2401 - return npolled; 2402 - else 2403 - return ret; 2404 - } 2405 - 2406 - static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, 2407 - struct hns_roce_hem_table *table, int obj, 2408 - int step_idx) 2409 - { 2410 - struct hns_roce_v1_priv *priv = hr_dev->priv; 2411 - struct device *dev = &hr_dev->pdev->dev; 2412 - long end = HW_SYNC_TIMEOUT_MSECS; 2413 - __le32 bt_cmd_val[2] = {0}; 2414 - unsigned long flags = 0; 2415 - void __iomem *bt_cmd; 2416 - u64 bt_ba = 0; 2417 - 2418 - switch (table->type) { 2419 - case HEM_TYPE_QPC: 2420 - bt_ba = priv->bt_table.qpc_buf.map >> 12; 2421 - break; 2422 - case HEM_TYPE_MTPT: 2423 - bt_ba = priv->bt_table.mtpt_buf.map >> 12; 2424 - break; 2425 - case HEM_TYPE_CQC: 2426 - bt_ba = priv->bt_table.cqc_buf.map >> 12; 2427 - break; 2428 - case HEM_TYPE_SRQC: 2429 - dev_dbg(dev, "HEM_TYPE_SRQC not support.\n"); 2430 - return -EINVAL; 2431 - default: 2432 - return 0; 2433 - } 2434 - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, 2435 - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); 2436 - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, 2437 - ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); 2438 - roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); 2439 - roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); 2440 - 2441 - spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags); 2442 - 2443 - bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; 2444 - 2445 - while (1) { 2446 - if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { 2447 - if (!end) { 2448 - dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); 2449 - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, 2450 - flags); 2451 - return -EBUSY; 2452 - } 2453 - } else { 2454 - break; 2455 - } 2456 - mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); 2457 - end -= HW_SYNC_SLEEP_TIME_INTERVAL; 2458 - } 2459 - 2460 - bt_cmd_val[0] = cpu_to_le32(bt_ba); 2461 - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, 2462 - ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32); 2463 - hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); 2464 - 2465 - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); 2466 - 2467 - return 0; 2468 - } 2469 - 2470 - static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev, 2471 - enum hns_roce_qp_state cur_state, 2472 - enum hns_roce_qp_state new_state, 2473 - struct hns_roce_qp_context *context, 2474 - struct hns_roce_qp *hr_qp) 2475 - { 2476 - static const u16 2477 - op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = { 2478 - [HNS_ROCE_QP_STATE_RST] = { 2479 - 
[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2480 - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2481 - [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2482 - },
2483 - [HNS_ROCE_QP_STATE_INIT] = {
2484 - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2485 - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2486 - /* Note: In v1 engine, HW doesn't support INIT2INIT.
2487 - * We use the RST2INIT cmd instead.
2488 - */
2489 - [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2490 - [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
2491 - },
2492 - [HNS_ROCE_QP_STATE_RTR] = {
2493 - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2494 - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2495 - [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
2496 - },
2497 - [HNS_ROCE_QP_STATE_RTS] = {
2498 - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2499 - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2500 - [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
2501 - [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
2502 - },
2503 - [HNS_ROCE_QP_STATE_SQD] = {
2504 - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2505 - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2506 - [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
2507 - [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
2508 - },
2509 - [HNS_ROCE_QP_STATE_ERR] = {
2510 - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2511 - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2512 - }
2513 - };
2514 -
2515 - struct hns_roce_cmd_mailbox *mailbox;
2516 - struct device *dev = &hr_dev->pdev->dev;
2517 - int ret;
2518 -
2519 - if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
2520 - new_state >= HNS_ROCE_QP_NUM_STATE ||
2521 - !op[cur_state][new_state]) {
2522 - dev_err(dev, "[modify_qp]not support state %d to %d\n",
2523 - cur_state, new_state);
2524 - return -EINVAL;
2525 - }
2526 -
2527 - if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
2528 - return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2529 - HNS_ROCE_CMD_2RST_QP,
2530 - HNS_ROCE_CMD_TIMEOUT_MSECS);
2531 -
2532 - if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
2533 - return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2534 - HNS_ROCE_CMD_2ERR_QP,
2535 - HNS_ROCE_CMD_TIMEOUT_MSECS);
2536 -
2537 - mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2538 - if (IS_ERR(mailbox))
2539 - return PTR_ERR(mailbox);
2540 -
2541 - memcpy(mailbox->buf, context, sizeof(*context));
2542 -
2543 - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2544 - op[cur_state][new_state],
2545 - HNS_ROCE_CMD_TIMEOUT_MSECS);
2546 -
2547 - hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2548 - return ret;
2549 - }
2550 -
2551 - static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
2552 - u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba)
2553 - {
2554 - struct ib_device *ibdev = &hr_dev->ib_dev;
2555 - int count;
2556 -
2557 - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba);
2558 - if (count < 1) {
2559 - ibdev_err(ibdev, "Failed to find SQ ba\n");
2560 - return -ENOBUFS;
2561 - }
2562 -
2563 - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, rq_ba,
2564 - 1, NULL);
2565 - if (!count) {
2566 - ibdev_err(ibdev, "Failed to find RQ ba\n");
2567 - return -ENOBUFS;
2568 - }
2569 -
2570 - return 0;
2571 - }
2572 -
2573 - static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2574 - int attr_mask, enum ib_qp_state cur_state,
2575 - enum ib_qp_state new_state)
2576 - {
2577 - struct hns_roce_dev *hr_dev =
to_hr_dev(ibqp->device); 2578 - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 2579 - struct hns_roce_sqp_context *context; 2580 - dma_addr_t dma_handle = 0; 2581 - u32 __iomem *addr; 2582 - u64 sq_ba = 0; 2583 - u64 rq_ba = 0; 2584 - __le32 tmp; 2585 - u32 reg_val; 2586 - 2587 - context = kzalloc(sizeof(*context), GFP_KERNEL); 2588 - if (!context) 2589 - return -ENOMEM; 2590 - 2591 - /* Search QP buf's MTTs */ 2592 - if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) 2593 - goto out; 2594 - 2595 - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 2596 - roce_set_field(context->qp1c_bytes_4, 2597 - QP1C_BYTES_4_SQ_WQE_SHIFT_M, 2598 - QP1C_BYTES_4_SQ_WQE_SHIFT_S, 2599 - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); 2600 - roce_set_field(context->qp1c_bytes_4, 2601 - QP1C_BYTES_4_RQ_WQE_SHIFT_M, 2602 - QP1C_BYTES_4_RQ_WQE_SHIFT_S, 2603 - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); 2604 - roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M, 2605 - QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn); 2606 - 2607 - context->sq_rq_bt_l = cpu_to_le32(dma_handle); 2608 - roce_set_field(context->qp1c_bytes_12, 2609 - QP1C_BYTES_12_SQ_RQ_BT_H_M, 2610 - QP1C_BYTES_12_SQ_RQ_BT_H_S, 2611 - upper_32_bits(dma_handle)); 2612 - 2613 - roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M, 2614 - QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head); 2615 - roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M, 2616 - QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port); 2617 - roce_set_bit(context->qp1c_bytes_16, 2618 - QP1C_BYTES_16_SIGNALING_TYPE_S, 2619 - hr_qp->sq_signal_bits); 2620 - roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, 2621 - 1); 2622 - roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S, 2623 - 1); 2624 - roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S, 2625 - 0); 2626 - 2627 - roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M, 2628 - QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head); 2629 - roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M, 2630 - QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index); 2631 - 2632 - context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba); 2633 - 2634 - roce_set_field(context->qp1c_bytes_28, 2635 - QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M, 2636 - QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S, 2637 - upper_32_bits(rq_ba)); 2638 - roce_set_field(context->qp1c_bytes_28, 2639 - QP1C_BYTES_28_RQ_CUR_IDX_M, 2640 - QP1C_BYTES_28_RQ_CUR_IDX_S, 0); 2641 - 2642 - roce_set_field(context->qp1c_bytes_32, 2643 - QP1C_BYTES_32_RX_CQ_NUM_M, 2644 - QP1C_BYTES_32_RX_CQ_NUM_S, 2645 - to_hr_cq(ibqp->recv_cq)->cqn); 2646 - roce_set_field(context->qp1c_bytes_32, 2647 - QP1C_BYTES_32_TX_CQ_NUM_M, 2648 - QP1C_BYTES_32_TX_CQ_NUM_S, 2649 - to_hr_cq(ibqp->send_cq)->cqn); 2650 - 2651 - context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); 2652 - 2653 - roce_set_field(context->qp1c_bytes_40, 2654 - QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M, 2655 - QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S, 2656 - upper_32_bits(sq_ba)); 2657 - roce_set_field(context->qp1c_bytes_40, 2658 - QP1C_BYTES_40_SQ_CUR_IDX_M, 2659 - QP1C_BYTES_40_SQ_CUR_IDX_S, 0); 2660 - 2661 - /* Copy context to QP1C register */ 2662 - addr = (u32 __iomem *)(hr_dev->reg_base + 2663 - ROCEE_QP1C_CFG0_0_REG + 2664 - hr_qp->phy_port * sizeof(*context)); 2665 - 2666 - writel(le32_to_cpu(context->qp1c_bytes_4), addr); 2667 - writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1); 2668 - writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2); 2669 - writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3); 2670 - 
writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4); 2671 - writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5); 2672 - writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6); 2673 - writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7); 2674 - writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8); 2675 - writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9); 2676 - } 2677 - 2678 - /* Modify QP1C status */ 2679 - reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG + 2680 - hr_qp->phy_port * sizeof(*context)); 2681 - tmp = cpu_to_le32(reg_val); 2682 - roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M, 2683 - ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state); 2684 - reg_val = le32_to_cpu(tmp); 2685 - roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG + 2686 - hr_qp->phy_port * sizeof(*context), reg_val); 2687 - 2688 - hr_qp->state = new_state; 2689 - if (new_state == IB_QPS_RESET) { 2690 - hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, 2691 - ibqp->srq ? to_hr_srq(ibqp->srq) : NULL); 2692 - if (ibqp->send_cq != ibqp->recv_cq) 2693 - hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq), 2694 - hr_qp->qpn, NULL); 2695 - 2696 - hr_qp->rq.head = 0; 2697 - hr_qp->rq.tail = 0; 2698 - hr_qp->sq.head = 0; 2699 - hr_qp->sq.tail = 0; 2700 - } 2701 - 2702 - kfree(context); 2703 - return 0; 2704 - 2705 - out: 2706 - kfree(context); 2707 - return -EINVAL; 2708 - } 2709 - 2710 - static bool check_qp_state(enum ib_qp_state cur_state, 2711 - enum ib_qp_state new_state) 2712 - { 2713 - static const bool sm[][IB_QPS_ERR + 1] = { 2714 - [IB_QPS_RESET] = { [IB_QPS_RESET] = true, 2715 - [IB_QPS_INIT] = true }, 2716 - [IB_QPS_INIT] = { [IB_QPS_RESET] = true, 2717 - [IB_QPS_INIT] = true, 2718 - [IB_QPS_RTR] = true, 2719 - [IB_QPS_ERR] = true }, 2720 - [IB_QPS_RTR] = { [IB_QPS_RESET] = true, 2721 - [IB_QPS_RTS] = true, 2722 - [IB_QPS_ERR] = true }, 2723 - [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }, 2724 - [IB_QPS_SQD] = {}, 2725 - [IB_QPS_SQE] = {}, 2726 - [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true } 2727 - }; 2728 - 2729 - return sm[cur_state][new_state]; 2730 - } 2731 - 2732 - static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, 2733 - int attr_mask, enum ib_qp_state cur_state, 2734 - enum ib_qp_state new_state) 2735 - { 2736 - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 2737 - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 2738 - struct device *dev = &hr_dev->pdev->dev; 2739 - struct hns_roce_qp_context *context; 2740 - const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); 2741 - dma_addr_t dma_handle_2 = 0; 2742 - dma_addr_t dma_handle = 0; 2743 - __le32 doorbell[2] = {0}; 2744 - u64 *mtts_2 = NULL; 2745 - int ret = -EINVAL; 2746 - const u8 *smac; 2747 - u64 sq_ba = 0; 2748 - u64 rq_ba = 0; 2749 - u32 port; 2750 - u32 port_num; 2751 - u8 *dmac; 2752 - 2753 - if (!check_qp_state(cur_state, new_state)) { 2754 - ibdev_err(ibqp->device, 2755 - "not support QP(%u) status from %d to %d\n", 2756 - ibqp->qp_num, cur_state, new_state); 2757 - return -EINVAL; 2758 - } 2759 - 2760 - context = kzalloc(sizeof(*context), GFP_KERNEL); 2761 - if (!context) 2762 - return -ENOMEM; 2763 - 2764 - /* Search qp buf's mtts */ 2765 - if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) 2766 - goto out; 2767 - 2768 - /* Search IRRL's mtts */ 2769 - mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, 2770 - hr_qp->qpn, &dma_handle_2); 2771 - if (mtts_2 == NULL) { 2772 - dev_err(dev, "qp irrl_table find failed\n"); 2773 - 
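/*
 * [Editor's note] Both check_qp_state() above and the op[][] table in
 * hns_roce_v1_qp_modify() encode the legal QP state machine as a 2-D
 * lookup in which a zero entry means "transition unsupported". Minimal
 * standalone sketch of that pattern (names illustrative):
 */
#include <errno.h>
#include <stdint.h>

enum { N_QP_STATES = 6 }; /* RST, INIT, RTR, RTS, SQD, ERR */

static int transition_cmd(const uint16_t op[N_QP_STATES][N_QP_STATES],
                          int cur, int next)
{
        if (cur < 0 || next < 0 || cur >= N_QP_STATES ||
            next >= N_QP_STATES || !op[cur][next])
                return -EINVAL; /* unsupported transition */
        return op[cur][next];
}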
goto out; 2774 - } 2775 - 2776 - /* 2777 - * Reset to init 2778 - * Mandatory param: 2779 - * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS 2780 - * Optional param: NA 2781 - */ 2782 - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 2783 - roce_set_field(context->qpc_bytes_4, 2784 - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, 2785 - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S, 2786 - to_hr_qp_type(hr_qp->ibqp.qp_type)); 2787 - 2788 - roce_set_bit(context->qpc_bytes_4, 2789 - QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0); 2790 - roce_set_bit(context->qpc_bytes_4, 2791 - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, 2792 - !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ)); 2793 - roce_set_bit(context->qpc_bytes_4, 2794 - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, 2795 - !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) 2796 - ); 2797 - roce_set_bit(context->qpc_bytes_4, 2798 - QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S, 2799 - !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) 2800 - ); 2801 - roce_set_bit(context->qpc_bytes_4, 2802 - QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1); 2803 - roce_set_field(context->qpc_bytes_4, 2804 - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M, 2805 - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S, 2806 - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); 2807 - roce_set_field(context->qpc_bytes_4, 2808 - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M, 2809 - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S, 2810 - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); 2811 - roce_set_field(context->qpc_bytes_4, 2812 - QP_CONTEXT_QPC_BYTES_4_PD_M, 2813 - QP_CONTEXT_QPC_BYTES_4_PD_S, 2814 - to_hr_pd(ibqp->pd)->pdn); 2815 - hr_qp->access_flags = attr->qp_access_flags; 2816 - roce_set_field(context->qpc_bytes_8, 2817 - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M, 2818 - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S, 2819 - to_hr_cq(ibqp->send_cq)->cqn); 2820 - roce_set_field(context->qpc_bytes_8, 2821 - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M, 2822 - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S, 2823 - to_hr_cq(ibqp->recv_cq)->cqn); 2824 - 2825 - if (ibqp->srq) 2826 - roce_set_field(context->qpc_bytes_12, 2827 - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M, 2828 - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S, 2829 - to_hr_srq(ibqp->srq)->srqn); 2830 - 2831 - roce_set_field(context->qpc_bytes_12, 2832 - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, 2833 - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, 2834 - attr->pkey_index); 2835 - hr_qp->pkey_index = attr->pkey_index; 2836 - roce_set_field(context->qpc_bytes_16, 2837 - QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, 2838 - QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); 2839 - } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { 2840 - roce_set_field(context->qpc_bytes_4, 2841 - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, 2842 - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S, 2843 - to_hr_qp_type(hr_qp->ibqp.qp_type)); 2844 - roce_set_bit(context->qpc_bytes_4, 2845 - QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0); 2846 - if (attr_mask & IB_QP_ACCESS_FLAGS) { 2847 - roce_set_bit(context->qpc_bytes_4, 2848 - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, 2849 - !!(attr->qp_access_flags & 2850 - IB_ACCESS_REMOTE_READ)); 2851 - roce_set_bit(context->qpc_bytes_4, 2852 - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, 2853 - !!(attr->qp_access_flags & 2854 - IB_ACCESS_REMOTE_WRITE)); 2855 - } else { 2856 - roce_set_bit(context->qpc_bytes_4, 2857 - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, 2858 - !!(hr_qp->access_flags & 2859 - IB_ACCESS_REMOTE_READ)); 2860 - roce_set_bit(context->qpc_bytes_4, 2861 - 
QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, 2862 - !!(hr_qp->access_flags & 2863 - IB_ACCESS_REMOTE_WRITE)); 2864 - } 2865 - 2866 - roce_set_bit(context->qpc_bytes_4, 2867 - QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1); 2868 - roce_set_field(context->qpc_bytes_4, 2869 - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M, 2870 - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S, 2871 - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); 2872 - roce_set_field(context->qpc_bytes_4, 2873 - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M, 2874 - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S, 2875 - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); 2876 - roce_set_field(context->qpc_bytes_4, 2877 - QP_CONTEXT_QPC_BYTES_4_PD_M, 2878 - QP_CONTEXT_QPC_BYTES_4_PD_S, 2879 - to_hr_pd(ibqp->pd)->pdn); 2880 - 2881 - roce_set_field(context->qpc_bytes_8, 2882 - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M, 2883 - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S, 2884 - to_hr_cq(ibqp->send_cq)->cqn); 2885 - roce_set_field(context->qpc_bytes_8, 2886 - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M, 2887 - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S, 2888 - to_hr_cq(ibqp->recv_cq)->cqn); 2889 - 2890 - if (ibqp->srq) 2891 - roce_set_field(context->qpc_bytes_12, 2892 - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M, 2893 - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S, 2894 - to_hr_srq(ibqp->srq)->srqn); 2895 - if (attr_mask & IB_QP_PKEY_INDEX) 2896 - roce_set_field(context->qpc_bytes_12, 2897 - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, 2898 - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, 2899 - attr->pkey_index); 2900 - else 2901 - roce_set_field(context->qpc_bytes_12, 2902 - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, 2903 - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, 2904 - hr_qp->pkey_index); 2905 - 2906 - roce_set_field(context->qpc_bytes_16, 2907 - QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, 2908 - QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); 2909 - } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 2910 - if ((attr_mask & IB_QP_ALT_PATH) || 2911 - (attr_mask & IB_QP_ACCESS_FLAGS) || 2912 - (attr_mask & IB_QP_PKEY_INDEX) || 2913 - (attr_mask & IB_QP_QKEY)) { 2914 - dev_err(dev, "INIT2RTR attr_mask error\n"); 2915 - goto out; 2916 - } 2917 - 2918 - dmac = (u8 *)attr->ah_attr.roce.dmac; 2919 - 2920 - context->sq_rq_bt_l = cpu_to_le32(dma_handle); 2921 - roce_set_field(context->qpc_bytes_24, 2922 - QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M, 2923 - QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S, 2924 - upper_32_bits(dma_handle)); 2925 - roce_set_bit(context->qpc_bytes_24, 2926 - QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S, 2927 - 1); 2928 - roce_set_field(context->qpc_bytes_24, 2929 - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M, 2930 - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S, 2931 - attr->min_rnr_timer); 2932 - context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2)); 2933 - roce_set_field(context->qpc_bytes_32, 2934 - QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M, 2935 - QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S, 2936 - ((u32)(dma_handle_2 >> 32)) & 2937 - QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M); 2938 - roce_set_field(context->qpc_bytes_32, 2939 - QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M, 2940 - QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0); 2941 - roce_set_bit(context->qpc_bytes_32, 2942 - QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S, 2943 - 1); 2944 - roce_set_bit(context->qpc_bytes_32, 2945 - QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S, 2946 - hr_qp->sq_signal_bits); 2947 - 2948 - port = (attr_mask & IB_QP_PORT) ? 
(attr->port_num - 1) : 2949 - hr_qp->port; 2950 - smac = (const u8 *)hr_dev->dev_addr[port]; 2951 - /* when dmac equals smac or loop_idc is 1, it should loopback */ 2952 - if (ether_addr_equal_unaligned(dmac, smac) || 2953 - hr_dev->loop_idc == 0x1) 2954 - roce_set_bit(context->qpc_bytes_32, 2955 - QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1); 2956 - 2957 - roce_set_bit(context->qpc_bytes_32, 2958 - QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S, 2959 - rdma_ah_get_ah_flags(&attr->ah_attr)); 2960 - roce_set_field(context->qpc_bytes_32, 2961 - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M, 2962 - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S, 2963 - ilog2((unsigned int)attr->max_dest_rd_atomic)); 2964 - 2965 - if (attr_mask & IB_QP_DEST_QPN) 2966 - roce_set_field(context->qpc_bytes_36, 2967 - QP_CONTEXT_QPC_BYTES_36_DEST_QP_M, 2968 - QP_CONTEXT_QPC_BYTES_36_DEST_QP_S, 2969 - attr->dest_qp_num); 2970 - 2971 - /* Configure GID index */ 2972 - port_num = rdma_ah_get_port_num(&attr->ah_attr); 2973 - roce_set_field(context->qpc_bytes_36, 2974 - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M, 2975 - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S, 2976 - hns_get_gid_index(hr_dev, 2977 - port_num - 1, 2978 - grh->sgid_index)); 2979 - 2980 - memcpy(&(context->dmac_l), dmac, 4); 2981 - 2982 - roce_set_field(context->qpc_bytes_44, 2983 - QP_CONTEXT_QPC_BYTES_44_DMAC_H_M, 2984 - QP_CONTEXT_QPC_BYTES_44_DMAC_H_S, 2985 - *((u16 *)(&dmac[4]))); 2986 - roce_set_field(context->qpc_bytes_44, 2987 - QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M, 2988 - QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S, 2989 - rdma_ah_get_static_rate(&attr->ah_attr)); 2990 - roce_set_field(context->qpc_bytes_44, 2991 - QP_CONTEXT_QPC_BYTES_44_HOPLMT_M, 2992 - QP_CONTEXT_QPC_BYTES_44_HOPLMT_S, 2993 - grh->hop_limit); 2994 - 2995 - roce_set_field(context->qpc_bytes_48, 2996 - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M, 2997 - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S, 2998 - grh->flow_label); 2999 - roce_set_field(context->qpc_bytes_48, 3000 - QP_CONTEXT_QPC_BYTES_48_TCLASS_M, 3001 - QP_CONTEXT_QPC_BYTES_48_TCLASS_S, 3002 - grh->traffic_class); 3003 - roce_set_field(context->qpc_bytes_48, 3004 - QP_CONTEXT_QPC_BYTES_48_MTU_M, 3005 - QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu); 3006 - 3007 - memcpy(context->dgid, grh->dgid.raw, 3008 - sizeof(grh->dgid.raw)); 3009 - 3010 - dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l, 3011 - roce_get_field(context->qpc_bytes_44, 3012 - QP_CONTEXT_QPC_BYTES_44_DMAC_H_M, 3013 - QP_CONTEXT_QPC_BYTES_44_DMAC_H_S)); 3014 - 3015 - roce_set_field(context->qpc_bytes_68, 3016 - QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M, 3017 - QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, 3018 - hr_qp->rq.head); 3019 - roce_set_field(context->qpc_bytes_68, 3020 - QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M, 3021 - QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0); 3022 - 3023 - context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba); 3024 - 3025 - roce_set_field(context->qpc_bytes_76, 3026 - QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M, 3027 - QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S, 3028 - upper_32_bits(rq_ba)); 3029 - roce_set_field(context->qpc_bytes_76, 3030 - QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M, 3031 - QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0); 3032 - 3033 - context->rx_rnr_time = 0; 3034 - 3035 - roce_set_field(context->qpc_bytes_84, 3036 - QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M, 3037 - QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S, 3038 - attr->rq_psn - 1); 3039 - roce_set_field(context->qpc_bytes_84, 3040 - QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M, 3041 - QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0); 3042 - 
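/*
 * [Editor's note] The loopback test above ("when dmac equals smac or
 * loop_idc is 1, it should loopback") reduces to this predicate (sketch;
 * 6-byte MAC addresses, names not from the driver):
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static inline bool should_loopback(const uint8_t dmac[6],
                                   const uint8_t smac[6], bool loop_idc)
{
        return loop_idc || memcmp(dmac, smac, 6) == 0;
}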
3043 - roce_set_field(context->qpc_bytes_88, 3044 - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M, 3045 - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S, 3046 - attr->rq_psn); 3047 - roce_set_bit(context->qpc_bytes_88, 3048 - QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0); 3049 - roce_set_bit(context->qpc_bytes_88, 3050 - QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0); 3051 - roce_set_field(context->qpc_bytes_88, 3052 - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M, 3053 - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S, 3054 - 0); 3055 - roce_set_field(context->qpc_bytes_88, 3056 - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M, 3057 - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S, 3058 - 0); 3059 - 3060 - context->dma_length = 0; 3061 - context->r_key = 0; 3062 - context->va_l = 0; 3063 - context->va_h = 0; 3064 - 3065 - roce_set_field(context->qpc_bytes_108, 3066 - QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M, 3067 - QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0); 3068 - roce_set_bit(context->qpc_bytes_108, 3069 - QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0); 3070 - roce_set_bit(context->qpc_bytes_108, 3071 - QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0); 3072 - 3073 - roce_set_field(context->qpc_bytes_112, 3074 - QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M, 3075 - QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0); 3076 - roce_set_field(context->qpc_bytes_112, 3077 - QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M, 3078 - QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0); 3079 - 3080 - /* For chip resp ack */ 3081 - roce_set_field(context->qpc_bytes_156, 3082 - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, 3083 - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, 3084 - hr_qp->phy_port); 3085 - roce_set_field(context->qpc_bytes_156, 3086 - QP_CONTEXT_QPC_BYTES_156_SL_M, 3087 - QP_CONTEXT_QPC_BYTES_156_SL_S, 3088 - rdma_ah_get_sl(&attr->ah_attr)); 3089 - hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); 3090 - } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { 3091 - /* If exist optional param, return error */ 3092 - if ((attr_mask & IB_QP_ALT_PATH) || 3093 - (attr_mask & IB_QP_ACCESS_FLAGS) || 3094 - (attr_mask & IB_QP_QKEY) || 3095 - (attr_mask & IB_QP_PATH_MIG_STATE) || 3096 - (attr_mask & IB_QP_CUR_STATE) || 3097 - (attr_mask & IB_QP_MIN_RNR_TIMER)) { 3098 - dev_err(dev, "RTR2RTS attr_mask error\n"); 3099 - goto out; 3100 - } 3101 - 3102 - context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); 3103 - 3104 - roce_set_field(context->qpc_bytes_120, 3105 - QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M, 3106 - QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S, 3107 - upper_32_bits(sq_ba)); 3108 - 3109 - roce_set_field(context->qpc_bytes_124, 3110 - QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M, 3111 - QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0); 3112 - roce_set_field(context->qpc_bytes_124, 3113 - QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M, 3114 - QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0); 3115 - 3116 - roce_set_field(context->qpc_bytes_128, 3117 - QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M, 3118 - QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S, 3119 - attr->sq_psn); 3120 - roce_set_bit(context->qpc_bytes_128, 3121 - QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0); 3122 - roce_set_field(context->qpc_bytes_128, 3123 - QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M, 3124 - QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S, 3125 - 0); 3126 - roce_set_bit(context->qpc_bytes_128, 3127 - QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0); 3128 - 3129 - roce_set_field(context->qpc_bytes_132, 3130 - QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M, 3131 - 
QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0); 3132 - roce_set_field(context->qpc_bytes_132, 3133 - QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M, 3134 - QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0); 3135 - 3136 - roce_set_field(context->qpc_bytes_136, 3137 - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M, 3138 - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S, 3139 - attr->sq_psn); 3140 - roce_set_field(context->qpc_bytes_136, 3141 - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M, 3142 - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S, 3143 - attr->sq_psn); 3144 - 3145 - roce_set_field(context->qpc_bytes_140, 3146 - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M, 3147 - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S, 3148 - (attr->sq_psn >> SQ_PSN_SHIFT)); 3149 - roce_set_field(context->qpc_bytes_140, 3150 - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M, 3151 - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0); 3152 - roce_set_bit(context->qpc_bytes_140, 3153 - QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0); 3154 - 3155 - roce_set_field(context->qpc_bytes_148, 3156 - QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M, 3157 - QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0); 3158 - roce_set_field(context->qpc_bytes_148, 3159 - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, 3160 - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, 3161 - attr->retry_cnt); 3162 - roce_set_field(context->qpc_bytes_148, 3163 - QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M, 3164 - QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, 3165 - attr->rnr_retry); 3166 - roce_set_field(context->qpc_bytes_148, 3167 - QP_CONTEXT_QPC_BYTES_148_LSN_M, 3168 - QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100); 3169 - 3170 - context->rnr_retry = 0; 3171 - 3172 - roce_set_field(context->qpc_bytes_156, 3173 - QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M, 3174 - QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S, 3175 - attr->retry_cnt); 3176 - if (attr->timeout < 0x12) { 3177 - dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n", 3178 - attr->timeout); 3179 - roce_set_field(context->qpc_bytes_156, 3180 - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, 3181 - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, 3182 - 0x12); 3183 - } else { 3184 - roce_set_field(context->qpc_bytes_156, 3185 - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, 3186 - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, 3187 - attr->timeout); 3188 - } 3189 - roce_set_field(context->qpc_bytes_156, 3190 - QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M, 3191 - QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S, 3192 - attr->rnr_retry); 3193 - roce_set_field(context->qpc_bytes_156, 3194 - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, 3195 - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, 3196 - hr_qp->phy_port); 3197 - roce_set_field(context->qpc_bytes_156, 3198 - QP_CONTEXT_QPC_BYTES_156_SL_M, 3199 - QP_CONTEXT_QPC_BYTES_156_SL_S, 3200 - rdma_ah_get_sl(&attr->ah_attr)); 3201 - hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); 3202 - roce_set_field(context->qpc_bytes_156, 3203 - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M, 3204 - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S, 3205 - ilog2((unsigned int)attr->max_rd_atomic)); 3206 - roce_set_field(context->qpc_bytes_156, 3207 - QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M, 3208 - QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0); 3209 - context->pkt_use_len = 0; 3210 - 3211 - roce_set_field(context->qpc_bytes_164, 3212 - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M, 3213 - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn); 3214 - roce_set_field(context->qpc_bytes_164, 3215 - QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M, 3216 - QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0); 3217 - 3218 - 
roce_set_field(context->qpc_bytes_168,
3219 - QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
3220 - QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
3221 - attr->sq_psn);
3222 - roce_set_field(context->qpc_bytes_168,
3223 - QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
3224 - QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
3225 - roce_set_field(context->qpc_bytes_168,
3226 - QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
3227 - QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
3228 - roce_set_bit(context->qpc_bytes_168,
3229 - QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
3230 - roce_set_bit(context->qpc_bytes_168,
3231 - QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
3232 - roce_set_bit(context->qpc_bytes_168,
3233 - QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
3234 - context->sge_use_len = 0;
3235 -
3236 - roce_set_field(context->qpc_bytes_176,
3237 - QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
3238 - QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
3239 - roce_set_field(context->qpc_bytes_176,
3240 - QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
3241 - QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
3242 - 0);
3243 - roce_set_field(context->qpc_bytes_180,
3244 - QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
3245 - QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
3246 - roce_set_field(context->qpc_bytes_180,
3247 - QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
3248 - QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
3249 -
3250 - context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
3251 -
3252 - roce_set_field(context->qpc_bytes_188,
3253 - QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
3254 - QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
3255 - upper_32_bits(sq_ba));
3256 - roce_set_bit(context->qpc_bytes_188,
3257 - QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
3258 - roce_set_field(context->qpc_bytes_188,
3259 - QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
3260 - QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
3261 - 0);
3262 - }
3263 -
3264 - /* Every state transition must update the QP state field */
3265 - roce_set_field(context->qpc_bytes_144,
3266 - QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3267 - QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
3268 -
3269 - /* SW passes the context to HW */
3270 - ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state),
3271 - to_hns_roce_state(new_state), context,
3272 - hr_qp);
3273 - if (ret) {
3274 - dev_err(dev, "hns_roce_qp_modify failed\n");
3275 - goto out;
3276 - }
3277 -
3278 - /*
3279 - * The driver uses rst2init instead of init2init, so the hardware
3280 - * needs to refresh the RQ head from the doorbell again.
3281 - */
3282 - if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3283 - roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
3284 - RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
3285 - roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
3286 - RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
3287 - roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
3288 - RQ_DOORBELL_U32_8_CMD_S, 1);
3289 - roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
3290 -
3291 - if (ibqp->uobject) {
3292 - hr_qp->rq.db_reg = hr_dev->reg_base +
3293 - hr_dev->odb_offset +
3294 - DB_REG_OFFSET * hr_dev->priv_uar.index;
3295 - }
3296 -
3297 - hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
3298 - }
3299 -
3300 - hr_qp->state = new_state;
3301 -
3302 - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3303 - hr_qp->resp_depth = attr->max_dest_rd_atomic;
3304 - if (attr_mask & IB_QP_PORT) {
3305 - hr_qp->port = attr->port_num - 1;
3306 - hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3307 - }
3308 -
3309 - if (new_state == IB_QPS_RESET && !ibqp->uobject) {
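/*
 * [Editor's note] In the RTR2RTS branch above, ack timeouts below 0x12 are
 * logged and clamped rather than rejected, since the v1 hardware cannot
 * honour smaller values. The clamp in isolation (sketch):
 */
#include <stdint.h>

static inline uint8_t clamp_ack_timeout(uint8_t timeout)
{
        return timeout < 0x12 ? 0x12 : timeout;
}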
hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, 3311 - ibqp->srq ? to_hr_srq(ibqp->srq) : NULL); 3312 - if (ibqp->send_cq != ibqp->recv_cq) 3313 - hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq), 3314 - hr_qp->qpn, NULL); 3315 - 3316 - hr_qp->rq.head = 0; 3317 - hr_qp->rq.tail = 0; 3318 - hr_qp->sq.head = 0; 3319 - hr_qp->sq.tail = 0; 3320 - } 3321 - out: 3322 - kfree(context); 3323 - return ret; 3324 - } 3325 - 3326 - static int hns_roce_v1_modify_qp(struct ib_qp *ibqp, 3327 - const struct ib_qp_attr *attr, int attr_mask, 3328 - enum ib_qp_state cur_state, 3329 - enum ib_qp_state new_state) 3330 - { 3331 - if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 3332 - return -EOPNOTSUPP; 3333 - 3334 - if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) 3335 - return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state, 3336 - new_state); 3337 - else 3338 - return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state, 3339 - new_state); 3340 - } 3341 - 3342 - static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state) 3343 - { 3344 - switch (state) { 3345 - case HNS_ROCE_QP_STATE_RST: 3346 - return IB_QPS_RESET; 3347 - case HNS_ROCE_QP_STATE_INIT: 3348 - return IB_QPS_INIT; 3349 - case HNS_ROCE_QP_STATE_RTR: 3350 - return IB_QPS_RTR; 3351 - case HNS_ROCE_QP_STATE_RTS: 3352 - return IB_QPS_RTS; 3353 - case HNS_ROCE_QP_STATE_SQD: 3354 - return IB_QPS_SQD; 3355 - case HNS_ROCE_QP_STATE_ERR: 3356 - return IB_QPS_ERR; 3357 - default: 3358 - return IB_QPS_ERR; 3359 - } 3360 - } 3361 - 3362 - static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev, 3363 - struct hns_roce_qp *hr_qp, 3364 - struct hns_roce_qp_context *hr_context) 3365 - { 3366 - struct hns_roce_cmd_mailbox *mailbox; 3367 - int ret; 3368 - 3369 - mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 3370 - if (IS_ERR(mailbox)) 3371 - return PTR_ERR(mailbox); 3372 - 3373 - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0, 3374 - HNS_ROCE_CMD_QUERY_QP, 3375 - HNS_ROCE_CMD_TIMEOUT_MSECS); 3376 - if (!ret) 3377 - memcpy(hr_context, mailbox->buf, sizeof(*hr_context)); 3378 - else 3379 - dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n"); 3380 - 3381 - hns_roce_free_cmd_mailbox(hr_dev, mailbox); 3382 - 3383 - return ret; 3384 - } 3385 - 3386 - static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, 3387 - int qp_attr_mask, 3388 - struct ib_qp_init_attr *qp_init_attr) 3389 - { 3390 - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 3391 - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 3392 - struct hns_roce_sqp_context context; 3393 - u32 addr; 3394 - 3395 - mutex_lock(&hr_qp->mutex); 3396 - 3397 - if (hr_qp->state == IB_QPS_RESET) { 3398 - qp_attr->qp_state = IB_QPS_RESET; 3399 - goto done; 3400 - } 3401 - 3402 - addr = ROCEE_QP1C_CFG0_0_REG + 3403 - hr_qp->port * sizeof(struct hns_roce_sqp_context); 3404 - context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr)); 3405 - context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1)); 3406 - context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2)); 3407 - context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3)); 3408 - context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4)); 3409 - context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5)); 3410 - context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6)); 3411 - context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7)); 3412 - context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8)); 3413 - context.qp1c_bytes_40 = 
cpu_to_le32(roce_read(hr_dev, addr + 9)); 3414 - 3415 - hr_qp->state = roce_get_field(context.qp1c_bytes_4, 3416 - QP1C_BYTES_4_QP_STATE_M, 3417 - QP1C_BYTES_4_QP_STATE_S); 3418 - qp_attr->qp_state = hr_qp->state; 3419 - qp_attr->path_mtu = IB_MTU_256; 3420 - qp_attr->path_mig_state = IB_MIG_ARMED; 3421 - qp_attr->qkey = QKEY_VAL; 3422 - qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; 3423 - qp_attr->rq_psn = 0; 3424 - qp_attr->sq_psn = 0; 3425 - qp_attr->dest_qp_num = 1; 3426 - qp_attr->qp_access_flags = 6; 3427 - 3428 - qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20, 3429 - QP1C_BYTES_20_PKEY_IDX_M, 3430 - QP1C_BYTES_20_PKEY_IDX_S); 3431 - qp_attr->port_num = hr_qp->port + 1; 3432 - qp_attr->sq_draining = 0; 3433 - qp_attr->max_rd_atomic = 0; 3434 - qp_attr->max_dest_rd_atomic = 0; 3435 - qp_attr->min_rnr_timer = 0; 3436 - qp_attr->timeout = 0; 3437 - qp_attr->retry_cnt = 0; 3438 - qp_attr->rnr_retry = 0; 3439 - qp_attr->alt_timeout = 0; 3440 - 3441 - done: 3442 - qp_attr->cur_qp_state = qp_attr->qp_state; 3443 - qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; 3444 - qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs; 3445 - qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; 3446 - qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; 3447 - qp_attr->cap.max_inline_data = 0; 3448 - qp_init_attr->cap = qp_attr->cap; 3449 - qp_init_attr->create_flags = 0; 3450 - 3451 - mutex_unlock(&hr_qp->mutex); 3452 - 3453 - return 0; 3454 - } 3455 - 3456 - static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, 3457 - int qp_attr_mask, 3458 - struct ib_qp_init_attr *qp_init_attr) 3459 - { 3460 - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 3461 - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 3462 - struct device *dev = &hr_dev->pdev->dev; 3463 - struct hns_roce_qp_context *context; 3464 - int tmp_qp_state; 3465 - int ret = 0; 3466 - int state; 3467 - 3468 - context = kzalloc(sizeof(*context), GFP_KERNEL); 3469 - if (!context) 3470 - return -ENOMEM; 3471 - 3472 - memset(qp_attr, 0, sizeof(*qp_attr)); 3473 - memset(qp_init_attr, 0, sizeof(*qp_init_attr)); 3474 - 3475 - mutex_lock(&hr_qp->mutex); 3476 - 3477 - if (hr_qp->state == IB_QPS_RESET) { 3478 - qp_attr->qp_state = IB_QPS_RESET; 3479 - goto done; 3480 - } 3481 - 3482 - ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context); 3483 - if (ret) { 3484 - dev_err(dev, "query qpc error\n"); 3485 - ret = -EINVAL; 3486 - goto out; 3487 - } 3488 - 3489 - state = roce_get_field(context->qpc_bytes_144, 3490 - QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, 3491 - QP_CONTEXT_QPC_BYTES_144_QP_STATE_S); 3492 - tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state); 3493 - if (tmp_qp_state == -1) { 3494 - dev_err(dev, "to_ib_qp_state error\n"); 3495 - ret = -EINVAL; 3496 - goto out; 3497 - } 3498 - hr_qp->state = (u8)tmp_qp_state; 3499 - qp_attr->qp_state = (enum ib_qp_state)hr_qp->state; 3500 - qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48, 3501 - QP_CONTEXT_QPC_BYTES_48_MTU_M, 3502 - QP_CONTEXT_QPC_BYTES_48_MTU_S); 3503 - qp_attr->path_mig_state = IB_MIG_ARMED; 3504 - qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; 3505 - if (hr_qp->ibqp.qp_type == IB_QPT_UD) 3506 - qp_attr->qkey = QKEY_VAL; 3507 - 3508 - qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88, 3509 - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M, 3510 - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S); 3511 - qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164, 3512 - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M, 3513 - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S); 3514 - 
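/*
 * [Editor's note] Two details of the query path worth isolating. First,
 * the unrolled roce_read() sequence above amounts to "read n consecutive
 * context words". Second, qp_access_flags below is rebuilt from single
 * context bits, whose shifts match IB_ACCESS_REMOTE_WRITE (1 << 1),
 * IB_ACCESS_REMOTE_READ (1 << 2) and IB_ACCESS_REMOTE_ATOMIC (1 << 3).
 * Sketches with illustrative names; the callback stands in for roce_read():
 */
#include <stdint.h>

static void read_ctx_words(uint32_t base, uint32_t *dst, int n,
                           uint32_t (*rd)(uint32_t addr))
{
        int i;

        for (i = 0; i < n; i++)
                dst[i] = rd(base + i);
}

static inline int rebuild_access_flags(int rd_en, int wr_en, int atomic_en)
{
        return (rd_en << 2) | (wr_en << 1) | (atomic_en << 3);
}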
qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36, 3515 - QP_CONTEXT_QPC_BYTES_36_DEST_QP_M, 3516 - QP_CONTEXT_QPC_BYTES_36_DEST_QP_S); 3517 - qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4, 3518 - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) | 3519 - ((roce_get_bit(context->qpc_bytes_4, 3520 - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) | 3521 - ((roce_get_bit(context->qpc_bytes_4, 3522 - QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3); 3523 - 3524 - if (hr_qp->ibqp.qp_type == IB_QPT_RC) { 3525 - struct ib_global_route *grh = 3526 - rdma_ah_retrieve_grh(&qp_attr->ah_attr); 3527 - 3528 - rdma_ah_set_sl(&qp_attr->ah_attr, 3529 - roce_get_field(context->qpc_bytes_156, 3530 - QP_CONTEXT_QPC_BYTES_156_SL_M, 3531 - QP_CONTEXT_QPC_BYTES_156_SL_S)); 3532 - rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH); 3533 - grh->flow_label = 3534 - roce_get_field(context->qpc_bytes_48, 3535 - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M, 3536 - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S); 3537 - grh->sgid_index = 3538 - roce_get_field(context->qpc_bytes_36, 3539 - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M, 3540 - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S); 3541 - grh->hop_limit = 3542 - roce_get_field(context->qpc_bytes_44, 3543 - QP_CONTEXT_QPC_BYTES_44_HOPLMT_M, 3544 - QP_CONTEXT_QPC_BYTES_44_HOPLMT_S); 3545 - grh->traffic_class = 3546 - roce_get_field(context->qpc_bytes_48, 3547 - QP_CONTEXT_QPC_BYTES_48_TCLASS_M, 3548 - QP_CONTEXT_QPC_BYTES_48_TCLASS_S); 3549 - 3550 - memcpy(grh->dgid.raw, context->dgid, 3551 - sizeof(grh->dgid.raw)); 3552 - } 3553 - 3554 - qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12, 3555 - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, 3556 - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S); 3557 - qp_attr->port_num = hr_qp->port + 1; 3558 - qp_attr->sq_draining = 0; 3559 - qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156, 3560 - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M, 3561 - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S); 3562 - qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32, 3563 - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M, 3564 - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S); 3565 - qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24, 3566 - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M, 3567 - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S)); 3568 - qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156, 3569 - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, 3570 - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S)); 3571 - qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148, 3572 - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, 3573 - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S); 3574 - qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry); 3575 - 3576 - done: 3577 - qp_attr->cur_qp_state = qp_attr->qp_state; 3578 - qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; 3579 - qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs; 3580 - 3581 - if (!ibqp->uobject) { 3582 - qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; 3583 - qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; 3584 - } else { 3585 - qp_attr->cap.max_send_wr = 0; 3586 - qp_attr->cap.max_send_sge = 0; 3587 - } 3588 - 3589 - qp_init_attr->cap = qp_attr->cap; 3590 - 3591 - out: 3592 - mutex_unlock(&hr_qp->mutex); 3593 - kfree(context); 3594 - return ret; 3595 - } 3596 - 3597 - static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, 3598 - int qp_attr_mask, 3599 - struct ib_qp_init_attr *qp_init_attr) 3600 - { 3601 - struct hns_roce_qp *hr_qp = 
to_hr_qp(ibqp); 3602 - 3603 - return hr_qp->doorbell_qpn <= 1 ? 3604 - hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) : 3605 - hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr); 3606 - } 3607 - 3608 - int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) 3609 - { 3610 - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 3611 - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 3612 - struct hns_roce_cq *send_cq, *recv_cq; 3613 - int ret; 3614 - 3615 - ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); 3616 - if (ret) 3617 - return ret; 3618 - 3619 - send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; 3620 - recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL; 3621 - 3622 - hns_roce_lock_cqs(send_cq, recv_cq); 3623 - if (!udata) { 3624 - if (recv_cq) 3625 - __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, 3626 - (hr_qp->ibqp.srq ? 3627 - to_hr_srq(hr_qp->ibqp.srq) : 3628 - NULL)); 3629 - 3630 - if (send_cq && send_cq != recv_cq) 3631 - __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL); 3632 - } 3633 - hns_roce_qp_remove(hr_dev, hr_qp); 3634 - hns_roce_unlock_cqs(send_cq, recv_cq); 3635 - 3636 - hns_roce_qp_destroy(hr_dev, hr_qp, udata); 3637 - 3638 - return 0; 3639 - } 3640 - 3641 - static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) 3642 - { 3643 - struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); 3644 - struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); 3645 - struct device *dev = &hr_dev->pdev->dev; 3646 - u32 cqe_cnt_ori; 3647 - u32 cqe_cnt_cur; 3648 - int wait_time = 0; 3649 - 3650 - /* 3651 - * Before freeing cq buffer, we need to ensure that the outstanding CQE 3652 - * have been written by checking the CQE counter. 3653 - */ 3654 - cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT); 3655 - while (1) { 3656 - if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) & 3657 - HNS_ROCE_CQE_WCMD_EMPTY_BIT) 3658 - break; 3659 - 3660 - cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT); 3661 - if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT) 3662 - break; 3663 - 3664 - msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS); 3665 - if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) { 3666 - dev_warn(dev, "Destroy cq 0x%lx timeout!\n", 3667 - hr_cq->cqn); 3668 - break; 3669 - } 3670 - wait_time++; 3671 - } 3672 - return 0; 3673 - } 3674 - 3675 - static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not) 3676 - { 3677 - roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) | 3678 - (req_not << eq->log_entries), eq->db_reg); 3679 - } 3680 - 3681 - static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev, 3682 - struct hns_roce_aeqe *aeqe, int qpn) 3683 - { 3684 - struct device *dev = &hr_dev->pdev->dev; 3685 - 3686 - dev_warn(dev, "Local Work Queue Catastrophic Error.\n"); 3687 - switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, 3688 - HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { 3689 - case HNS_ROCE_LWQCE_QPC_ERROR: 3690 - dev_warn(dev, "QP %d, QPC error.\n", qpn); 3691 - break; 3692 - case HNS_ROCE_LWQCE_MTU_ERROR: 3693 - dev_warn(dev, "QP %d, MTU error.\n", qpn); 3694 - break; 3695 - case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR: 3696 - dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn); 3697 - break; 3698 - case HNS_ROCE_LWQCE_WQE_ADDR_ERROR: 3699 - dev_warn(dev, "QP %d, WQE addr error.\n", qpn); 3700 - break; 3701 - case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR: 3702 - dev_warn(dev, "QP %d, WQE shift error\n", qpn); 3703 - break; 3704 - case 
HNS_ROCE_LWQCE_SL_ERROR: 3705 - dev_warn(dev, "QP %d, SL error.\n", qpn); 3706 - break; 3707 - case HNS_ROCE_LWQCE_PORT_ERROR: 3708 - dev_warn(dev, "QP %d, port error.\n", qpn); 3709 - break; 3710 - default: 3711 - break; 3712 - } 3713 - } 3714 - 3715 - static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, 3716 - struct hns_roce_aeqe *aeqe, 3717 - int qpn) 3718 - { 3719 - struct device *dev = &hr_dev->pdev->dev; 3720 - 3721 - dev_warn(dev, "Local Access Violation Work Queue Error.\n"); 3722 - switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, 3723 - HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { 3724 - case HNS_ROCE_LAVWQE_R_KEY_VIOLATION: 3725 - dev_warn(dev, "QP %d, R_key violation.\n", qpn); 3726 - break; 3727 - case HNS_ROCE_LAVWQE_LENGTH_ERROR: 3728 - dev_warn(dev, "QP %d, length error.\n", qpn); 3729 - break; 3730 - case HNS_ROCE_LAVWQE_VA_ERROR: 3731 - dev_warn(dev, "QP %d, VA error.\n", qpn); 3732 - break; 3733 - case HNS_ROCE_LAVWQE_PD_ERROR: 3734 - dev_err(dev, "QP %d, PD error.\n", qpn); 3735 - break; 3736 - case HNS_ROCE_LAVWQE_RW_ACC_ERROR: 3737 - dev_warn(dev, "QP %d, rw acc error.\n", qpn); 3738 - break; 3739 - case HNS_ROCE_LAVWQE_KEY_STATE_ERROR: 3740 - dev_warn(dev, "QP %d, key state error.\n", qpn); 3741 - break; 3742 - case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR: 3743 - dev_warn(dev, "QP %d, MR operation error.\n", qpn); 3744 - break; 3745 - default: 3746 - break; 3747 - } 3748 - } 3749 - 3750 - static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev, 3751 - struct hns_roce_aeqe *aeqe, 3752 - int event_type) 3753 - { 3754 - struct device *dev = &hr_dev->pdev->dev; 3755 - int phy_port; 3756 - int qpn; 3757 - 3758 - qpn = roce_get_field(aeqe->event.queue_event.num, 3759 - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, 3760 - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S); 3761 - phy_port = roce_get_field(aeqe->event.queue_event.num, 3762 - HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M, 3763 - HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S); 3764 - if (qpn <= 1) 3765 - qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port; 3766 - 3767 - switch (event_type) { 3768 - case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: 3769 - dev_warn(dev, "Invalid Req Local Work Queue Error.\n" 3770 - "QP %d, phy_port %d.\n", qpn, phy_port); 3771 - break; 3772 - case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: 3773 - hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn); 3774 - break; 3775 - case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: 3776 - hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn); 3777 - break; 3778 - default: 3779 - break; 3780 - } 3781 - 3782 - hns_roce_qp_event(hr_dev, qpn, event_type); 3783 - } 3784 - 3785 - static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev, 3786 - struct hns_roce_aeqe *aeqe, 3787 - int event_type) 3788 - { 3789 - struct device *dev = &hr_dev->pdev->dev; 3790 - u32 cqn; 3791 - 3792 - cqn = roce_get_field(aeqe->event.queue_event.num, 3793 - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, 3794 - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S); 3795 - 3796 - switch (event_type) { 3797 - case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: 3798 - dev_warn(dev, "CQ 0x%x access err.\n", cqn); 3799 - break; 3800 - case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: 3801 - dev_warn(dev, "CQ 0x%x overflow\n", cqn); 3802 - break; 3803 - case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: 3804 - dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn); 3805 - break; 3806 - default: 3807 - break; 3808 - } 3809 - 3810 - hns_roce_cq_event(hr_dev, cqn, event_type); 3811 - } 3812 - 3813 - static void 
hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev, 3814 - struct hns_roce_aeqe *aeqe) 3815 - { 3816 - struct device *dev = &hr_dev->pdev->dev; 3817 - 3818 - switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, 3819 - HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { 3820 - case HNS_ROCE_DB_SUBTYPE_SDB_OVF: 3821 - dev_warn(dev, "SDB overflow.\n"); 3822 - break; 3823 - case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF: 3824 - dev_warn(dev, "SDB almost overflow.\n"); 3825 - break; 3826 - case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP: 3827 - dev_warn(dev, "SDB almost empty.\n"); 3828 - break; 3829 - case HNS_ROCE_DB_SUBTYPE_ODB_OVF: 3830 - dev_warn(dev, "ODB overflow.\n"); 3831 - break; 3832 - case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF: 3833 - dev_warn(dev, "ODB almost overflow.\n"); 3834 - break; 3835 - case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP: 3836 - dev_warn(dev, "SDB almost empty.\n"); 3837 - break; 3838 - default: 3839 - break; 3840 - } 3841 - } 3842 - 3843 - static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry) 3844 - { 3845 - unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQE_SIZE; 3846 - 3847 - return (struct hns_roce_aeqe *)((u8 *) 3848 - (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) + 3849 - off % HNS_ROCE_BA_SIZE); 3850 - } 3851 - 3852 - static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq) 3853 - { 3854 - struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index); 3855 - 3856 - return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^ 3857 - !!(eq->cons_index & eq->entries)) ? aeqe : NULL; 3858 - } 3859 - 3860 - static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev, 3861 - struct hns_roce_eq *eq) 3862 - { 3863 - struct device *dev = &hr_dev->pdev->dev; 3864 - struct hns_roce_aeqe *aeqe; 3865 - int aeqes_found = 0; 3866 - int event_type; 3867 - 3868 - while ((aeqe = next_aeqe_sw_v1(eq))) { 3869 - /* Make sure we read the AEQ entry after we have checked the 3870 - * ownership bit 3871 - */ 3872 - dma_rmb(); 3873 - 3874 - dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n", 3875 - aeqe, 3876 - roce_get_field(aeqe->asyn, 3877 - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, 3878 - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); 3879 - event_type = roce_get_field(aeqe->asyn, 3880 - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, 3881 - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S); 3882 - switch (event_type) { 3883 - case HNS_ROCE_EVENT_TYPE_PATH_MIG: 3884 - dev_warn(dev, "PATH MIG not supported\n"); 3885 - break; 3886 - case HNS_ROCE_EVENT_TYPE_COMM_EST: 3887 - dev_warn(dev, "COMMUNICATION established\n"); 3888 - break; 3889 - case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: 3890 - dev_warn(dev, "SQ DRAINED not supported\n"); 3891 - break; 3892 - case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: 3893 - dev_warn(dev, "PATH MIG failed\n"); 3894 - break; 3895 - case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: 3896 - case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: 3897 - case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: 3898 - hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type); 3899 - break; 3900 - case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: 3901 - case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: 3902 - case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: 3903 - dev_warn(dev, "SRQ not support!\n"); 3904 - break; 3905 - case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: 3906 - case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: 3907 - case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: 3908 - hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type); 3909 - break; 3910 - case HNS_ROCE_EVENT_TYPE_PORT_CHANGE: 3911 - dev_warn(dev, "port change.\n"); 3912 - break; 3913 - 
case HNS_ROCE_EVENT_TYPE_MB: 3914 - hns_roce_cmd_event(hr_dev, 3915 - le16_to_cpu(aeqe->event.cmd.token), 3916 - aeqe->event.cmd.status, 3917 - le64_to_cpu(aeqe->event.cmd.out_param 3918 - )); 3919 - break; 3920 - case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: 3921 - hns_roce_v1_db_overflow_handle(hr_dev, aeqe); 3922 - break; 3923 - default: 3924 - dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n", 3925 - event_type, eq->eqn, eq->cons_index); 3926 - break; 3927 - } 3928 - 3929 - eq->cons_index++; 3930 - aeqes_found = 1; 3931 - 3932 - if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) 3933 - eq->cons_index = 0; 3934 - } 3935 - 3936 - set_eq_cons_index_v1(eq, 0); 3937 - 3938 - return aeqes_found; 3939 - } 3940 - 3941 - static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry) 3942 - { 3943 - unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQE_SIZE; 3944 - 3945 - return (struct hns_roce_ceqe *)((u8 *) 3946 - (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) + 3947 - off % HNS_ROCE_BA_SIZE); 3948 - } 3949 - 3950 - static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq) 3951 - { 3952 - struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index); 3953 - 3954 - return (!!(roce_get_bit(ceqe->comp, 3955 - HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^ 3956 - (!!(eq->cons_index & eq->entries)) ? ceqe : NULL; 3957 - } 3958 - 3959 - static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev, 3960 - struct hns_roce_eq *eq) 3961 - { 3962 - struct hns_roce_ceqe *ceqe; 3963 - int ceqes_found = 0; 3964 - u32 cqn; 3965 - 3966 - while ((ceqe = next_ceqe_sw_v1(eq))) { 3967 - /* Make sure we read CEQ entry after we have checked the 3968 - * ownership bit 3969 - */ 3970 - dma_rmb(); 3971 - 3972 - cqn = roce_get_field(ceqe->comp, 3973 - HNS_ROCE_CEQE_CEQE_COMP_CQN_M, 3974 - HNS_ROCE_CEQE_CEQE_COMP_CQN_S); 3975 - hns_roce_cq_completion(hr_dev, cqn); 3976 - 3977 - ++eq->cons_index; 3978 - ceqes_found = 1; 3979 - 3980 - if (eq->cons_index > 3981 - EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) 3982 - eq->cons_index = 0; 3983 - } 3984 - 3985 - set_eq_cons_index_v1(eq, 0); 3986 - 3987 - return ceqes_found; 3988 - } 3989 - 3990 - static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr) 3991 - { 3992 - struct hns_roce_eq *eq = eq_ptr; 3993 - struct hns_roce_dev *hr_dev = eq->hr_dev; 3994 - int int_work; 3995 - 3996 - if (eq->type_flag == HNS_ROCE_CEQ) 3997 - /* CEQ irq routine, CEQ is pulse irq, not clear */ 3998 - int_work = hns_roce_v1_ceq_int(hr_dev, eq); 3999 - else 4000 - /* AEQ irq routine, AEQ is pulse irq, not clear */ 4001 - int_work = hns_roce_v1_aeq_int(hr_dev, eq); 4002 - 4003 - return IRQ_RETVAL(int_work); 4004 - } 4005 - 4006 - static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id) 4007 - { 4008 - struct hns_roce_dev *hr_dev = dev_id; 4009 - struct device *dev = &hr_dev->pdev->dev; 4010 - int int_work = 0; 4011 - u32 caepaemask_val; 4012 - u32 cealmovf_val; 4013 - u32 caepaest_val; 4014 - u32 aeshift_val; 4015 - u32 ceshift_val; 4016 - u32 cemask_val; 4017 - __le32 tmp; 4018 - int i; 4019 - 4020 - /* 4021 - * Abnormal interrupt: 4022 - * AEQ overflow, ECC multi-bit err, CEQ overflow must clear 4023 - * interrupt, mask irq, clear irq, cancel mask operation 4024 - */ 4025 - aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG); 4026 - tmp = cpu_to_le32(aeshift_val); 4027 - 4028 - /* AEQE overflow */ 4029 - if (roce_get_bit(tmp, 4030 - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) { 4031 - dev_warn(dev, "AEQ overflow!\n"); 
4032 - 4033 - /* Set mask */ 4034 - caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); 4035 - tmp = cpu_to_le32(caepaemask_val); 4036 - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, 4037 - HNS_ROCE_INT_MASK_ENABLE); 4038 - caepaemask_val = le32_to_cpu(tmp); 4039 - roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val); 4040 - 4041 - /* Clear int state(INT_WC : write 1 clear) */ 4042 - caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG); 4043 - tmp = cpu_to_le32(caepaest_val); 4044 - roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1); 4045 - caepaest_val = le32_to_cpu(tmp); 4046 - roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val); 4047 - 4048 - /* Clear mask */ 4049 - caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); 4050 - tmp = cpu_to_le32(caepaemask_val); 4051 - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, 4052 - HNS_ROCE_INT_MASK_DISABLE); 4053 - caepaemask_val = le32_to_cpu(tmp); 4054 - roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val); 4055 - } 4056 - 4057 - /* CEQ almost overflow */ 4058 - for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) { 4059 - ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG + 4060 - i * CEQ_REG_OFFSET); 4061 - tmp = cpu_to_le32(ceshift_val); 4062 - 4063 - if (roce_get_bit(tmp, 4064 - ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) { 4065 - dev_warn(dev, "CEQ[%d] almost overflow!\n", i); 4066 - int_work++; 4067 - 4068 - /* Set mask */ 4069 - cemask_val = roce_read(hr_dev, 4070 - ROCEE_CAEP_CE_IRQ_MASK_0_REG + 4071 - i * CEQ_REG_OFFSET); 4072 - tmp = cpu_to_le32(cemask_val); 4073 - roce_set_bit(tmp, 4074 - ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S, 4075 - HNS_ROCE_INT_MASK_ENABLE); 4076 - cemask_val = le32_to_cpu(tmp); 4077 - roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + 4078 - i * CEQ_REG_OFFSET, cemask_val); 4079 - 4080 - /* Clear int state(INT_WC : write 1 clear) */ 4081 - cealmovf_val = roce_read(hr_dev, 4082 - ROCEE_CAEP_CEQ_ALM_OVF_0_REG + 4083 - i * CEQ_REG_OFFSET); 4084 - tmp = cpu_to_le32(cealmovf_val); 4085 - roce_set_bit(tmp, 4086 - ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S, 4087 - 1); 4088 - cealmovf_val = le32_to_cpu(tmp); 4089 - roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG + 4090 - i * CEQ_REG_OFFSET, cealmovf_val); 4091 - 4092 - /* Clear mask */ 4093 - cemask_val = roce_read(hr_dev, 4094 - ROCEE_CAEP_CE_IRQ_MASK_0_REG + 4095 - i * CEQ_REG_OFFSET); 4096 - tmp = cpu_to_le32(cemask_val); 4097 - roce_set_bit(tmp, 4098 - ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S, 4099 - HNS_ROCE_INT_MASK_DISABLE); 4100 - cemask_val = le32_to_cpu(tmp); 4101 - roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + 4102 - i * CEQ_REG_OFFSET, cemask_val); 4103 - } 4104 - } 4105 - 4106 - /* ECC multi-bit error alarm */ 4107 - dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n", 4108 - roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG), 4109 - roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG), 4110 - roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG)); 4111 - 4112 - dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n", 4113 - roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG), 4114 - roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG), 4115 - roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG)); 4116 - 4117 - return IRQ_RETVAL(int_work); 4118 - } 4119 - 4120 - static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev) 4121 - { 4122 - u32 aemask_val; 4123 - int masken = 0; 4124 - __le32 tmp; 4125 - int i; 4126 - 4127 - /* AEQ INT */ 4128 - aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); 4129 - tmp 
= cpu_to_le32(aemask_val); 4130 - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, 4131 - masken); 4132 - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken); 4133 - aemask_val = le32_to_cpu(tmp); 4134 - roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val); 4135 - 4136 - /* CEQ INT */ 4137 - for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) { 4138 - /* IRQ mask */ 4139 - roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + 4140 - i * CEQ_REG_OFFSET, masken); 4141 - } 4142 - } 4143 - 4144 - static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev, 4145 - struct hns_roce_eq *eq) 4146 - { 4147 - int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) + 4148 - HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE; 4149 - int i; 4150 - 4151 - if (!eq->buf_list) 4152 - return; 4153 - 4154 - for (i = 0; i < npages; ++i) 4155 - dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE, 4156 - eq->buf_list[i].buf, eq->buf_list[i].map); 4157 - 4158 - kfree(eq->buf_list); 4159 - } 4160 - 4161 - static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num, 4162 - int enable_flag) 4163 - { 4164 - void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num]; 4165 - __le32 tmp; 4166 - u32 val; 4167 - 4168 - val = readl(eqc); 4169 - tmp = cpu_to_le32(val); 4170 - 4171 - if (enable_flag) 4172 - roce_set_field(tmp, 4173 - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, 4174 - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, 4175 - HNS_ROCE_EQ_STAT_VALID); 4176 - else 4177 - roce_set_field(tmp, 4178 - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, 4179 - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, 4180 - HNS_ROCE_EQ_STAT_INVALID); 4181 - 4182 - val = le32_to_cpu(tmp); 4183 - writel(val, eqc); 4184 - } 4185 - 4186 - static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev, 4187 - struct hns_roce_eq *eq) 4188 - { 4189 - void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn]; 4190 - struct device *dev = &hr_dev->pdev->dev; 4191 - dma_addr_t tmp_dma_addr; 4192 - u32 eqcuridx_val; 4193 - u32 eqconsindx_val; 4194 - u32 eqshift_val; 4195 - __le32 tmp2 = 0; 4196 - __le32 tmp1 = 0; 4197 - __le32 tmp = 0; 4198 - int num_bas; 4199 - int ret; 4200 - int i; 4201 - 4202 - num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) + 4203 - HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE; 4204 - 4205 - if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) { 4206 - dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n", 4207 - (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE, 4208 - num_bas); 4209 - return -EINVAL; 4210 - } 4211 - 4212 - eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL); 4213 - if (!eq->buf_list) 4214 - return -ENOMEM; 4215 - 4216 - for (i = 0; i < num_bas; ++i) { 4217 - eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE, 4218 - &tmp_dma_addr, 4219 - GFP_KERNEL); 4220 - if (!eq->buf_list[i].buf) { 4221 - ret = -ENOMEM; 4222 - goto err_out_free_pages; 4223 - } 4224 - 4225 - eq->buf_list[i].map = tmp_dma_addr; 4226 - } 4227 - eq->cons_index = 0; 4228 - roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, 4229 - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, 4230 - HNS_ROCE_EQ_STAT_INVALID); 4231 - roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M, 4232 - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S, 4233 - eq->log_entries); 4234 - eqshift_val = le32_to_cpu(tmp); 4235 - writel(eqshift_val, eqc); 4236 - 4237 - /* Configure eq extended address 12~44bit */ 4238 - writel((u32)(eq->buf_list[0].map >> 12), eqc + 4); 4239 - 4240 - /* 4241 - * 
Configure eq extended address 45~49 bit. 4242 - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of 4243 - * using 4K page, and shift more 32 because of 4244 - * calculating the high 32 bit value evaluated to hardware. 4245 - */ 4246 - roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M, 4247 - ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S, 4248 - eq->buf_list[0].map >> 44); 4249 - roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M, 4250 - ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0); 4251 - eqcuridx_val = le32_to_cpu(tmp1); 4252 - writel(eqcuridx_val, eqc + 8); 4253 - 4254 - /* Configure eq consumer index */ 4255 - roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M, 4256 - ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0); 4257 - eqconsindx_val = le32_to_cpu(tmp2); 4258 - writel(eqconsindx_val, eqc + 0xc); 4259 - 4260 - return 0; 4261 - 4262 - err_out_free_pages: 4263 - for (i -= 1; i >= 0; i--) 4264 - dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf, 4265 - eq->buf_list[i].map); 4266 - 4267 - kfree(eq->buf_list); 4268 - return ret; 4269 - } 4270 - 4271 - static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev) 4272 - { 4273 - struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; 4274 - struct device *dev = &hr_dev->pdev->dev; 4275 - struct hns_roce_eq *eq; 4276 - int irq_num; 4277 - int eq_num; 4278 - int ret; 4279 - int i, j; 4280 - 4281 - eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; 4282 - irq_num = eq_num + hr_dev->caps.num_other_vectors; 4283 - 4284 - eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL); 4285 - if (!eq_table->eq) 4286 - return -ENOMEM; 4287 - 4288 - eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base), 4289 - GFP_KERNEL); 4290 - if (!eq_table->eqc_base) { 4291 - ret = -ENOMEM; 4292 - goto err_eqc_base_alloc_fail; 4293 - } 4294 - 4295 - for (i = 0; i < eq_num; i++) { 4296 - eq = &eq_table->eq[i]; 4297 - eq->hr_dev = hr_dev; 4298 - eq->eqn = i; 4299 - eq->irq = hr_dev->irq[i]; 4300 - eq->log_page_size = PAGE_SHIFT; 4301 - 4302 - if (i < hr_dev->caps.num_comp_vectors) { 4303 - /* CEQ */ 4304 - eq_table->eqc_base[i] = hr_dev->reg_base + 4305 - ROCEE_CAEP_CEQC_SHIFT_0_REG + 4306 - CEQ_REG_OFFSET * i; 4307 - eq->type_flag = HNS_ROCE_CEQ; 4308 - eq->db_reg = hr_dev->reg_base + 4309 - ROCEE_CAEP_CEQC_CONS_IDX_0_REG + 4310 - CEQ_REG_OFFSET * i; 4311 - eq->entries = hr_dev->caps.ceqe_depth; 4312 - eq->log_entries = ilog2(eq->entries); 4313 - eq->eqe_size = HNS_ROCE_CEQE_SIZE; 4314 - } else { 4315 - /* AEQ */ 4316 - eq_table->eqc_base[i] = hr_dev->reg_base + 4317 - ROCEE_CAEP_AEQC_AEQE_SHIFT_REG; 4318 - eq->type_flag = HNS_ROCE_AEQ; 4319 - eq->db_reg = hr_dev->reg_base + 4320 - ROCEE_CAEP_AEQE_CONS_IDX_REG; 4321 - eq->entries = hr_dev->caps.aeqe_depth; 4322 - eq->log_entries = ilog2(eq->entries); 4323 - eq->eqe_size = HNS_ROCE_AEQE_SIZE; 4324 - } 4325 - } 4326 - 4327 - /* Disable irq */ 4328 - hns_roce_v1_int_mask_enable(hr_dev); 4329 - 4330 - /* Configure ce int interval */ 4331 - roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG, 4332 - HNS_ROCE_CEQ_DEFAULT_INTERVAL); 4333 - 4334 - /* Configure ce int burst num */ 4335 - roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG, 4336 - HNS_ROCE_CEQ_DEFAULT_BURST_NUM); 4337 - 4338 - for (i = 0; i < eq_num; i++) { 4339 - ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]); 4340 - if (ret) { 4341 - dev_err(dev, "eq create failed\n"); 4342 - goto err_create_eq_fail; 4343 - } 4344 - } 4345 - 4346 - for (j = 0; 
j < irq_num; j++) { 4347 - if (j < eq_num) 4348 - ret = request_irq(hr_dev->irq[j], 4349 - hns_roce_v1_msix_interrupt_eq, 0, 4350 - hr_dev->irq_names[j], 4351 - &eq_table->eq[j]); 4352 - else 4353 - ret = request_irq(hr_dev->irq[j], 4354 - hns_roce_v1_msix_interrupt_abn, 0, 4355 - hr_dev->irq_names[j], hr_dev); 4356 - 4357 - if (ret) { 4358 - dev_err(dev, "request irq error!\n"); 4359 - goto err_request_irq_fail; 4360 - } 4361 - } 4362 - 4363 - for (i = 0; i < eq_num; i++) 4364 - hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE); 4365 - 4366 - return 0; 4367 - 4368 - err_request_irq_fail: 4369 - for (j -= 1; j >= 0; j--) 4370 - free_irq(hr_dev->irq[j], &eq_table->eq[j]); 4371 - 4372 - err_create_eq_fail: 4373 - for (i -= 1; i >= 0; i--) 4374 - hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]); 4375 - 4376 - kfree(eq_table->eqc_base); 4377 - 4378 - err_eqc_base_alloc_fail: 4379 - kfree(eq_table->eq); 4380 - 4381 - return ret; 4382 - } 4383 - 4384 - static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev) 4385 - { 4386 - struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; 4387 - int irq_num; 4388 - int eq_num; 4389 - int i; 4390 - 4391 - eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; 4392 - irq_num = eq_num + hr_dev->caps.num_other_vectors; 4393 - for (i = 0; i < eq_num; i++) { 4394 - /* Disable EQ */ 4395 - hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE); 4396 - 4397 - free_irq(hr_dev->irq[i], &eq_table->eq[i]); 4398 - 4399 - hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]); 4400 - } 4401 - for (i = eq_num; i < irq_num; i++) 4402 - free_irq(hr_dev->irq[i], hr_dev); 4403 - 4404 - kfree(eq_table->eqc_base); 4405 - kfree(eq_table->eq); 4406 - } 4407 - 4408 - static const struct ib_device_ops hns_roce_v1_dev_ops = { 4409 - .destroy_qp = hns_roce_v1_destroy_qp, 4410 - .poll_cq = hns_roce_v1_poll_cq, 4411 - .post_recv = hns_roce_v1_post_recv, 4412 - .post_send = hns_roce_v1_post_send, 4413 - .query_qp = hns_roce_v1_query_qp, 4414 - .req_notify_cq = hns_roce_v1_req_notify_cq, 4415 - }; 4416 - 4417 - static const struct hns_roce_hw hns_roce_hw_v1 = { 4418 - .reset = hns_roce_v1_reset, 4419 - .hw_profile = hns_roce_v1_profile, 4420 - .hw_init = hns_roce_v1_init, 4421 - .hw_exit = hns_roce_v1_exit, 4422 - .post_mbox = hns_roce_v1_post_mbox, 4423 - .poll_mbox_done = hns_roce_v1_chk_mbox, 4424 - .set_gid = hns_roce_v1_set_gid, 4425 - .set_mac = hns_roce_v1_set_mac, 4426 - .set_mtu = hns_roce_v1_set_mtu, 4427 - .write_mtpt = hns_roce_v1_write_mtpt, 4428 - .write_cqc = hns_roce_v1_write_cqc, 4429 - .set_hem = hns_roce_v1_set_hem, 4430 - .clear_hem = hns_roce_v1_clear_hem, 4431 - .modify_qp = hns_roce_v1_modify_qp, 4432 - .dereg_mr = hns_roce_v1_dereg_mr, 4433 - .destroy_cq = hns_roce_v1_destroy_cq, 4434 - .init_eq = hns_roce_v1_init_eq_table, 4435 - .cleanup_eq = hns_roce_v1_cleanup_eq_table, 4436 - .hns_roce_dev_ops = &hns_roce_v1_dev_ops, 4437 - }; 4438 - 4439 - static const struct of_device_id hns_roce_of_match[] = { 4440 - { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, }, 4441 - {}, 4442 - }; 4443 - MODULE_DEVICE_TABLE(of, hns_roce_of_match); 4444 - 4445 - static const struct acpi_device_id hns_roce_acpi_match[] = { 4446 - { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 }, 4447 - {}, 4448 - }; 4449 - MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match); 4450 - 4451 - static struct 4452 - platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode) 4453 - { 4454 - struct device *dev; 4455 - 4456 - /* get the 'device' corresponding to the matching 'fwnode' */ 
4457 - dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode); 4458 - /* get the platform device */ 4459 - return dev ? to_platform_device(dev) : NULL; 4460 - } 4461 - 4462 - static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) 4463 - { 4464 - struct device *dev = &hr_dev->pdev->dev; 4465 - struct platform_device *pdev = NULL; 4466 - struct net_device *netdev = NULL; 4467 - struct device_node *net_node; 4468 - int port_cnt = 0; 4469 - u8 phy_port; 4470 - int ret; 4471 - int i; 4472 - 4473 - /* check if we are compatible with the underlying SoC */ 4474 - if (dev_of_node(dev)) { 4475 - const struct of_device_id *of_id; 4476 - 4477 - of_id = of_match_node(hns_roce_of_match, dev->of_node); 4478 - if (!of_id) { 4479 - dev_err(dev, "device is not compatible!\n"); 4480 - return -ENXIO; 4481 - } 4482 - hr_dev->hw = (const struct hns_roce_hw *)of_id->data; 4483 - if (!hr_dev->hw) { 4484 - dev_err(dev, "couldn't get H/W specific DT data!\n"); 4485 - return -ENXIO; 4486 - } 4487 - } else if (is_acpi_device_node(dev->fwnode)) { 4488 - const struct acpi_device_id *acpi_id; 4489 - 4490 - acpi_id = acpi_match_device(hns_roce_acpi_match, dev); 4491 - if (!acpi_id) { 4492 - dev_err(dev, "device is not compatible!\n"); 4493 - return -ENXIO; 4494 - } 4495 - hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data; 4496 - if (!hr_dev->hw) { 4497 - dev_err(dev, "couldn't get H/W specific ACPI data!\n"); 4498 - return -ENXIO; 4499 - } 4500 - } else { 4501 - dev_err(dev, "can't read compatibility data from DT or ACPI\n"); 4502 - return -ENXIO; 4503 - } 4504 - 4505 - /* get the mapped register base address */ 4506 - hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0); 4507 - if (IS_ERR(hr_dev->reg_base)) 4508 - return PTR_ERR(hr_dev->reg_base); 4509 - 4510 - /* read the node_guid of IB device from the DT or ACPI */ 4511 - ret = device_property_read_u8_array(dev, "node-guid", 4512 - (u8 *)&hr_dev->ib_dev.node_guid, 4513 - GUID_LEN); 4514 - if (ret) { 4515 - dev_err(dev, "couldn't get node_guid from DT or ACPI!\n"); 4516 - return ret; 4517 - } 4518 - 4519 - /* get the RoCE associated ethernet ports or netdevices */ 4520 - for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) { 4521 - if (dev_of_node(dev)) { 4522 - net_node = of_parse_phandle(dev->of_node, "eth-handle", 4523 - i); 4524 - if (!net_node) 4525 - continue; 4526 - pdev = of_find_device_by_node(net_node); 4527 - } else if (is_acpi_device_node(dev->fwnode)) { 4528 - struct fwnode_reference_args args; 4529 - 4530 - ret = acpi_node_get_property_reference(dev->fwnode, 4531 - "eth-handle", 4532 - i, &args); 4533 - if (ret) 4534 - continue; 4535 - pdev = hns_roce_find_pdev(args.fwnode); 4536 - } else { 4537 - dev_err(dev, "cannot read data from DT or ACPI\n"); 4538 - return -ENXIO; 4539 - } 4540 - 4541 - if (pdev) { 4542 - netdev = platform_get_drvdata(pdev); 4543 - phy_port = (u8)i; 4544 - if (netdev) { 4545 - hr_dev->iboe.netdevs[port_cnt] = netdev; 4546 - hr_dev->iboe.phy_port[port_cnt] = phy_port; 4547 - } else { 4548 - dev_err(dev, "no netdev found with pdev %s\n", 4549 - pdev->name); 4550 - return -ENODEV; 4551 - } 4552 - port_cnt++; 4553 - } 4554 - } 4555 - 4556 - if (port_cnt == 0) { 4557 - dev_err(dev, "unable to get eth-handle for available ports!\n"); 4558 - return -EINVAL; 4559 - } 4560 - 4561 - hr_dev->caps.num_ports = port_cnt; 4562 - 4563 - /* cmd issue mode: 0 is poll, 1 is event */ 4564 - hr_dev->cmd_mod = 1; 4565 - hr_dev->loop_idc = 0; 4566 - hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; 4567 - hr_dev->odb_offset = 
ROCEE_DB_OTHERS_L_0_REG; 4568 - 4569 - /* read the interrupt names from the DT or ACPI */ 4570 - ret = device_property_read_string_array(dev, "interrupt-names", 4571 - hr_dev->irq_names, 4572 - HNS_ROCE_V1_MAX_IRQ_NUM); 4573 - if (ret < 0) { 4574 - dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n"); 4575 - return ret; 4576 - } 4577 - 4578 - /* fetch the interrupt numbers */ 4579 - for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) { 4580 - hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i); 4581 - if (hr_dev->irq[i] <= 0) 4582 - return -EINVAL; 4583 - } 4584 - 4585 - return 0; 4586 - } 4587 - 4588 - /** 4589 - * hns_roce_probe - RoCE driver entrance 4590 - * @pdev: pointer to platform device 4591 - * Return : int 4592 - * 4593 - */ 4594 - static int hns_roce_probe(struct platform_device *pdev) 4595 - { 4596 - int ret; 4597 - struct hns_roce_dev *hr_dev; 4598 - struct device *dev = &pdev->dev; 4599 - 4600 - hr_dev = ib_alloc_device(hns_roce_dev, ib_dev); 4601 - if (!hr_dev) 4602 - return -ENOMEM; 4603 - 4604 - hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL); 4605 - if (!hr_dev->priv) { 4606 - ret = -ENOMEM; 4607 - goto error_failed_kzalloc; 4608 - } 4609 - 4610 - hr_dev->pdev = pdev; 4611 - hr_dev->dev = dev; 4612 - platform_set_drvdata(pdev, hr_dev); 4613 - 4614 - if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) && 4615 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) { 4616 - dev_err(dev, "Not usable DMA addressing mode\n"); 4617 - ret = -EIO; 4618 - goto error_failed_get_cfg; 4619 - } 4620 - 4621 - ret = hns_roce_get_cfg(hr_dev); 4622 - if (ret) { 4623 - dev_err(dev, "Get Configuration failed!\n"); 4624 - goto error_failed_get_cfg; 4625 - } 4626 - 4627 - ret = hns_roce_init(hr_dev); 4628 - if (ret) { 4629 - dev_err(dev, "RoCE engine init failed!\n"); 4630 - goto error_failed_get_cfg; 4631 - } 4632 - 4633 - return 0; 4634 - 4635 - error_failed_get_cfg: 4636 - kfree(hr_dev->priv); 4637 - 4638 - error_failed_kzalloc: 4639 - ib_dealloc_device(&hr_dev->ib_dev); 4640 - 4641 - return ret; 4642 - } 4643 - 4644 - /** 4645 - * hns_roce_remove - remove RoCE device 4646 - * @pdev: pointer to platform device 4647 - */ 4648 - static int hns_roce_remove(struct platform_device *pdev) 4649 - { 4650 - struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev); 4651 - 4652 - hns_roce_exit(hr_dev); 4653 - kfree(hr_dev->priv); 4654 - ib_dealloc_device(&hr_dev->ib_dev); 4655 - 4656 - return 0; 4657 - } 4658 - 4659 - static struct platform_driver hns_roce_driver = { 4660 - .probe = hns_roce_probe, 4661 - .remove = hns_roce_remove, 4662 - .driver = { 4663 - .name = DRV_NAME, 4664 - .of_match_table = hns_roce_of_match, 4665 - .acpi_match_table = ACPI_PTR(hns_roce_acpi_match), 4666 - }, 4667 - }; 4668 - 4669 - module_platform_driver(hns_roce_driver); 4670 - 4671 - MODULE_LICENSE("Dual BSD/GPL"); 4672 - MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>"); 4673 - MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>"); 4674 - MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>"); 4675 - MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");
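The bulk of the file removed above is the hip06 event-queue machinery. Its polling helpers (next_aeqe_sw_v1()/next_ceqe_sw_v1()) use an ownership-bit scheme instead of a producer-maintained head pointer: the producer flips an owner bit in each entry every time it laps the ring, and the consumer accepts an entry only when that bit disagrees with the lap parity of its own consumer index (!!(eq->cons_index & eq->entries)), with dma_rmb() separating the ownership check from reading the entry body; the driver lets cons_index run to twice the queue depth before resetting it so the parity stays meaningful. Below is a minimal userspace sketch of that scheme, not the driver's code; demo_eqe, produce() and consume() are hypothetical names, and the kernel's barrier placement is indicated only in comments.

/* Sketch of the ownership-bit ring used by the removed hip06 EQ code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 4	/* must be a power of two */

struct demo_eqe {
	uint32_t owner;		/* flipped by the producer on every lap */
	uint32_t payload;
};

static struct demo_eqe ring[RING_ENTRIES];
static uint32_t prod_index;
static uint32_t cons_index;

static void produce(uint32_t payload)
{
	struct demo_eqe *eqe = &ring[prod_index & (RING_ENTRIES - 1)];

	eqe->payload = payload;
	/* In the kernel, a write barrier would order the payload store
	 * before the owner store that hands the entry to the consumer. */
	eqe->owner = !((prod_index / RING_ENTRIES) & 1);
	prod_index++;
}

static bool consume(uint32_t *payload)
{
	struct demo_eqe *eqe = &ring[cons_index & (RING_ENTRIES - 1)];
	bool lap_parity = !!(cons_index & RING_ENTRIES);

	/* New entry iff the owner bit disagrees with our lap parity,
	 * mirroring owner ^ !!(eq->cons_index & eq->entries) above. */
	if (!((eqe->owner != 0) ^ lap_parity))
		return false;	/* entry still belongs to the producer */

	/* In the kernel, dma_rmb() goes here so the payload is read
	 * only after the ownership check has succeeded. */
	*payload = eqe->payload;
	cons_index++;
	return true;
}

int main(void)
{
	uint32_t v;
	int i;

	for (i = 0; i < 3; i++)
		produce(i);
	while (consume(&v))
		printf("consumed %u\n", v);

	/* Wrap the ring: the flipped owner bit lets the consumer tell
	 * the fresh lap-1 entries from the stale lap-0 ones. */
	for (i = 3; i < 6; i++)
		produce(i);
	while (consume(&v))
		printf("consumed %u\n", v);

	return 0;
}

The payoff of the parity trick is that the consumer can detect new entries without reading any producer-side pointer; it reports its own progress back to the hardware only through the doorbell write (set_eq_cons_index_v1() in the removed code), and stale entries from the previous lap are rejected automatically.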
+0 -1147
drivers/infiniband/hw/hns/hns_roce_hw_v1.h
··· 1 - /* 2 - * Copyright (c) 2016 Hisilicon Limited. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 31 - */ 32 - 33 - #ifndef _HNS_ROCE_HW_V1_H 34 - #define _HNS_ROCE_HW_V1_H 35 - 36 - #define CQ_STATE_VALID 2 37 - 38 - #define HNS_ROCE_V1_MAX_PD_NUM 0x8000 39 - #define HNS_ROCE_V1_MAX_CQ_NUM 0x10000 40 - #define HNS_ROCE_V1_MAX_CQE_NUM 0x8000 41 - 42 - #define HNS_ROCE_V1_MAX_QP_NUM 0x40000 43 - #define HNS_ROCE_V1_MAX_WQE_NUM 0x4000 44 - 45 - #define HNS_ROCE_V1_MAX_MTPT_NUM 0x80000 46 - 47 - #define HNS_ROCE_V1_MAX_MTT_SEGS 0x100000 48 - 49 - #define HNS_ROCE_V1_MAX_QP_INIT_RDMA 128 50 - #define HNS_ROCE_V1_MAX_QP_DEST_RDMA 128 51 - 52 - #define HNS_ROCE_V1_MAX_SQ_DESC_SZ 64 53 - #define HNS_ROCE_V1_MAX_RQ_DESC_SZ 64 54 - #define HNS_ROCE_V1_SG_NUM 2 55 - #define HNS_ROCE_V1_INLINE_SIZE 32 56 - 57 - #define HNS_ROCE_V1_UAR_NUM 256 58 - #define HNS_ROCE_V1_PHY_UAR_NUM 8 59 - 60 - #define HNS_ROCE_V1_GID_NUM 16 61 - #define HNS_ROCE_V1_RESV_QP 8 62 - 63 - #define HNS_ROCE_V1_MAX_IRQ_NUM 34 64 - #define HNS_ROCE_V1_COMP_VEC_NUM 32 65 - #define HNS_ROCE_V1_AEQE_VEC_NUM 1 66 - #define HNS_ROCE_V1_ABNORMAL_VEC_NUM 1 67 - 68 - #define HNS_ROCE_V1_COMP_EQE_NUM 0x8000 69 - #define HNS_ROCE_V1_ASYNC_EQE_NUM 0x400 70 - 71 - #define HNS_ROCE_V1_QPC_SIZE 256 72 - #define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8 73 - #define HNS_ROCE_V1_CQC_ENTRY_SIZE 64 74 - #define HNS_ROCE_V1_MTPT_ENTRY_SIZE 64 75 - #define HNS_ROCE_V1_MTT_ENTRY_SIZE 64 76 - 77 - #define HNS_ROCE_V1_CQE_SIZE 32 78 - #define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000 79 - 80 - #define HNS_ROCE_V1_TABLE_CHUNK_SIZE (1 << 17) 81 - 82 - #define HNS_ROCE_V1_EXT_RAQ_WF 8 83 - #define HNS_ROCE_V1_RAQ_ENTRY 64 84 - #define HNS_ROCE_V1_RAQ_DEPTH 32768 85 - #define HNS_ROCE_V1_RAQ_SIZE (HNS_ROCE_V1_RAQ_ENTRY * HNS_ROCE_V1_RAQ_DEPTH) 86 - 87 - #define HNS_ROCE_V1_SDB_DEPTH 0x400 88 - #define HNS_ROCE_V1_ODB_DEPTH 0x400 89 - 90 - #define HNS_ROCE_V1_DB_RSVD 0x80 91 - 92 - #define HNS_ROCE_V1_SDB_ALEPT HNS_ROCE_V1_DB_RSVD 93 - #define HNS_ROCE_V1_SDB_ALFUL (HNS_ROCE_V1_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD) 94 - #define HNS_ROCE_V1_ODB_ALEPT HNS_ROCE_V1_DB_RSVD 95 - #define HNS_ROCE_V1_ODB_ALFUL (HNS_ROCE_V1_ODB_DEPTH - 
HNS_ROCE_V1_DB_RSVD) 96 - 97 - #define HNS_ROCE_V1_EXT_SDB_DEPTH 0x4000 98 - #define HNS_ROCE_V1_EXT_ODB_DEPTH 0x4000 99 - #define HNS_ROCE_V1_EXT_SDB_ENTRY 16 100 - #define HNS_ROCE_V1_EXT_ODB_ENTRY 16 101 - #define HNS_ROCE_V1_EXT_SDB_SIZE \ 102 - (HNS_ROCE_V1_EXT_SDB_DEPTH * HNS_ROCE_V1_EXT_SDB_ENTRY) 103 - #define HNS_ROCE_V1_EXT_ODB_SIZE \ 104 - (HNS_ROCE_V1_EXT_ODB_DEPTH * HNS_ROCE_V1_EXT_ODB_ENTRY) 105 - 106 - #define HNS_ROCE_V1_EXT_SDB_ALEPT HNS_ROCE_V1_DB_RSVD 107 - #define HNS_ROCE_V1_EXT_SDB_ALFUL \ 108 - (HNS_ROCE_V1_EXT_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD) 109 - #define HNS_ROCE_V1_EXT_ODB_ALEPT HNS_ROCE_V1_DB_RSVD 110 - #define HNS_ROCE_V1_EXT_ODB_ALFUL \ 111 - (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD) 112 - 113 - #define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000 114 - #define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000 115 - #define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5 116 - #define HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE 20 117 - 118 - #define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17) 119 - 120 - #define HNS_ROCE_V1_TPTR_ENTRY_SIZE 2 121 - #define HNS_ROCE_V1_TPTR_BUF_SIZE \ 122 - (HNS_ROCE_V1_TPTR_ENTRY_SIZE * HNS_ROCE_V1_MAX_CQ_NUM) 123 - 124 - #define HNS_ROCE_ODB_POLL_MODE 0 125 - 126 - #define HNS_ROCE_SDB_NORMAL_MODE 0 127 - #define HNS_ROCE_SDB_EXTEND_MODE 1 128 - 129 - #define HNS_ROCE_ODB_EXTEND_MODE 1 130 - 131 - #define KEY_VALID 0x02 132 - 133 - #define HNS_ROCE_CQE_QPN_MASK 0x3ffff 134 - #define HNS_ROCE_CQE_STATUS_MASK 0x1f 135 - #define HNS_ROCE_CQE_OPCODE_MASK 0xf 136 - 137 - #define HNS_ROCE_CQE_SUCCESS 0x00 138 - #define HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR 0x01 139 - #define HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR 0x02 140 - #define HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR 0x03 141 - #define HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR 0x04 142 - #define HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR 0x05 143 - #define HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR 0x06 144 - #define HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR 0x07 145 - #define HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR 0x08 146 - #define HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR 0x09 147 - #define HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR 0x0a 148 - #define HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR 0x0b 149 - #define HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR 0x0c 150 - 151 - #define QP1C_CFGN_OFFSET 0x28 152 - #define PHY_PORT_OFFSET 0x8 153 - #define MTPT_IDX_SHIFT 16 154 - #define ALL_PORT_VAL_OPEN 0x3f 155 - #define POL_TIME_INTERVAL_VAL 0x80 156 - #define SLEEP_TIME_INTERVAL 20 157 - #define SQ_PSN_SHIFT 8 158 - #define QKEY_VAL 0x80010000 159 - #define SDB_INV_CNT_OFFSET 8 160 - 161 - #define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10 162 - #define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10 163 - 164 - #define HNS_ROCE_INT_MASK_DISABLE 0 165 - #define HNS_ROCE_INT_MASK_ENABLE 1 166 - 167 - #define CEQ_REG_OFFSET 0x18 168 - 169 - #define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0 170 - 171 - #define HNS_ROCE_V1_CONS_IDX_M GENMASK(15, 0) 172 - 173 - #define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16 174 - #define HNS_ROCE_CEQE_CEQE_COMP_CQN_M GENMASK(31, 16) 175 - 176 - #define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16 177 - #define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M GENMASK(23, 16) 178 - 179 - #define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24 180 - #define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M GENMASK(30, 24) 181 - 182 - #define HNS_ROCE_AEQE_U32_4_OWNER_S 31 183 - 184 - #define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0 185 - #define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M GENMASK(23, 0) 186 - 187 - #define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25 188 - #define 
HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M GENMASK(27, 25) 189 - 190 - #define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0 191 - #define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M GENMASK(15, 0) 192 - 193 - #define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0 194 - #define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0) 195 - 196 - /* Local Work Queue Catastrophic Error,SUBTYPE 0x5 */ 197 - enum { 198 - HNS_ROCE_LWQCE_QPC_ERROR = 1, 199 - HNS_ROCE_LWQCE_MTU_ERROR, 200 - HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR, 201 - HNS_ROCE_LWQCE_WQE_ADDR_ERROR, 202 - HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR, 203 - HNS_ROCE_LWQCE_SL_ERROR, 204 - HNS_ROCE_LWQCE_PORT_ERROR, 205 - }; 206 - 207 - /* Local Access Violation Work Queue Error,SUBTYPE 0x7 */ 208 - enum { 209 - HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1, 210 - HNS_ROCE_LAVWQE_LENGTH_ERROR, 211 - HNS_ROCE_LAVWQE_VA_ERROR, 212 - HNS_ROCE_LAVWQE_PD_ERROR, 213 - HNS_ROCE_LAVWQE_RW_ACC_ERROR, 214 - HNS_ROCE_LAVWQE_KEY_STATE_ERROR, 215 - HNS_ROCE_LAVWQE_MR_OPERATION_ERROR, 216 - }; 217 - 218 - /* DOORBELL overflow subtype */ 219 - enum { 220 - HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1, 221 - HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF, 222 - HNS_ROCE_DB_SUBTYPE_ODB_OVF, 223 - HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF, 224 - HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP, 225 - HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP, 226 - }; 227 - 228 - enum { 229 - /* RQ&SRQ related operations */ 230 - HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06, 231 - HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE, 232 - }; 233 - 234 - enum { 235 - HNS_ROCE_PORT_DOWN = 0, 236 - HNS_ROCE_PORT_UP, 237 - }; 238 - 239 - struct hns_roce_cq_context { 240 - __le32 cqc_byte_4; 241 - __le32 cq_bt_l; 242 - __le32 cqc_byte_12; 243 - __le32 cur_cqe_ba0_l; 244 - __le32 cqc_byte_20; 245 - __le32 cqe_tptr_addr_l; 246 - __le32 cur_cqe_ba1_l; 247 - __le32 cqc_byte_32; 248 - }; 249 - 250 - #define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0 251 - #define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M \ 252 - (((1UL << 2) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S) 253 - 254 - #define CQ_CONTEXT_CQC_BYTE_4_CQN_S 16 255 - #define CQ_CONTEXT_CQC_BYTE_4_CQN_M \ 256 - (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQN_S) 257 - 258 - #define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S 0 259 - #define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M \ 260 - (((1UL << 17) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S) 261 - 262 - #define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S 20 263 - #define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M \ 264 - (((1UL << 4) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S) 265 - 266 - #define CQ_CONTEXT_CQC_BYTE_12_CEQN_S 24 267 - #define CQ_CONTEXT_CQC_BYTE_12_CEQN_M \ 268 - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_12_CEQN_S) 269 - 270 - #define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S 0 271 - #define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M \ 272 - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S) 273 - 274 - #define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S 16 275 - #define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M \ 276 - (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S) 277 - 278 - #define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S 8 279 - #define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M \ 280 - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S) 281 - 282 - #define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S 0 283 - #define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M \ 284 - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S) 285 - 286 - #define CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S 9 287 - 288 - #define CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S 8 289 - #define CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S 14 290 - #define 
CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S 15 291 - 292 - #define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S 16 293 - #define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M \ 294 - (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S) 295 - 296 - struct hns_roce_cqe { 297 - __le32 cqe_byte_4; 298 - union { 299 - __le32 r_key; 300 - __le32 immediate_data; 301 - }; 302 - __le32 byte_cnt; 303 - __le32 cqe_byte_16; 304 - __le32 cqe_byte_20; 305 - __le32 s_mac_l; 306 - __le32 cqe_byte_28; 307 - __le32 reserved; 308 - }; 309 - 310 - #define CQE_BYTE_4_OWNER_S 7 311 - #define CQE_BYTE_4_SQ_RQ_FLAG_S 14 312 - 313 - #define CQE_BYTE_4_STATUS_OF_THE_OPERATION_S 8 314 - #define CQE_BYTE_4_STATUS_OF_THE_OPERATION_M \ 315 - (((1UL << 5) - 1) << CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) 316 - 317 - #define CQE_BYTE_4_WQE_INDEX_S 16 318 - #define CQE_BYTE_4_WQE_INDEX_M (((1UL << 14) - 1) << CQE_BYTE_4_WQE_INDEX_S) 319 - 320 - #define CQE_BYTE_4_OPERATION_TYPE_S 0 321 - #define CQE_BYTE_4_OPERATION_TYPE_M \ 322 - (((1UL << 4) - 1) << CQE_BYTE_4_OPERATION_TYPE_S) 323 - 324 - #define CQE_BYTE_4_IMM_INDICATOR_S 15 325 - 326 - #define CQE_BYTE_16_LOCAL_QPN_S 0 327 - #define CQE_BYTE_16_LOCAL_QPN_M (((1UL << 24) - 1) << CQE_BYTE_16_LOCAL_QPN_S) 328 - 329 - #define CQE_BYTE_20_PORT_NUM_S 26 330 - #define CQE_BYTE_20_PORT_NUM_M (((1UL << 3) - 1) << CQE_BYTE_20_PORT_NUM_S) 331 - 332 - #define CQE_BYTE_20_SL_S 24 333 - #define CQE_BYTE_20_SL_M (((1UL << 2) - 1) << CQE_BYTE_20_SL_S) 334 - 335 - #define CQE_BYTE_20_REMOTE_QPN_S 0 336 - #define CQE_BYTE_20_REMOTE_QPN_M \ 337 - (((1UL << 24) - 1) << CQE_BYTE_20_REMOTE_QPN_S) 338 - 339 - #define CQE_BYTE_20_GRH_PRESENT_S 29 340 - 341 - #define CQE_BYTE_28_P_KEY_IDX_S 16 342 - #define CQE_BYTE_28_P_KEY_IDX_M (((1UL << 16) - 1) << CQE_BYTE_28_P_KEY_IDX_S) 343 - 344 - #define CQ_DB_REQ_NOT_SOL 0 345 - #define CQ_DB_REQ_NOT (1 << 16) 346 - 347 - struct hns_roce_v1_mpt_entry { 348 - __le32 mpt_byte_4; 349 - __le32 pbl_addr_l; 350 - __le32 mpt_byte_12; 351 - __le32 virt_addr_l; 352 - __le32 virt_addr_h; 353 - __le32 length; 354 - __le32 mpt_byte_28; 355 - __le32 pa0_l; 356 - __le32 mpt_byte_36; 357 - __le32 mpt_byte_40; 358 - __le32 mpt_byte_44; 359 - __le32 mpt_byte_48; 360 - __le32 pa4_l; 361 - __le32 mpt_byte_56; 362 - __le32 mpt_byte_60; 363 - __le32 mpt_byte_64; 364 - }; 365 - 366 - #define MPT_BYTE_4_KEY_STATE_S 0 367 - #define MPT_BYTE_4_KEY_STATE_M (((1UL << 2) - 1) << MPT_BYTE_4_KEY_STATE_S) 368 - 369 - #define MPT_BYTE_4_KEY_S 8 370 - #define MPT_BYTE_4_KEY_M (((1UL << 8) - 1) << MPT_BYTE_4_KEY_S) 371 - 372 - #define MPT_BYTE_4_PAGE_SIZE_S 16 373 - #define MPT_BYTE_4_PAGE_SIZE_M (((1UL << 2) - 1) << MPT_BYTE_4_PAGE_SIZE_S) 374 - 375 - #define MPT_BYTE_4_MW_TYPE_S 20 376 - 377 - #define MPT_BYTE_4_MW_BIND_ENABLE_S 21 378 - 379 - #define MPT_BYTE_4_OWN_S 22 380 - 381 - #define MPT_BYTE_4_MEMORY_LOCATION_TYPE_S 24 382 - #define MPT_BYTE_4_MEMORY_LOCATION_TYPE_M \ 383 - (((1UL << 2) - 1) << MPT_BYTE_4_MEMORY_LOCATION_TYPE_S) 384 - 385 - #define MPT_BYTE_4_REMOTE_ATOMIC_S 26 386 - #define MPT_BYTE_4_LOCAL_WRITE_S 27 387 - #define MPT_BYTE_4_REMOTE_WRITE_S 28 388 - #define MPT_BYTE_4_REMOTE_READ_S 29 389 - #define MPT_BYTE_4_REMOTE_INVAL_ENABLE_S 30 390 - #define MPT_BYTE_4_ADDRESS_TYPE_S 31 391 - 392 - #define MPT_BYTE_12_PBL_ADDR_H_S 0 393 - #define MPT_BYTE_12_PBL_ADDR_H_M \ 394 - (((1UL << 17) - 1) << MPT_BYTE_12_PBL_ADDR_H_S) 395 - 396 - #define MPT_BYTE_12_MW_BIND_COUNTER_S 17 397 - #define MPT_BYTE_12_MW_BIND_COUNTER_M \ 398 - (((1UL << 15) - 1) << 
MPT_BYTE_12_MW_BIND_COUNTER_S) 399 - 400 - #define MPT_BYTE_28_PD_S 0 401 - #define MPT_BYTE_28_PD_M (((1UL << 16) - 1) << MPT_BYTE_28_PD_S) 402 - 403 - #define MPT_BYTE_28_L_KEY_IDX_L_S 16 404 - #define MPT_BYTE_28_L_KEY_IDX_L_M \ 405 - (((1UL << 16) - 1) << MPT_BYTE_28_L_KEY_IDX_L_S) 406 - 407 - #define MPT_BYTE_36_PA0_H_S 0 408 - #define MPT_BYTE_36_PA0_H_M (((1UL << 5) - 1) << MPT_BYTE_36_PA0_H_S) 409 - 410 - #define MPT_BYTE_36_PA1_L_S 8 411 - #define MPT_BYTE_36_PA1_L_M (((1UL << 24) - 1) << MPT_BYTE_36_PA1_L_S) 412 - 413 - #define MPT_BYTE_40_PA1_H_S 0 414 - #define MPT_BYTE_40_PA1_H_M (((1UL << 13) - 1) << MPT_BYTE_40_PA1_H_S) 415 - 416 - #define MPT_BYTE_40_PA2_L_S 16 417 - #define MPT_BYTE_40_PA2_L_M (((1UL << 16) - 1) << MPT_BYTE_40_PA2_L_S) 418 - 419 - #define MPT_BYTE_44_PA2_H_S 0 420 - #define MPT_BYTE_44_PA2_H_M (((1UL << 21) - 1) << MPT_BYTE_44_PA2_H_S) 421 - 422 - #define MPT_BYTE_44_PA3_L_S 24 423 - #define MPT_BYTE_44_PA3_L_M (((1UL << 8) - 1) << MPT_BYTE_44_PA3_L_S) 424 - 425 - #define MPT_BYTE_48_PA3_H_S 0 426 - #define MPT_BYTE_48_PA3_H_M (((1UL << 29) - 1) << MPT_BYTE_48_PA3_H_S) 427 - 428 - #define MPT_BYTE_56_PA4_H_S 0 429 - #define MPT_BYTE_56_PA4_H_M (((1UL << 5) - 1) << MPT_BYTE_56_PA4_H_S) 430 - 431 - #define MPT_BYTE_56_PA5_L_S 8 432 - #define MPT_BYTE_56_PA5_L_M (((1UL << 24) - 1) << MPT_BYTE_56_PA5_L_S) 433 - 434 - #define MPT_BYTE_60_PA5_H_S 0 435 - #define MPT_BYTE_60_PA5_H_M (((1UL << 13) - 1) << MPT_BYTE_60_PA5_H_S) 436 - 437 - #define MPT_BYTE_60_PA6_L_S 16 438 - #define MPT_BYTE_60_PA6_L_M (((1UL << 16) - 1) << MPT_BYTE_60_PA6_L_S) 439 - 440 - #define MPT_BYTE_64_PA6_H_S 0 441 - #define MPT_BYTE_64_PA6_H_M (((1UL << 21) - 1) << MPT_BYTE_64_PA6_H_S) 442 - 443 - #define MPT_BYTE_64_L_KEY_IDX_H_S 24 444 - #define MPT_BYTE_64_L_KEY_IDX_H_M \ 445 - (((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S) 446 - 447 - struct hns_roce_wqe_ctrl_seg { 448 - __le32 sgl_pa_h; 449 - __le32 flag; 450 - union { 451 - __be32 imm_data; 452 - __le32 inv_key; 453 - }; 454 - __le32 msg_length; 455 - }; 456 - 457 - struct hns_roce_wqe_data_seg { 458 - __le64 addr; 459 - __le32 lkey; 460 - __le32 len; 461 - }; 462 - 463 - struct hns_roce_wqe_raddr_seg { 464 - __le32 rkey; 465 - __le32 len; /* reserved */ 466 - __le64 raddr; 467 - }; 468 - 469 - struct hns_roce_rq_wqe_ctrl { 470 - __le32 rwqe_byte_4; 471 - __le32 rocee_sgl_ba_l; 472 - __le32 rwqe_byte_12; 473 - __le32 reserved[5]; 474 - }; 475 - 476 - #define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16 477 - #define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M \ 478 - (((1UL << 6) - 1) << RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S) 479 - 480 - #define HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS 10000 481 - 482 - #define GID_LEN 16 483 - 484 - struct hns_roce_ud_send_wqe { 485 - __le32 dmac_h; 486 - __le32 u32_8; 487 - __le32 immediate_data; 488 - 489 - __le32 u32_16; 490 - union { 491 - unsigned char dgid[GID_LEN]; 492 - struct { 493 - __le32 u32_20; 494 - __le32 u32_24; 495 - __le32 u32_28; 496 - __le32 u32_32; 497 - }; 498 - }; 499 - 500 - __le32 u32_36; 501 - __le32 u32_40; 502 - 503 - __le32 va0_l; 504 - __le32 va0_h; 505 - __le32 l_key0; 506 - 507 - __le32 va1_l; 508 - __le32 va1_h; 509 - __le32 l_key1; 510 - }; 511 - 512 - #define UD_SEND_WQE_U32_4_DMAC_0_S 0 513 - #define UD_SEND_WQE_U32_4_DMAC_0_M \ 514 - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_0_S) 515 - 516 - #define UD_SEND_WQE_U32_4_DMAC_1_S 8 517 - #define UD_SEND_WQE_U32_4_DMAC_1_M \ 518 - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_1_S) 519 - 520 - #define UD_SEND_WQE_U32_4_DMAC_2_S 
16 521 - #define UD_SEND_WQE_U32_4_DMAC_2_M \ 522 - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_2_S) 523 - 524 - #define UD_SEND_WQE_U32_4_DMAC_3_S 24 525 - #define UD_SEND_WQE_U32_4_DMAC_3_M \ 526 - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_3_S) 527 - 528 - #define UD_SEND_WQE_U32_8_DMAC_4_S 0 529 - #define UD_SEND_WQE_U32_8_DMAC_4_M \ 530 - (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_4_S) 531 - 532 - #define UD_SEND_WQE_U32_8_DMAC_5_S 8 533 - #define UD_SEND_WQE_U32_8_DMAC_5_M \ 534 - (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S) 535 - 536 - #define UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S 22 537 - 538 - #define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16 539 - #define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \ 540 - (((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S) 541 - 542 - #define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S 24 543 - #define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M \ 544 - (((1UL << 6) - 1) << UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S) 545 - 546 - #define UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S 31 547 - 548 - #define UD_SEND_WQE_U32_16_DEST_QP_S 0 549 - #define UD_SEND_WQE_U32_16_DEST_QP_M \ 550 - (((1UL << 24) - 1) << UD_SEND_WQE_U32_16_DEST_QP_S) 551 - 552 - #define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S 24 553 - #define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M \ 554 - (((1UL << 8) - 1) << UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S) 555 - 556 - #define UD_SEND_WQE_U32_36_FLOW_LABEL_S 0 557 - #define UD_SEND_WQE_U32_36_FLOW_LABEL_M \ 558 - (((1UL << 20) - 1) << UD_SEND_WQE_U32_36_FLOW_LABEL_S) 559 - 560 - #define UD_SEND_WQE_U32_36_PRIORITY_S 20 561 - #define UD_SEND_WQE_U32_36_PRIORITY_M \ 562 - (((1UL << 4) - 1) << UD_SEND_WQE_U32_36_PRIORITY_S) 563 - 564 - #define UD_SEND_WQE_U32_36_SGID_INDEX_S 24 565 - #define UD_SEND_WQE_U32_36_SGID_INDEX_M \ 566 - (((1UL << 8) - 1) << UD_SEND_WQE_U32_36_SGID_INDEX_S) 567 - 568 - #define UD_SEND_WQE_U32_40_HOP_LIMIT_S 0 569 - #define UD_SEND_WQE_U32_40_HOP_LIMIT_M \ 570 - (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_HOP_LIMIT_S) 571 - 572 - #define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S 8 573 - #define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M \ 574 - (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S) 575 - 576 - struct hns_roce_sqp_context { 577 - __le32 qp1c_bytes_4; 578 - __le32 sq_rq_bt_l; 579 - __le32 qp1c_bytes_12; 580 - __le32 qp1c_bytes_16; 581 - __le32 qp1c_bytes_20; 582 - __le32 cur_rq_wqe_ba_l; 583 - __le32 qp1c_bytes_28; 584 - __le32 qp1c_bytes_32; 585 - __le32 cur_sq_wqe_ba_l; 586 - __le32 qp1c_bytes_40; 587 - }; 588 - 589 - #define QP1C_BYTES_4_QP_STATE_S 0 590 - #define QP1C_BYTES_4_QP_STATE_M \ 591 - (((1UL << 3) - 1) << QP1C_BYTES_4_QP_STATE_S) 592 - 593 - #define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8 594 - #define QP1C_BYTES_4_SQ_WQE_SHIFT_M \ 595 - (((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S) 596 - 597 - #define QP1C_BYTES_4_RQ_WQE_SHIFT_S 12 598 - #define QP1C_BYTES_4_RQ_WQE_SHIFT_M \ 599 - (((1UL << 4) - 1) << QP1C_BYTES_4_RQ_WQE_SHIFT_S) 600 - 601 - #define QP1C_BYTES_4_PD_S 16 602 - #define QP1C_BYTES_4_PD_M (((1UL << 16) - 1) << QP1C_BYTES_4_PD_S) 603 - 604 - #define QP1C_BYTES_12_SQ_RQ_BT_H_S 0 605 - #define QP1C_BYTES_12_SQ_RQ_BT_H_M \ 606 - (((1UL << 17) - 1) << QP1C_BYTES_12_SQ_RQ_BT_H_S) 607 - 608 - #define QP1C_BYTES_16_RQ_HEAD_S 0 609 - #define QP1C_BYTES_16_RQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_16_RQ_HEAD_S) 610 - 611 - #define QP1C_BYTES_16_PORT_NUM_S 16 612 - #define QP1C_BYTES_16_PORT_NUM_M \ 613 - (((1UL << 3) - 1) << QP1C_BYTES_16_PORT_NUM_S) 614 - 615 - #define QP1C_BYTES_16_SIGNALING_TYPE_S 27
616 - #define QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S 28 617 - #define QP1C_BYTES_16_RQ_BA_FLG_S 29 618 - #define QP1C_BYTES_16_SQ_BA_FLG_S 30 619 - #define QP1C_BYTES_16_QP1_ERR_S 31 620 - 621 - #define QP1C_BYTES_20_SQ_HEAD_S 0 622 - #define QP1C_BYTES_20_SQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_20_SQ_HEAD_S) 623 - 624 - #define QP1C_BYTES_20_PKEY_IDX_S 16 625 - #define QP1C_BYTES_20_PKEY_IDX_M \ 626 - (((1UL << 16) - 1) << QP1C_BYTES_20_PKEY_IDX_S) 627 - 628 - #define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S 0 629 - #define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M \ 630 - (((1UL << 5) - 1) << QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S) 631 - 632 - #define QP1C_BYTES_28_RQ_CUR_IDX_S 16 633 - #define QP1C_BYTES_28_RQ_CUR_IDX_M \ 634 - (((1UL << 15) - 1) << QP1C_BYTES_28_RQ_CUR_IDX_S) 635 - 636 - #define QP1C_BYTES_32_TX_CQ_NUM_S 0 637 - #define QP1C_BYTES_32_TX_CQ_NUM_M \ 638 - (((1UL << 16) - 1) << QP1C_BYTES_32_TX_CQ_NUM_S) 639 - 640 - #define QP1C_BYTES_32_RX_CQ_NUM_S 16 641 - #define QP1C_BYTES_32_RX_CQ_NUM_M \ 642 - (((1UL << 16) - 1) << QP1C_BYTES_32_RX_CQ_NUM_S) 643 - 644 - #define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S 0 645 - #define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M \ 646 - (((1UL << 5) - 1) << QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S) 647 - 648 - #define QP1C_BYTES_40_SQ_CUR_IDX_S 16 649 - #define QP1C_BYTES_40_SQ_CUR_IDX_M \ 650 - (((1UL << 15) - 1) << QP1C_BYTES_40_SQ_CUR_IDX_S) 651 - 652 - #define HNS_ROCE_WQE_INLINE (1UL<<31) 653 - #define HNS_ROCE_WQE_SE (1UL<<30) 654 - 655 - #define HNS_ROCE_WQE_SGE_NUM_BIT 24 656 - #define HNS_ROCE_WQE_IMM (1UL<<23) 657 - #define HNS_ROCE_WQE_FENCE (1UL<<21) 658 - #define HNS_ROCE_WQE_CQ_NOTIFY (1UL<<20) 659 - 660 - #define HNS_ROCE_WQE_OPCODE_SEND (0<<16) 661 - #define HNS_ROCE_WQE_OPCODE_RDMA_READ (1<<16) 662 - #define HNS_ROCE_WQE_OPCODE_RDMA_WRITE (2<<16) 663 - #define HNS_ROCE_WQE_OPCODE_LOCAL_INV (4<<16) 664 - #define HNS_ROCE_WQE_OPCODE_UD_SEND (7<<16) 665 - #define HNS_ROCE_WQE_OPCODE_MASK (15<<16) 666 - 667 - struct hns_roce_qp_context { 668 - __le32 qpc_bytes_4; 669 - __le32 qpc_bytes_8; 670 - __le32 qpc_bytes_12; 671 - __le32 qpc_bytes_16; 672 - __le32 sq_rq_bt_l; 673 - __le32 qpc_bytes_24; 674 - __le32 irrl_ba_l; 675 - __le32 qpc_bytes_32; 676 - __le32 qpc_bytes_36; 677 - __le32 dmac_l; 678 - __le32 qpc_bytes_44; 679 - __le32 qpc_bytes_48; 680 - u8 dgid[16]; 681 - __le32 qpc_bytes_68; 682 - __le32 cur_rq_wqe_ba_l; 683 - __le32 qpc_bytes_76; 684 - __le32 rx_rnr_time; 685 - __le32 qpc_bytes_84; 686 - __le32 qpc_bytes_88; 687 - union { 688 - __le32 rx_sge_len; 689 - __le32 dma_length; 690 - }; 691 - union { 692 - __le32 rx_sge_num; 693 - __le32 rx_send_pktn; 694 - __le32 r_key; 695 - }; 696 - __le32 va_l; 697 - __le32 va_h; 698 - __le32 qpc_bytes_108; 699 - __le32 qpc_bytes_112; 700 - __le32 rx_cur_sq_wqe_ba_l; 701 - __le32 qpc_bytes_120; 702 - __le32 qpc_bytes_124; 703 - __le32 qpc_bytes_128; 704 - __le32 qpc_bytes_132; 705 - __le32 qpc_bytes_136; 706 - __le32 qpc_bytes_140; 707 - __le32 qpc_bytes_144; 708 - __le32 qpc_bytes_148; 709 - union { 710 - __le32 rnr_retry; 711 - __le32 ack_time; 712 - }; 713 - __le32 qpc_bytes_156; 714 - __le32 pkt_use_len; 715 - __le32 qpc_bytes_164; 716 - __le32 qpc_bytes_168; 717 - union { 718 - __le32 sge_use_len; 719 - __le32 pa_use_len; 720 - }; 721 - __le32 qpc_bytes_176; 722 - __le32 qpc_bytes_180; 723 - __le32 tx_cur_sq_wqe_ba_l; 724 - __le32 qpc_bytes_188; 725 - __le32 rvd21; 726 - }; 727 - 728 - #define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0 729 - #define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M \
730 - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S) 731 - 732 - #define QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S 3 733 - #define QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S 4 734 - #define QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S 5 735 - #define QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S 6 736 - #define QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S 7 737 - 738 - #define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S 8 739 - #define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M \ 740 - (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S) 741 - 742 - #define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S 12 743 - #define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M \ 744 - (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S) 745 - 746 - #define QP_CONTEXT_QPC_BYTES_4_PD_S 16 747 - #define QP_CONTEXT_QPC_BYTES_4_PD_M \ 748 - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_4_PD_S) 749 - 750 - #define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S 0 751 - #define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M \ 752 - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S) 753 - 754 - #define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S 16 755 - #define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M \ 756 - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S) 757 - 758 - #define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S 0 759 - #define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M \ 760 - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S) 761 - 762 - #define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S 16 763 - #define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M \ 764 - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S) 765 - 766 - #define QP_CONTEXT_QPC_BYTES_16_QP_NUM_S 0 767 - #define QP_CONTEXT_QPC_BYTES_16_QP_NUM_M \ 768 - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_16_QP_NUM_S) 769 - 770 - #define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S 0 771 - #define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M \ 772 - (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S) 773 - 774 - #define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S 18 775 - #define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M \ 776 - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S) 777 - 778 - #define QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S 23 779 - 780 - #define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S 0 781 - #define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M \ 782 - (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S) 783 - 784 - #define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S 18 785 - #define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M \ 786 - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S) 787 - 788 - #define QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S 20 789 - #define QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S 21 790 - #define QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S 22 791 - #define QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S 23 792 - 793 - #define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S 24 794 - #define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M \ 795 - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S) 796 - 797 - #define QP_CONTEXT_QPC_BYTES_36_DEST_QP_S 0 798 - #define QP_CONTEXT_QPC_BYTES_36_DEST_QP_M \ 799 - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_36_DEST_QP_S) 800 - 801 - #define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S 24 802 - #define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M \ 803 - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S) 804 - 805 - #define QP_CONTEXT_QPC_BYTES_44_DMAC_H_S 0 806 - #define QP_CONTEXT_QPC_BYTES_44_DMAC_H_M \ 807 - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_44_DMAC_H_S) 808 -
809 - #define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S 16 810 - #define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M \ 811 - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S) 812 - 813 - #define QP_CONTEXT_QPC_BYTES_44_HOPLMT_S 24 814 - #define QP_CONTEXT_QPC_BYTES_44_HOPLMT_M \ 815 - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_HOPLMT_S) 816 - 817 - #define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S 0 818 - #define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M \ 819 - (((1UL << 20) - 1) << QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S) 820 - 821 - #define QP_CONTEXT_QPC_BYTES_48_TCLASS_S 20 822 - #define QP_CONTEXT_QPC_BYTES_48_TCLASS_M \ 823 - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_48_TCLASS_S) 824 - 825 - #define QP_CONTEXT_QPC_BYTES_48_MTU_S 28 826 - #define QP_CONTEXT_QPC_BYTES_48_MTU_M \ 827 - (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_48_MTU_S) 828 - 829 - #define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S 0 830 - #define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M \ 831 - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S) 832 - 833 - #define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S 16 834 - #define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M \ 835 - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S) 836 - 837 - #define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S 0 838 - #define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M \ 839 - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S) 840 - 841 - #define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S 8 842 - #define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M \ 843 - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S) 844 - 845 - #define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S 0 846 - #define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M \ 847 - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S) 848 - 849 - #define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S 24 850 - #define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M \ 851 - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S) 852 - 853 - #define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S 0 854 - #define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M \ 855 - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S) 856 - 857 - #define QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S 24 858 - #define QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S 25 859 - 860 - #define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S 26 861 - #define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M \ 862 - (((1UL << 2) - 1) << \ 863 - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S) 864 - 865 - #define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S 29 866 - #define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M \ 867 - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S) 868 - 869 - #define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S 0 870 - #define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M \ 871 - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S) 872 - 873 - #define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S 24 874 - #define QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S 25 875 - 876 - #define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S 0 877 - #define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M \ 878 - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S) 879 - 880 - #define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S 24 881 - #define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M \ 882 - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S) 883 - 884 - #define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S 0 885 - #define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M \
886 - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S) 887 - 888 - #define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S 0 889 - #define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M \ 890 - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S) 891 - 892 - #define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S 16 893 - #define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M \ 894 - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S) 895 - 896 - #define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S 0 897 - #define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M \ 898 - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S) 899 - 900 - #define QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S 24 901 - 902 - #define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S 25 903 - #define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M \ 904 - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S) 905 - 906 - #define QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S 27 907 - 908 - #define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S 0 909 - #define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M \ 910 - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S) 911 - 912 - #define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S 24 913 - #define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M \ 914 - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S) 915 - 916 - #define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S 0 917 - #define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M \ 918 - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S) 919 - 920 - #define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S 24 921 - #define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M \ 922 - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S) 923 - 924 - #define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S 0 925 - #define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M \ 926 - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S) 927 - 928 - #define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S 16 929 - #define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M \ 930 - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S) 931 - 932 - #define QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S 31 933 - 934 - #define QP_CONTEXT_QPC_BYTES_144_QP_STATE_S 0 935 - #define QP_CONTEXT_QPC_BYTES_144_QP_STATE_M \ 936 - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_144_QP_STATE_S) 937 - 938 - #define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S 0 939 - #define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M \ 940 - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S) 941 - 942 - #define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S 2 943 - #define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M \ 944 - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S) 945 - 946 - #define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S 5 947 - #define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M \ 948 - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S) 949 - 950 - #define QP_CONTEXT_QPC_BYTES_148_LSN_S 8 951 - #define QP_CONTEXT_QPC_BYTES_148_LSN_M \ 952 - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_148_LSN_S) 953 - 954 - #define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S 0 955 - #define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M \ 956 - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S) 957 - 958 - #define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S 3 959 - #define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M \ 960 - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S) 961 - 962 - #define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S 8
963 - #define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M \ 964 - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S) 965 - 966 - #define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S 11 967 - #define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M \ 968 - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S) 969 - 970 - #define QP_CONTEXT_QPC_BYTES_156_SL_S 14 971 - #define QP_CONTEXT_QPC_BYTES_156_SL_M \ 972 - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_SL_S) 973 - 974 - #define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S 16 975 - #define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M \ 976 - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S) 977 - 978 - #define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S 24 979 - #define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M \ 980 - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S) 981 - 982 - #define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S 0 983 - #define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M \ 984 - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S) 985 - 986 - #define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S 24 987 - #define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M \ 988 - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S) 989 - 990 - #define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S 0 991 - #define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M \ 992 - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S) 993 - 994 - #define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S 24 995 - #define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M \ 996 - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S) 997 - 998 - #define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S 26 999 - #define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M \ 1000 - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S) 1001 - 1002 - #define QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S 28 1003 - #define QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S 29 1004 - #define QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S 30 1005 - 1006 - #define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S 0 1007 - #define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M \ 1008 - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S) 1009 - 1010 - #define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S 16 1011 - #define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M \ 1012 - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S) 1013 - 1014 - #define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S 0 1015 - #define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M \ 1016 - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S) 1017 - 1018 - #define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S 16 1019 - #define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M \ 1020 - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S) 1021 - 1022 - #define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S 0 1023 - #define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M \ 1024 - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S) 1025 - 1026 - #define QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S 8 1027 - 1028 - #define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S 16 1029 - #define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \ 1030 - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S) 1031 - 1032 - #define STATUS_MASK 0xff 1033 - #define GO_BIT_TIMEOUT_MSECS 10000 1034 - #define HCR_STATUS_OFFSET 0x18 1035 - #define HCR_GO_BIT 15 1036 - 1037 - struct hns_roce_rq_db { 1038 - __le32 u32_4; 1039 - __le32 u32_8; 1040 - }; 1041 - 1042 - #define RQ_DOORBELL_U32_4_RQ_HEAD_S 0 1043 - #define RQ_DOORBELL_U32_4_RQ_HEAD_M \ 1044 - (((1UL << 15) - 1) << RQ_DOORBELL_U32_4_RQ_HEAD_S) 1045 -
1046 - #define RQ_DOORBELL_U32_8_QPN_S 0 1047 - #define RQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << RQ_DOORBELL_U32_8_QPN_S) 1048 - 1049 - #define RQ_DOORBELL_U32_8_CMD_S 28 1050 - #define RQ_DOORBELL_U32_8_CMD_M (((1UL << 3) - 1) << RQ_DOORBELL_U32_8_CMD_S) 1051 - 1052 - #define RQ_DOORBELL_U32_8_HW_SYNC_S 31 1053 - 1054 - struct hns_roce_sq_db { 1055 - __le32 u32_4; 1056 - __le32 u32_8; 1057 - }; 1058 - 1059 - #define SQ_DOORBELL_U32_4_SQ_HEAD_S 0 1060 - #define SQ_DOORBELL_U32_4_SQ_HEAD_M \ 1061 - (((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S) 1062 - 1063 - #define SQ_DOORBELL_U32_4_SL_S 16 1064 - #define SQ_DOORBELL_U32_4_SL_M \ 1065 - (((1UL << 2) - 1) << SQ_DOORBELL_U32_4_SL_S) 1066 - 1067 - #define SQ_DOORBELL_U32_4_PORT_S 18 1068 - #define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S) 1069 - 1070 - #define SQ_DOORBELL_U32_8_QPN_S 0 1071 - #define SQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << SQ_DOORBELL_U32_8_QPN_S) 1072 - 1073 - #define SQ_DOORBELL_HW_SYNC_S 31 1074 - 1075 - struct hns_roce_ext_db { 1076 - int esdb_dep; 1077 - int eodb_dep; 1078 - struct hns_roce_buf_list *sdb_buf_list; 1079 - struct hns_roce_buf_list *odb_buf_list; 1080 - }; 1081 - 1082 - struct hns_roce_db_table { 1083 - int sdb_ext_mod; 1084 - int odb_ext_mod; 1085 - struct hns_roce_ext_db *ext_db; 1086 - }; 1087 - 1088 - #define HW_SYNC_SLEEP_TIME_INTERVAL 20 1089 - #define HW_SYNC_TIMEOUT_MSECS (25 * HW_SYNC_SLEEP_TIME_INTERVAL) 1090 - #define BT_CMD_SYNC_SHIFT 31 1091 - #define HNS_ROCE_BA_SIZE (32 * 4096) 1092 - 1093 - struct hns_roce_bt_table { 1094 - struct hns_roce_buf_list qpc_buf; 1095 - struct hns_roce_buf_list mtpt_buf; 1096 - struct hns_roce_buf_list cqc_buf; 1097 - }; 1098 - 1099 - struct hns_roce_tptr_table { 1100 - struct hns_roce_buf_list tptr_buf; 1101 - }; 1102 - 1103 - struct hns_roce_qp_work { 1104 - struct work_struct work; 1105 - struct ib_device *ib_dev; 1106 - struct hns_roce_qp *qp; 1107 - u32 db_wait_stage; 1108 - u32 sdb_issue_ptr; 1109 - u32 sdb_inv_cnt; 1110 - u32 sche_cnt; 1111 - }; 1112 - 1113 - struct hns_roce_mr_free_work { 1114 - struct work_struct work; 1115 - struct ib_device *ib_dev; 1116 - struct completion *comp; 1117 - int comp_flag; 1118 - void *mr; 1119 - }; 1120 - 1121 - struct hns_roce_recreate_lp_qp_work { 1122 - struct work_struct work; 1123 - struct ib_device *ib_dev; 1124 - struct completion *comp; 1125 - int comp_flag; 1126 - }; 1127 - 1128 - struct hns_roce_free_mr { 1129 - struct workqueue_struct *free_mr_wq; 1130 - struct hns_roce_qp *mr_free_qp[HNS_ROCE_V1_RESV_QP]; 1131 - struct hns_roce_cq *mr_free_cq; 1132 - struct hns_roce_pd *mr_free_pd; 1133 - }; 1134 - 1135 - struct hns_roce_v1_priv { 1136 - struct hns_roce_db_table db_table; 1137 - struct hns_roce_raq_table raq_table; 1138 - struct hns_roce_bt_table bt_table; 1139 - struct hns_roce_tptr_table tptr_table; 1140 - struct hns_roce_free_mr free_mr; 1141 - }; 1142 - 1143 - int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); 1144 - int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); 1145 - int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); 1146 - 1147 - #endif
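Everything removed above follows the hip06 driver's shift/mask convention: each *_S macro names a field's bit offset and the paired *_M macro its mask, consumed by helpers such as roce_set_field(). A minimal sketch of the pattern (field names from the deleted header; context and pdn are illustrative):

        /* program the PD field (bits 16..31) of qp1c_bytes_4 */
        roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
                       QP1C_BYTES_4_PD_S, pdn);

With hip06 (hw v1) support removed there are no remaining users of these register layouts, which is what allows the whole header to be deleted.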
+23 -26
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
··· 678 678 static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, 679 679 void *wqe) 680 680 { 681 + #define HNS_ROCE_SL_SHIFT 2 681 682 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe; 682 683 683 684 /* All kinds of DirectWQE have the same header field layout */ ··· 686 685 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M, 687 686 V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl); 688 687 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M, 689 - V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2); 688 + V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, 689 + qp->sl >> HNS_ROCE_SL_SHIFT); 690 690 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M, 691 691 V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head); 692 692 ··· 1307 1305 continue; 1308 1306 1309 1307 dev_err_ratelimited(hr_dev->dev, 1310 - "Cmdq IO error, opcode = %x, return = %x\n", 1308 + "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n", 1311 1309 desc->opcode, desc_ret); 1312 1310 ret = -EIO; 1313 1311 } 1314 1312 } else { 1315 1313 /* FW/HW reset or incorrect number of desc */ 1316 1314 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG); 1317 - dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n", 1315 + dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n", 1318 1316 csq->head, tail); 1319 1317 csq->head = tail; 1320 1318 ··· 1573 1571 struct hns_roce_cmq_desc desc; 1574 1572 int ret; 1575 1573 1576 - if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) { 1574 + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { 1577 1575 hr_dev->func_num = 1; 1578 1576 return 0; 1579 1577 } ··· 2005 2003 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM; 2006 2004 2007 2005 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { 2008 - caps->flags |= HNS_ROCE_CAP_FLAG_STASH; 2006 + caps->flags |= HNS_ROCE_CAP_FLAG_STASH | 2007 + HNS_ROCE_CAP_FLAG_DIRECT_WQE; 2009 2008 caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE; 2010 2009 } else { 2011 2010 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE; ··· 2147 2144 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ; 2148 2145 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ; 2149 2146 2150 - caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM; 2151 2147 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM; 2152 2148 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0; 2153 2149 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0; ··· 2163 2161 (u32)priv->handle->rinfo.num_vectors - 2); 2164 2162 2165 2163 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { 2164 + caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM; 2166 2165 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE; 2167 2166 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE; 2168 2167 ··· 2184 2181 } else { 2185 2182 u32 func_num = max_t(u32, 1, hr_dev->func_num); 2186 2183 2184 + caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM; 2187 2185 caps->ceqe_size = HNS_ROCE_CEQE_SIZE; 2188 2186 caps->aeqe_size = HNS_ROCE_AEQE_SIZE; 2189 2187 caps->gid_table_len[0] /= func_num; ··· 2397 2393 struct hns_roce_caps *caps = &hr_dev->caps; 2398 2394 int ret; 2399 2395 2400 - if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) 2396 + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) 2401 2397 return 0; 2402 2398 2403 2399 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE, ··· 2971 2967 return hns_roce_cmq_send(hr_dev, desc, 2); 2972 2968 } 2973 2969 2974 - static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port, 2975 - int gid_index, const union ib_gid *gid, 2970 + static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index, 
2971 + const union ib_gid *gid, 2976 2972 const struct ib_gid_attr *attr) 2977 2973 { 2978 2974 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1; ··· 3067 3063 } 3068 3064 3069 3065 static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, 3070 - void *mb_buf, struct hns_roce_mr *mr, 3071 - unsigned long mtpt_idx) 3066 + void *mb_buf, struct hns_roce_mr *mr) 3072 3067 { 3073 3068 struct hns_roce_v2_mpt_entry *mpt_entry; 3074 3069 int ret; ··· 4491 4488 return 0; 4492 4489 } 4493 4490 4494 - static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn) 4495 - { 4496 - if (!fl) 4497 - fl = rdma_calc_flow_label(lqpn, rqpn); 4498 - 4499 - return rdma_flow_label_to_udp_sport(fl); 4500 - } 4501 - 4502 4491 static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr, 4503 4492 u32 *dip_idx) 4504 4493 { ··· 4707 4712 } 4708 4713 4709 4714 hr_reg_write(context, QPC_UDPSPN, 4710 - is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num, 4711 - attr->dest_qp_num) : 0); 4715 + is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num, 4716 + attr->dest_qp_num) : 4717 + 0); 4712 4718 4713 4719 hr_reg_clear(qpc_mask, QPC_UDPSPN); 4714 4720 ··· 4735 4739 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); 4736 4740 if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) { 4737 4741 ibdev_err(ibdev, 4738 - "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n", 4742 + "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n", 4739 4743 hr_qp->sl, MAX_SERVICE_LEVEL); 4740 4744 return -EINVAL; 4741 4745 } ··· 4764 4768 [IB_QPS_ERR] = true }, 4765 4769 [IB_QPS_SQD] = {}, 4766 4770 [IB_QPS_SQE] = {}, 4767 - [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true } 4771 + [IB_QPS_ERR] = { [IB_QPS_RESET] = true, 4772 + [IB_QPS_ERR] = true } 4768 4773 }; 4769 4774 4770 4775 return sm[cur_state][new_state]; ··· 5865 5868 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag); 5866 5869 } 5867 5870 5868 - static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) 5871 + static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn) 5869 5872 { 5870 5873 struct device *dev = hr_dev->dev; 5871 5874 int ret; ··· 5879 5882 0, HNS_ROCE_CMD_DESTROY_AEQC, 5880 5883 HNS_ROCE_CMD_TIMEOUT_MSECS); 5881 5884 if (ret) 5882 - dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn); 5885 + dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn); 5883 5886 } 5884 5887 5885 5888 static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) ··· 6391 6394 if (!id) 6392 6395 return 0; 6393 6396 6394 - if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09) 6397 + if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08) 6395 6398 return 0; 6396 6399 6397 6400 ret = __hns_roce_hw_v2_init_instance(handle);
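Deleting the driver-local get_udp_sport() loses nothing: its body is exactly what the common helper centralizes, so hns no longer open-codes RoCEv2 source-port selection. A sketch of the consolidated logic, matching the removed copy (the shared helper lives in include/rdma/ib_verbs.h):

        /* derive a UDP source port from the flow label, or from the QP pair */
        static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
        {
                if (!fl)        /* no flow label given: hash lqpn/rqpn into one */
                        fl = rdma_calc_flow_label(lqpn, rqpn);

                return rdma_flow_label_to_udp_sport(fl);
        }

Because the source port now varies per QP pair instead of being a driver-fixed value, ECMP fabrics can spread RoCEv2 flows across paths.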
+4 -18
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
··· 35 35 36 36 #include <linux/bitops.h> 37 37 38 - #define HNS_ROCE_VF_QPC_BT_NUM 256 39 - #define HNS_ROCE_VF_SCCC_BT_NUM 64 40 - #define HNS_ROCE_VF_SRQC_BT_NUM 64 41 - #define HNS_ROCE_VF_CQC_BT_NUM 64 42 - #define HNS_ROCE_VF_MPT_BT_NUM 64 43 - #define HNS_ROCE_VF_SMAC_NUM 32 44 - #define HNS_ROCE_VF_SL_NUM 8 45 - #define HNS_ROCE_VF_GMV_BT_NUM 256 46 - 47 38 #define HNS_ROCE_V2_MAX_QP_NUM 0x1000 48 39 #define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200 49 40 #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000 50 - #define HNS_ROCE_V2_MAX_SRQ 0x100000 51 41 #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 52 42 #define HNS_ROCE_V2_MAX_SRQ_SGE 64 53 43 #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000 54 44 #define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100 55 45 #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 56 46 #define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 57 - #define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000 58 47 #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64 59 48 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64 60 49 #define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000 ··· 52 63 #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32 53 64 #define HNS_ROCE_V2_UAR_NUM 256 54 65 #define HNS_ROCE_V2_PHY_UAR_NUM 1 55 - #define HNS_ROCE_V2_MAX_IRQ_NUM 65 56 - #define HNS_ROCE_V2_COMP_VEC_NUM 63 57 66 #define HNS_ROCE_V2_AEQE_VEC_NUM 1 58 67 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1 59 68 #define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000 60 69 #define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000 61 - #define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000 62 70 #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000 63 71 #define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000 64 72 #define HNS_ROCE_V2_MAX_PD_NUM 0x1000000 ··· 67 81 #define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16 68 82 #define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64 69 83 #define HNS_ROCE_V2_IRRL_ENTRY_SZ 64 70 - #define HNS_ROCE_V2_TRRL_ENTRY_SZ 48 71 84 #define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100 72 85 #define HNS_ROCE_V2_CQC_ENTRY_SZ 64 73 86 #define HNS_ROCE_V2_SRQC_ENTRY_SZ 64 ··· 88 103 #define HNS_ROCE_INVALID_LKEY 0x0 89 104 #define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000 90 105 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 91 - #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 92 106 #define HNS_ROCE_V2_RSV_QPS 8 93 107 94 108 #define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 ··· 101 117 #define HNS_ROCE_CQE_HOP_NUM 1 102 118 #define HNS_ROCE_SRQWQE_HOP_NUM 1 103 119 #define HNS_ROCE_PBL_HOP_NUM 2 104 - #define HNS_ROCE_EQE_HOP_NUM 2 105 120 #define HNS_ROCE_IDX_HOP_NUM 1 106 121 #define HNS_ROCE_SQWQE_HOP_NUM 2 107 122 #define HNS_ROCE_EXT_SGE_HOP_NUM 1 108 123 #define HNS_ROCE_RQWQE_HOP_NUM 2 124 + 125 + #define HNS_ROCE_V2_EQE_HOP_NUM 2 126 + #define HNS_ROCE_V3_EQE_HOP_NUM 1 109 127 110 128 #define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6 111 129 #define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2 ··· 1427 1441 struct hns_roce_dip { 1428 1442 u8 dgid[GID_LEN_V2]; 1429 1443 u32 dip_idx; 1430 - struct list_head node; /* all dips are on a list */ 1444 + struct list_head node; /* all dips are on a list */ 1431 1445 }; 1432 1446 1433 1447 /* only for RNR timeout issue of HIP08 */
+30 -55
drivers/infiniband/hw/hns/hns_roce_main.c
··· 31 31 * SOFTWARE. 32 32 */ 33 33 #include <linux/acpi.h> 34 - #include <linux/of_platform.h> 35 34 #include <linux/module.h> 36 35 #include <linux/pci.h> 37 36 #include <rdma/ib_addr.h> ··· 69 70 if (port >= hr_dev->caps.num_ports) 70 71 return -EINVAL; 71 72 72 - ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr); 73 + ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr); 73 74 74 75 return ret; 75 76 } ··· 83 84 if (port >= hr_dev->caps.num_ports) 84 85 return -EINVAL; 85 86 86 - ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, NULL, NULL); 87 + ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL); 87 88 88 89 return ret; 89 90 } ··· 151 152 u8 i; 152 153 153 154 for (i = 0; i < hr_dev->caps.num_ports; i++) { 154 - if (hr_dev->hw->set_mtu) 155 - hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i], 156 - hr_dev->caps.max_mtu); 157 155 ret = hns_roce_set_mac(hr_dev, i, 158 156 hr_dev->iboe.netdevs[i]->dev_addr); 159 157 if (ret) ··· 266 270 static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index, 267 271 u16 *pkey) 268 272 { 273 + if (index > 0) 274 + return -EINVAL; 275 + 269 276 *pkey = PKEY_ID; 270 277 271 278 return 0; ··· 306 307 entry->address = address; 307 308 entry->mmap_type = mmap_type; 308 309 309 - ret = rdma_user_mmap_entry_insert_exact( 310 - ucontext, &entry->rdma_entry, length, 311 - mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1); 310 + switch (mmap_type) { 311 + /* pgoff 0 must be used by DB for compatibility */ 312 + case HNS_ROCE_MMAP_TYPE_DB: 313 + ret = rdma_user_mmap_entry_insert_exact( 314 + ucontext, &entry->rdma_entry, length, 0); 315 + break; 316 + case HNS_ROCE_MMAP_TYPE_DWQE: 317 + ret = rdma_user_mmap_entry_insert_range( 318 + ucontext, &entry->rdma_entry, length, 1, 319 + U32_MAX); 320 + break; 321 + default: 322 + ret = -EINVAL; 323 + break; 324 + } 325 + 312 326 if (ret) { 313 327 kfree(entry); 314 328 return NULL; ··· 335 323 if (context->db_mmap_entry) 336 324 rdma_user_mmap_entry_remove( 337 325 &context->db_mmap_entry->rdma_entry); 338 - 339 - if (context->tptr_mmap_entry) 340 - rdma_user_mmap_entry_remove( 341 - &context->tptr_mmap_entry->rdma_entry); 342 326 } 343 327 344 328 static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx) 345 329 { 346 330 struct hns_roce_ucontext *context = to_hr_ucontext(uctx); 347 - struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device); 348 331 u64 address; 349 - int ret; 350 332 351 333 address = context->uar.pfn << PAGE_SHIFT; 352 334 context->db_mmap_entry = hns_roce_user_mmap_entry_insert( ··· 348 342 if (!context->db_mmap_entry) 349 343 return -ENOMEM; 350 344 351 - if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size) 352 - return 0; 353 - 354 - /* 355 - * FIXME: using io_remap_pfn_range on the dma address returned 356 - * by dma_alloc_coherent is totally wrong. 
357 - */ 358 - context->tptr_mmap_entry = 359 - hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr, 360 - hr_dev->tptr_size, 361 - HNS_ROCE_MMAP_TYPE_TPTR); 362 - if (!context->tptr_mmap_entry) { 363 - ret = -ENOMEM; 364 - goto err; 365 - } 366 - 367 345 return 0; 368 - 369 - err: 370 - hns_roce_dealloc_uar_entry(context); 371 - return ret; 372 346 } 373 347 374 348 static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, ··· 422 436 423 437 entry = to_hns_mmap(rdma_entry); 424 438 pfn = entry->address >> PAGE_SHIFT; 425 - prot = vma->vm_page_prot; 426 439 427 - if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR) 428 - prot = pgprot_noncached(prot); 440 + switch (entry->mmap_type) { 441 + case HNS_ROCE_MMAP_TYPE_DB: 442 + case HNS_ROCE_MMAP_TYPE_DWQE: 443 + prot = pgprot_device(vma->vm_page_prot); 444 + break; 445 + default: 446 + return -EINVAL; 447 + } 429 448 430 449 ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE, 431 450 prot, rdma_entry); ··· 807 816 int ret; 808 817 809 818 spin_lock_init(&hr_dev->sm_lock); 810 - spin_lock_init(&hr_dev->bt_cmd_lock); 811 819 812 820 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || 813 821 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) { ··· 897 907 struct device *dev = hr_dev->dev; 898 908 int ret; 899 909 900 - if (hr_dev->hw->reset) { 901 - ret = hr_dev->hw->reset(hr_dev, true); 902 - if (ret) { 903 - dev_err(dev, "Reset RoCE engine failed!\n"); 904 - return ret; 905 - } 906 - } 907 910 hr_dev->is_reset = false; 908 911 909 912 if (hr_dev->hw->cmq_init) { 910 913 ret = hr_dev->hw->cmq_init(hr_dev); 911 914 if (ret) { 912 915 dev_err(dev, "Init RoCE Command Queue failed!\n"); 913 - goto error_failed_cmq_init; 916 + return ret; 914 917 } 915 918 } 916 919 ··· 986 1003 if (hr_dev->hw->cmq_exit) 987 1004 hr_dev->hw->cmq_exit(hr_dev); 988 1005 989 - error_failed_cmq_init: 990 - if (hr_dev->hw->reset) { 991 - if (hr_dev->hw->reset(hr_dev, false)) 992 - dev_err(dev, "Dereset RoCE engine failed!\n"); 993 - } 994 - 995 1006 return ret; 996 1007 } 997 1008 ··· 1005 1028 hns_roce_cmd_cleanup(hr_dev); 1006 1029 if (hr_dev->hw->cmq_exit) 1007 1030 hr_dev->hw->cmq_exit(hr_dev); 1008 - if (hr_dev->hw->reset) 1009 - hr_dev->hw->reset(hr_dev, false); 1010 1031 } 1011 1032 1012 1033 MODULE_LICENSE("Dual BSD/GPL");
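The switch makes the pgoff policy explicit: rdma_user_mmap_entry_insert_exact() pins the doorbell entry at pgoff 0, which existing userspace hardcodes, while rdma_user_mmap_entry_insert_range() lets the core pick any free slot in [1, U32_MAX] for a DWQE entry so it can never collide with the DB slot. The two call shapes:

        /* must land exactly at pgoff 0, otherwise the insert fails */
        ret = rdma_user_mmap_entry_insert_exact(ucontext, &entry->rdma_entry,
                                                length, 0);

        /* first free pgoff anywhere in [1, U32_MAX] */
        ret = rdma_user_mmap_entry_insert_range(ucontext, &entry->rdma_entry,
                                                length, 1, U32_MAX);

On the mmap side only DB and DWQE types are accepted, both mapped with pgprot_device(); the TPTR mapping, which the deleted FIXME admits was wrongly io_remap'ing a dma_alloc_coherent() buffer, disappears along with the hw v1 reset and set_mtu hooks.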
+10 -22
drivers/infiniband/hw/hns/hns_roce_mr.c
··· 31 31 * SOFTWARE. 32 32 */ 33 33 34 - #include <linux/platform_device.h> 35 34 #include <linux/vmalloc.h> 36 35 #include <rdma/ib_umem.h> 37 36 #include "hns_roce_device.h" ··· 80 81 return -ENOMEM; 81 82 } 82 83 83 - mr->key = hw_index_to_key(id); /* MR key */ 84 + mr->key = hw_index_to_key(id); /* MR key */ 84 85 85 86 err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, 86 87 (unsigned long)id); ··· 172 173 } 173 174 174 175 if (mr->type != MR_TYPE_FRMR) 175 - ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr, 176 - mtpt_idx); 176 + ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr); 177 177 else 178 178 ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr); 179 179 if (ret) { ··· 361 363 struct hns_roce_mr *mr = to_hr_mr(ibmr); 362 364 int ret = 0; 363 365 364 - if (hr_dev->hw->dereg_mr) { 365 - ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata); 366 - } else { 367 - hns_roce_mr_free(hr_dev, mr); 368 - kfree(mr); 369 - } 366 + hns_roce_mr_free(hr_dev, mr); 367 + kfree(mr); 370 368 371 369 return ret; 372 370 } ··· 608 614 return -ENOBUFS; 609 615 610 616 for (i = 0; i < count && npage < max_count; i++) { 611 - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) 612 - addr = to_hr_hw_page_addr(pages[npage]); 613 - else 614 - addr = pages[npage]; 617 + addr = pages[npage]; 615 618 616 619 mtts[i] = cpu_to_le64(addr); 617 620 npage++; ··· 815 824 } 816 825 817 826 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, 818 - int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) 827 + u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) 819 828 { 820 829 struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg; 821 830 int mtt_count, left; 822 - int start_index; 831 + u32 start_index; 823 832 int total = 0; 824 833 __le64 *mtts; 825 834 u32 npage; ··· 838 847 continue; 839 848 840 849 addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT); 841 - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) 842 - mtt_buf[total] = to_hr_hw_page_addr(addr); 843 - else 844 - mtt_buf[total] = addr; 850 + mtt_buf[total] = addr; 845 851 846 852 total++; 847 853 } ··· 872 884 static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev, 873 885 struct hns_roce_buf_attr *attr, 874 886 struct hns_roce_hem_cfg *cfg, 875 - unsigned int *buf_page_shift, int unalinged_size) 887 + unsigned int *buf_page_shift, u64 unalinged_size) 876 888 { 877 889 struct hns_roce_buf_region *r; 878 - int first_region_padding; 890 + u64 first_region_padding; 879 891 int page_cnt, region_cnt; 880 892 unsigned int page_shift; 881 893 size_t buf_size;
+3 -14
drivers/infiniband/hw/hns/hns_roce_pd.c
··· 30 30 * SOFTWARE. 31 31 */ 32 32 33 - #include <linux/platform_device.h> 34 33 #include <linux/pci.h> 35 34 #include "hns_roce_device.h" 36 35 ··· 85 86 int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) 86 87 { 87 88 struct hns_roce_ida *uar_ida = &hr_dev->uar_ida; 88 - struct resource *res; 89 89 int id; 90 90 91 91 /* Using bitmap to manager UAR index */ ··· 102 104 else 103 105 uar->index = 0; 104 106 105 - if (!dev_is_pci(hr_dev->dev)) { 106 - res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0); 107 - if (!res) { 108 - ida_free(&uar_ida->ida, id); 109 - dev_err(&hr_dev->pdev->dev, "memory resource not found!\n"); 110 - return -EINVAL; 111 - } 112 - uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index; 113 - } else { 114 - uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) 115 - >> PAGE_SHIFT); 116 - } 107 + uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT); 108 + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) 109 + hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4); 117 110 118 111 return 0; 119 112 }
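With the platform-device path gone, the UAR page always comes from PCI BAR 2, and when the hardware advertises HNS_ROCE_CAP_FLAG_DIRECT_WQE the start of BAR 4 is recorded as the direct-WQE window. Each QP's slot is then a fixed stride into that window, as the hns_roce_qp.c hunk below computes:

        address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;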
+61 -32
drivers/infiniband/hw/hns/hns_roce_qp.c
··· 32 32 */ 33 33 34 34 #include <linux/pci.h> 35 - #include <linux/platform_device.h> 36 35 #include <rdma/ib_addr.h> 37 36 #include <rdma/ib_umem.h> 38 37 #include <rdma/uverbs_ioctl.h> ··· 109 110 return; 110 111 } 111 112 112 - if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && 113 - (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || 114 - event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || 115 - event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR || 116 - event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION || 117 - event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) { 113 + if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || 114 + event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || 115 + event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR || 116 + event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION || 117 + event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) { 118 118 qp->state = IB_QPS_ERR; 119 119 120 120 flush_cqe(hr_dev, qp); ··· 217 219 int ret; 218 220 219 221 if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { 220 - /* when hw version is v1, the sqpn is allocated */ 221 - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) 222 - num = HNS_ROCE_MAX_PORTS + 223 - hr_dev->iboe.phy_port[hr_qp->port]; 224 - else 225 - num = 1; 226 - 222 + num = 1; 227 223 hr_qp->doorbell_qpn = 1; 228 224 } else { 229 225 mutex_lock(&qp_table->bank_mutex); ··· 316 324 if (!hr_qp->qpn) 317 325 return -EINVAL; 318 326 319 - /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ 320 - if (hr_qp->ibqp.qp_type == IB_QPT_GSI && 321 - hr_dev->hw_rev == HNS_ROCE_HW_VER1) 322 - return 0; 323 - 324 327 /* Alloc memory for QPC */ 325 328 ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); 326 329 if (ret) { ··· 366 379 return ret; 367 380 } 368 381 382 + static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp) 383 + { 384 + rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry); 385 + } 386 + 369 387 void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) 370 388 { 371 389 struct xarray *xa = &hr_dev->qp_table_xa; ··· 393 401 static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) 394 402 { 395 403 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; 396 - 397 - /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ 398 - if (hr_qp->ibqp.qp_type == IB_QPT_GSI && 399 - hr_dev->hw_rev == HNS_ROCE_HW_VER1) 400 - return; 401 404 402 405 if (hr_dev->caps.trrl_entry_sz) 403 406 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); ··· 521 534 u32 wqe_sge_cnt; 522 535 523 536 hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; 524 - 525 - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { 526 - hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE; 527 - return; 528 - } 529 537 530 538 hr_qp->sq.max_gs = max(1U, cap->max_send_sge); ··· 762 780 goto err_inline; 763 781 } 764 782 783 + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) 784 + hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE; 785 + 765 786 return 0; 787 + 766 788 err_inline: 767 789 free_rq_inline_buf(hr_qp); 768 790 ··· 806 820 { 807 821 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && 808 822 hns_roce_qp_has_rq(init_attr)); 823 + } 824 + 825 + static int qp_mmap_entry(struct hns_roce_qp *hr_qp, 826 + struct hns_roce_dev *hr_dev, 827 + struct ib_udata *udata, 828 + struct hns_roce_ib_create_qp_resp *resp) 829 + { 830 + struct hns_roce_ucontext *uctx = 831 + rdma_udata_to_drv_context(udata, 832 + struct hns_roce_ucontext, ibucontext);
833 + struct rdma_user_mmap_entry *rdma_entry; 834 + u64 address; 835 + 836 + address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE; 837 + 838 + hr_qp->dwqe_mmap_entry = 839 + hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address, 840 + HNS_ROCE_DWQE_SIZE, 841 + HNS_ROCE_MMAP_TYPE_DWQE); 842 + 843 + if (!hr_qp->dwqe_mmap_entry) { 844 + ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n"); 845 + return -ENOMEM; 846 + } 847 + 848 + rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry; 849 + resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry); 850 + 851 + return 0; 852 + } 809 853 810 854 static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, ··· 924 909 hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB; 925 910 926 911 if (udata) { 912 + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) { 913 + ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp); 914 + if (ret) 915 + return ret; 916 + } 917 + 927 918 ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd, 928 919 resp); 929 920 if (ret) 930 - return ret; 921 + goto err_remove_qp; 931 922 } else { 932 923 ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr); 933 924 if (ret) ··· 941 920 } 942 921 943 922 return 0; 923 + 924 + err_remove_qp: 925 + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) 926 + qp_user_mmap_entry_remove(hr_qp); 927 + 928 + return ret; 944 929 } 945 930 946 931 static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, ··· 960 933 hns_roce_db_unmap_user(uctx, &hr_qp->rdb); 961 934 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) 962 935 hns_roce_db_unmap_user(uctx, &hr_qp->sdb); 936 + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) 937 + qp_user_mmap_entry_remove(hr_qp); 963 938 } else { 964 939 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) 965 940 hns_roce_free_db(hr_dev, &hr_qp->rdb); ··· 1187 1158 goto out; 1188 1159 break; 1189 1160 case IB_QPT_UD: 1190 - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && 1161 + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && 1191 1162 is_user) 1192 1163 goto out; 1193 1164 break; ··· 1420 1391 } 1421 1392 } 1422 1393 1423 - static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset) 1394 + static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset) 1424 1395 { 1425 1396 return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); 1426 1397 }
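qp_mmap_entry() exports the per-QP DWQE page through the standard rdma mmap-entry scheme: rdma_user_mmap_get_offset() turns the entry into an opaque key, and userspace passes that key as the mmap offset on the uverbs fd. A hedged userspace sketch (fd and response-field names illustrative, mirroring the dwqe_mmap_key field added above):

        /* map this QP's direct-WQE page using the key from the create-QP response */
        void *dwqe = mmap(NULL, HNS_ROCE_DWQE_SIZE, PROT_WRITE, MAP_SHARED,
                          cmd_fd, resp.dwqe_mmap_key);

Note the unwind paths: if the later DB allocation fails, the new err_remove_qp label drops the mmap entry again, and free_qp_db() does the same on normal teardown.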
+1 -1
drivers/infiniband/hw/irdma/i40iw_if.c
··· 198 198 aux_dev); 199 199 struct i40e_info *cdev_info = i40e_adev->ldev; 200 200 201 - return i40e_client_device_unregister(cdev_info); 201 + i40e_client_device_unregister(cdev_info); 202 202 } 203 203 204 204 static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = {
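i40e_client_device_unregister() returns void, as does the auxiliary-bus .remove() callback wrapping it, so the deleted 'return' forwarded nothing to a caller expecting nothing; making it a plain call removes the confusion without changing behavior.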
+1 -1
drivers/infiniband/hw/irdma/pble.h
··· 69 69 struct irdma_chunk { 70 70 struct list_head list; 71 71 struct irdma_dma_info dmainfo; 72 - void *bitmapbuf; 72 + unsigned long *bitmapbuf; 73 73 74 74 u32 sizeofbitmap; 75 75 u64 size;
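Giving bitmapbuf its real type matters because kernel bitmaps are always arrays of unsigned long: the pble code can now hand it to the bitmap API without casts. Presumably the allocation side of the series pairs this with something like (sketch, field names from the struct above):

        chunk->bitmapbuf = bitmap_zalloc(chunk->sizeofbitmap, GFP_KERNEL);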
+10 -21
drivers/infiniband/hw/irdma/verbs.c
··· 21 21 return -EINVAL; 22 22 23 23 memset(props, 0, sizeof(*props)); 24 - ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr); 24 + addrconf_addr_eui48((u8 *)&props->sys_image_guid, 25 + iwdev->netdev->dev_addr); 25 26 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 | 26 27 irdma_fw_minor_ver(&rf->sc_dev); 27 28 props->device_cap_flags = iwdev->device_cap_flags; ··· 1171 1170 udp_info->ttl = attr->ah_attr.grh.hop_limit; 1172 1171 udp_info->flow_label = attr->ah_attr.grh.flow_label; 1173 1172 udp_info->tos = attr->ah_attr.grh.traffic_class; 1173 + udp_info->src_port = 1174 + rdma_get_udp_sport(udp_info->flow_label, 1175 + ibqp->qp_num, 1176 + roce_info->dest_qp); 1174 1177 irdma_qp_rem_qos(&iwqp->sc_qp); 1175 1178 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri); 1176 1179 ctx_info->user_pri = rt_tos2priority(udp_info->tos); ··· 4326 4321 return IB_LINK_LAYER_ETHERNET; 4327 4322 } 4328 4323 4329 - static __be64 irdma_mac_to_guid(struct net_device *ndev) 4330 - { 4331 - const unsigned char *mac = ndev->dev_addr; 4332 - __be64 guid; 4333 - unsigned char *dst = (unsigned char *)&guid; 4334 - 4335 - dst[0] = mac[0] ^ 2; 4336 - dst[1] = mac[1]; 4337 - dst[2] = mac[2]; 4338 - dst[3] = 0xff; 4339 - dst[4] = 0xfe; 4340 - dst[5] = mac[3]; 4341 - dst[6] = mac[4]; 4342 - dst[7] = mac[5]; 4343 - 4344 - return guid; 4345 - } 4346 - 4347 4324 static const struct ib_device_ops irdma_roce_dev_ops = { 4348 4325 .attach_mcast = irdma_attach_mcast, 4349 4326 .create_ah = irdma_create_ah, ··· 4395 4408 static void irdma_init_roce_device(struct irdma_device *iwdev) 4396 4409 { 4397 4410 iwdev->ibdev.node_type = RDMA_NODE_IB_CA; 4398 - iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev); 4411 + addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, 4412 + iwdev->netdev->dev_addr); 4399 4413 ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops); 4400 4414 } 4401 4415 ··· 4409 4421 struct net_device *netdev = iwdev->netdev; 4410 4422 4411 4423 iwdev->ibdev.node_type = RDMA_NODE_RNIC; 4412 - ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr); 4424 + addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, 4425 + netdev->dev_addr); 4413 4426 iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref; 4414 4427 iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref; 4415 4428 iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
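Both GUID call sites switch to addrconf_addr_eui48() (net/addrconf.h), which performs the same MAC-to-EUI-64 expansion the deleted irdma_mac_to_guid() open-coded:

        /* MAC aa:bb:cc:dd:ee:ff -> GUID (aa^2):bb:cc:ff:fe:dd:ee:ff */
        dst[0] = mac[0] ^ 2;    /* flip the universal/local bit */
        dst[1] = mac[1];
        dst[2] = mac[2];
        dst[3] = 0xff;          /* fixed EUI-64 filler octets */
        dst[4] = 0xfe;
        dst[5] = mac[3];
        dst[6] = mac[4];
        dst[7] = mac[5];

The modify-QP hunk also picks up rdma_get_udp_sport() (sketched under the hns hunks above), so irdma's UDP source port likewise varies with the flow label or QP pair.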
+12 -22
drivers/infiniband/hw/mlx4/main.c
··· 85 85 86 86 static struct workqueue_struct *wq; 87 87 88 - static void init_query_mad(struct ib_smp *mad) 89 - { 90 - mad->base_version = 1; 91 - mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 92 - mad->class_version = 1; 93 - mad->method = IB_MGMT_METHOD_GET; 94 - } 95 - 96 88 static int check_flow_steering_support(struct mlx4_dev *dev) 97 89 { 98 90 int eth_num_ports = 0; ··· 463 471 if (!in_mad || !out_mad) 464 472 goto out; 465 473 466 - init_query_mad(in_mad); 474 + ib_init_query_mad(in_mad); 467 475 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 468 476 469 477 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS, ··· 661 669 if (!in_mad || !out_mad) 662 670 goto out; 663 671 664 - init_query_mad(in_mad); 672 + ib_init_query_mad(in_mad); 665 673 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 666 674 in_mad->attr_mod = cpu_to_be32(port); 667 675 ··· 713 721 714 722 /* If reported active speed is QDR, check if is FDR-10 */ 715 723 if (props->active_speed == IB_SPEED_QDR) { 716 - init_query_mad(in_mad); 724 + ib_init_query_mad(in_mad); 717 725 in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO; 718 726 in_mad->attr_mod = cpu_to_be32(port); 719 727 ··· 840 848 if (!in_mad || !out_mad) 841 849 goto out; 842 850 843 - init_query_mad(in_mad); 851 + ib_init_query_mad(in_mad); 844 852 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 845 853 in_mad->attr_mod = cpu_to_be32(port); 846 854 ··· 862 870 } 863 871 } 864 872 865 - init_query_mad(in_mad); 873 + ib_init_query_mad(in_mad); 866 874 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; 867 875 in_mad->attr_mod = cpu_to_be32(index / 8); 868 876 ··· 909 917 if (!in_mad || !out_mad) 910 918 goto out; 911 919 912 - init_query_mad(in_mad); 920 + ib_init_query_mad(in_mad); 913 921 in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE; 914 922 in_mad->attr_mod = 0; 915 923 ··· 963 971 if (!in_mad || !out_mad) 964 972 goto out; 965 973 966 - init_query_mad(in_mad); 974 + ib_init_query_mad(in_mad); 967 975 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; 968 976 in_mad->attr_mod = cpu_to_be32(index / 32); 969 977 ··· 1982 1990 if (!in_mad || !out_mad) 1983 1991 goto out; 1984 1992 1985 - init_query_mad(in_mad); 1993 + ib_init_query_mad(in_mad); 1986 1994 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; 1987 1995 if (mlx4_is_master(dev->dev)) 1988 1996 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; ··· 2776 2784 if (err) 2777 2785 goto err_counter; 2778 2786 2779 - ibdev->ib_uc_qpns_bitmap = 2780 - kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count), 2781 - sizeof(long), 2782 - GFP_KERNEL); 2787 + ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count, 2788 + GFP_KERNEL); 2783 2789 if (!ibdev->ib_uc_qpns_bitmap) 2784 2790 goto err_steer_qp_release; 2785 2791 ··· 2865 2875 mlx4_ib_diag_cleanup(ibdev); 2866 2876 2867 2877 err_steer_free_bitmap: 2868 - kfree(ibdev->ib_uc_qpns_bitmap); 2878 + bitmap_free(ibdev->ib_uc_qpns_bitmap); 2869 2879 2870 2880 err_steer_qp_release: 2871 2881 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, ··· 2978 2988 2979 2989 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, 2980 2990 ibdev->steer_qpn_count); 2981 - kfree(ibdev->ib_uc_qpns_bitmap); 2991 + bitmap_free(ibdev->ib_uc_qpns_bitmap); 2982 2992 2983 2993 iounmap(ibdev->uar_map); 2984 2994 for (p = 0; p < ibdev->num_ports; ++p)
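Two common-helper conversions land here. The removed static init_query_mad() — base_version 1, mgmt class SUBN_LID_ROUTED, class_version 1, method GET, as its deleted body shows — becomes the shared ib_init_query_mad(), deduplicated across mlx4, mlx5 and mthca below. And the steering-QPN bitmap moves to the bitmap allocators, which size in bits rather than hand-counted longs; the substitution in sketch form:

        bm = bitmap_alloc(nbits, GFP_KERNEL); /* was: kmalloc_array(BITS_TO_LONGS(nbits), sizeof(long), ...) */
        if (!bm)
                return -ENOMEM;
        /* ... */
        bitmap_free(bm);                      /* was: kfree(bm) */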
+4 -1
drivers/infiniband/hw/mlx5/cq.c
··· 328 328 } 329 329 330 330 wc->vendor_err = cqe->vendor_err_synd; 331 - if (dump) 331 + if (dump) { 332 + mlx5_ib_warn(dev, "WC error: %d, Message: %s\n", wc->status, 333 + ib_wc_status_msg(wc->status)); 332 334 dump_cqe(dev, cqe); 335 + } 333 336 } 334 337 335 338 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
+14 -9
drivers/infiniband/hw/mlx5/mad.c
··· 291 291 if (!in_mad || !out_mad) 292 292 goto out; 293 293 294 - init_query_mad(in_mad); 294 + ib_init_query_mad(in_mad); 295 295 in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; 296 296 in_mad->attr_mod = cpu_to_be32(port); 297 297 ··· 318 318 if (!in_mad) 319 319 return -ENOMEM; 320 320 321 - init_query_mad(in_mad); 321 + ib_init_query_mad(in_mad); 322 322 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 323 323 324 324 err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, ··· 405 405 if (!in_mad || !out_mad) 406 406 goto out; 407 407 408 - init_query_mad(in_mad); 408 + ib_init_query_mad(in_mad); 409 409 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; 410 410 411 411 err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); ··· 430 430 if (!in_mad || !out_mad) 431 431 goto out; 432 432 433 - init_query_mad(in_mad); 433 + ib_init_query_mad(in_mad); 434 434 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 435 435 436 436 err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); ··· 456 456 if (!in_mad || !out_mad) 457 457 goto out; 458 458 459 - init_query_mad(in_mad); 459 + ib_init_query_mad(in_mad); 460 460 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; 461 461 in_mad->attr_mod = cpu_to_be32(index / 32); 462 462 ··· 485 485 if (!in_mad || !out_mad) 486 486 goto out; 487 487 488 - init_query_mad(in_mad); 488 + ib_init_query_mad(in_mad); 489 489 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 490 490 in_mad->attr_mod = cpu_to_be32(port); 491 491 ··· 496 496 497 497 memcpy(gid->raw, out_mad->data + 8, 8); 498 498 499 - init_query_mad(in_mad); 499 + ib_init_query_mad(in_mad); 500 500 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; 501 501 in_mad->attr_mod = cpu_to_be32(index / 8); 502 502 ··· 530 530 531 531 /* props being zeroed by the caller, avoid zeroing it here */ 532 532 533 - init_query_mad(in_mad); 533 + ib_init_query_mad(in_mad); 534 534 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 535 535 in_mad->attr_mod = cpu_to_be32(port); 536 536 ··· 584 584 props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP) 585 585 props->active_speed = IB_SPEED_HDR; 586 586 break; 587 + case 8: 588 + if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP && 589 + props->port_cap_flags2 & IB_PORT_LINK_SPEED_NDR_SUP) 590 + props->active_speed = IB_SPEED_NDR; 591 + break; 587 592 } 588 593 } 589 594 ··· 596 591 if (props->active_speed == 4) { 597 592 if (dev->port_caps[port - 1].ext_port_cap & 598 593 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { 599 - init_query_mad(in_mad); 594 + ib_init_query_mad(in_mad); 600 595 in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; 601 596 in_mad->attr_mod = cpu_to_be32(port); 602 597
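The new case completes the extended-speed decode on the MAD path: PortInfo's LinkSpeedExtActive encodes FDR, EDR, HDR and NDR as 1, 2, 4 and 8, and, as with the existing HDR case, the raw value is only reported upward when CapabilityMask2 advertises the matching support bit, so firmware cannot claim a speed the port cannot negotiate. A condensed view of the mapping (the FDR/EDR cases sit just above the hunk shown):

        /* LinkSpeedExtActive -> verbs speed:
         *   1 -> IB_SPEED_FDR                 2 -> IB_SPEED_EDR
         *   4 -> IB_SPEED_HDR (needs HDR_SUP) 8 -> IB_SPEED_NDR (needs NDR_SUP)
         */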
+2 -10
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 665 665 666 666 /* User MR data */ 667 667 struct mlx5_cache_ent *cache_ent; 668 + /* Everything after cache_ent is zero'd when MR allocated */ 668 669 struct ib_umem *umem; 669 670 670 - /* This is zero'd when the MR is allocated */ 671 671 union { 672 672 /* Used only while the MR is in the cache */ 673 673 struct { ··· 719 719 /* Zero the fields in the mr that are variant depending on usage */ 720 720 static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr) 721 721 { 722 - memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out)); 722 + memset_after(mr, 0, cache_ent); 723 723 } 724 724 725 725 static inline bool is_odp_mr(struct mlx5_ib_mr *mr) ··· 1465 1465 extern const struct uapi_definition mlx5_ib_flow_defs[]; 1466 1466 extern const struct uapi_definition mlx5_ib_qos_defs[]; 1467 1467 extern const struct uapi_definition mlx5_ib_std_types_defs[]; 1468 - 1469 - static inline void init_query_mad(struct ib_smp *mad) 1470 - { 1471 - mad->base_version = 1; 1472 - mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 1473 - mad->class_version = 1; 1474 - mad->method = IB_MGMT_METHOD_GET; 1475 - } 1476 1468 1477 1469 static inline int is_qp1(enum ib_qp_type qp_type) 1478 1470 {
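memset_after() (linux/string.h) zeroes every member of a struct past a named field, replacing sizeof/offsetof arithmetic that silently breaks when members are reordered; the relocated comment documents the new invariant that everything after cache_ent is the per-allocation scratch region. The open-coded equivalent of the new mlx5_clear_mr() body:

        memset((char *)mr + offsetofend(struct mlx5_ib_mr, cache_ent), 0,
               sizeof(*mr) - offsetofend(struct mlx5_ib_mr, cache_ent));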
+5 -10
drivers/infiniband/hw/mthca/mthca_allocator.c
··· 51 51 } 52 52 53 53 if (obj < alloc->max) { 54 - set_bit(obj, alloc->table); 54 + __set_bit(obj, alloc->table); 55 55 obj |= alloc->top; 56 56 } else 57 57 obj = -1; ··· 69 69 70 70 spin_lock_irqsave(&alloc->lock, flags); 71 71 72 - clear_bit(obj, alloc->table); 72 + __clear_bit(obj, alloc->table); 73 73 alloc->last = min(alloc->last, obj); 74 74 alloc->top = (alloc->top + alloc->max) & alloc->mask; 75 75 ··· 79 79 int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, 80 80 u32 reserved) 81 81 { 82 - int i; 83 - 84 82 /* num must be a power of 2 */ 85 83 if (num != 1 << (ffs(num) - 1)) 86 84 return -EINVAL; ··· 88 90 alloc->max = num; 89 91 alloc->mask = mask; 90 92 spin_lock_init(&alloc->lock); 91 - alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), 92 - GFP_KERNEL); 93 + alloc->table = bitmap_zalloc(num, GFP_KERNEL); 93 94 if (!alloc->table) 94 95 return -ENOMEM; 95 96 96 - bitmap_zero(alloc->table, num); 97 - for (i = 0; i < reserved; ++i) 98 - set_bit(i, alloc->table); 97 + bitmap_set(alloc->table, 0, reserved); 99 98 100 99 return 0; 101 100 } 102 101 103 102 void mthca_alloc_cleanup(struct mthca_alloc *alloc) 104 103 { 105 - kfree(alloc->table); 104 + bitmap_free(alloc->table); 106 105 } 107 106 108 107 /*
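The double-underscore bitops are the non-atomic variants, correct here because every update of alloc->table already runs under alloc->lock, and cheaper than the LOCK-prefixed atomics. The init path likewise adopts bitmap_zalloc()/bitmap_free() for the table itself and collapses the per-bit reserve loop into a single call:

        bitmap_set(alloc->table, 0, reserved);  /* mark bits [0, reserved) in one sweep */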
+11 -14
drivers/infiniband/hw/mthca/mthca_mr.c
··· 101 101 return -1; 102 102 103 103 found: 104 - clear_bit(seg, buddy->bits[o]); 104 + __clear_bit(seg, buddy->bits[o]); 105 105 --buddy->num_free[o]; 106 106 107 107 while (o > order) { 108 108 --o; 109 109 seg <<= 1; 110 - set_bit(seg ^ 1, buddy->bits[o]); 110 + __set_bit(seg ^ 1, buddy->bits[o]); 111 111 ++buddy->num_free[o]; 112 112 } 113 113 ··· 125 125 spin_lock(&buddy->lock); 126 126 127 127 while (test_bit(seg ^ 1, buddy->bits[order])) { 128 - clear_bit(seg ^ 1, buddy->bits[order]); 128 + __clear_bit(seg ^ 1, buddy->bits[order]); 129 129 --buddy->num_free[order]; 130 130 seg >>= 1; 131 131 ++order; 132 132 } 133 133 134 - set_bit(seg, buddy->bits[order]); 134 + __set_bit(seg, buddy->bits[order]); 135 135 ++buddy->num_free[order]; 136 136 137 137 spin_unlock(&buddy->lock); ··· 139 139 140 140 static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) 141 141 { 142 - int i, s; 142 + int i; 143 143 144 144 buddy->max_order = max_order; 145 145 spin_lock_init(&buddy->lock); ··· 152 152 goto err_out; 153 153 154 154 for (i = 0; i <= buddy->max_order; ++i) { 155 - s = BITS_TO_LONGS(1 << (buddy->max_order - i)); 156 - buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL); 155 + buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i), 156 + GFP_KERNEL); 157 157 if (!buddy->bits[i]) 158 158 goto err_out_free; 159 - bitmap_zero(buddy->bits[i], 160 - 1 << (buddy->max_order - i)); 161 159 } 162 160 163 - set_bit(0, buddy->bits[buddy->max_order]); 161 + __set_bit(0, buddy->bits[buddy->max_order]); 164 162 buddy->num_free[buddy->max_order] = 1; 165 163 166 164 return 0; 167 165 168 166 err_out_free: 169 167 for (i = 0; i <= buddy->max_order; ++i) 170 - kfree(buddy->bits[i]); 168 + bitmap_free(buddy->bits[i]); 171 169 172 170 err_out: 173 171 kfree(buddy->bits); ··· 179 181 int i; 180 182 181 183 for (i = 0; i <= buddy->max_order; ++i) 182 - kfree(buddy->bits[i]); 184 + bitmap_free(buddy->bits[i]); 183 185 184 186 kfree(buddy->bits); 185 187 kfree(buddy->num_free); ··· 467 469 mpt_entry->start = cpu_to_be64(iova); 468 470 mpt_entry->length = cpu_to_be64(total_size); 469 471 470 - memset(&mpt_entry->lkey, 0, 471 - sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey)); 472 + memset_startat(mpt_entry, 0, lkey); 472 473 473 474 if (mr->mtt) 474 475 mpt_entry->mtt_seg =
+6 -14
drivers/infiniband/hw/mthca/mthca_provider.c
··· 50 50 #include <rdma/mthca-abi.h> 51 51 #include "mthca_memfree.h" 52 52 53 - static void init_query_mad(struct ib_smp *mad) 54 - { 55 - mad->base_version = 1; 56 - mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 57 - mad->class_version = 1; 58 - mad->method = IB_MGMT_METHOD_GET; 59 - } 60 - 61 53 static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props, 62 54 struct ib_udata *uhw) 63 55 { ··· 70 78 71 79 props->fw_ver = mdev->fw_ver; 72 80 73 - init_query_mad(in_mad); 81 + ib_init_query_mad(in_mad); 74 82 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 75 83 76 84 err = mthca_MAD_IFC(mdev, 1, 1, ··· 132 140 133 141 /* props being zeroed by the caller, avoid zeroing it here */ 134 142 135 - init_query_mad(in_mad); 143 + ib_init_query_mad(in_mad); 136 144 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 137 145 in_mad->attr_mod = cpu_to_be32(port); 138 146 ··· 226 234 if (!in_mad || !out_mad) 227 235 goto out; 228 236 229 - init_query_mad(in_mad); 237 + ib_init_query_mad(in_mad); 230 238 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; 231 239 in_mad->attr_mod = cpu_to_be32(index / 32); 232 240 ··· 255 263 if (!in_mad || !out_mad) 256 264 goto out; 257 265 258 - init_query_mad(in_mad); 266 + ib_init_query_mad(in_mad); 259 267 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 260 268 in_mad->attr_mod = cpu_to_be32(port); 261 269 ··· 266 274 267 275 memcpy(gid->raw, out_mad->data + 8, 8); 268 276 269 - init_query_mad(in_mad); 277 + ib_init_query_mad(in_mad); 270 278 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; 271 279 in_mad->attr_mod = cpu_to_be32(index / 8); 272 280 ··· 998 1006 if (!in_mad || !out_mad) 999 1007 goto out; 1000 1008 1001 - init_query_mad(in_mad); 1009 + ib_init_query_mad(in_mad); 1002 1010 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; 1003 1011 1004 1012 err = mthca_MAD_IFC(dev, 1, 1,
+6 -10
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
··· 1506 1506 static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) 1507 1507 { 1508 1508 int status = -ENOMEM; 1509 - size_t pd_bitmap_size; 1510 1509 struct ocrdma_alloc_pd_range *cmd; 1511 1510 struct ocrdma_alloc_pd_range_rsp *rsp; 1512 1511 ··· 1527 1528 dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & 1528 1529 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; 1529 1530 dev->pd_mgr->max_dpp_pd = rsp->pd_count; 1530 - pd_bitmap_size = 1531 - BITS_TO_LONGS(rsp->pd_count) * sizeof(long); 1532 - dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, 1533 - GFP_KERNEL); 1531 + dev->pd_mgr->pd_dpp_bitmap = bitmap_zalloc(rsp->pd_count, 1532 + GFP_KERNEL); 1534 1533 } 1535 1534 kfree(cmd); 1536 1535 } ··· 1544 1547 dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid & 1545 1548 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; 1546 1549 dev->pd_mgr->max_normal_pd = rsp->pd_count; 1547 - pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); 1548 - dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size, 1549 - GFP_KERNEL); 1550 + dev->pd_mgr->pd_norm_bitmap = bitmap_zalloc(rsp->pd_count, 1551 + GFP_KERNEL); 1550 1552 } 1551 1553 kfree(cmd); 1552 1554 ··· 1607 1611 static void ocrdma_free_pd_pool(struct ocrdma_dev *dev) 1608 1612 { 1609 1613 ocrdma_mbx_dealloc_pd_range(dev); 1610 - kfree(dev->pd_mgr->pd_norm_bitmap); 1611 - kfree(dev->pd_mgr->pd_dpp_bitmap); 1614 + bitmap_free(dev->pd_mgr->pd_norm_bitmap); 1615 + bitmap_free(dev->pd_mgr->pd_dpp_bitmap); 1612 1616 kfree(dev->pd_mgr); 1613 1617 } 1614 1618
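The ocrdma PD-range hunks apply the same bitmap conversion seen in mthca above: bitmap_zalloc() is sized in bits and hides the BITS_TO_LONGS arithmetic, and the allocation must then be paired with bitmap_free() rather than kfree(). The shape of the change, with illustrative names:

#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *pd_bitmap_alloc(unsigned int nbits)
{
	/* was: kzalloc(BITS_TO_LONGS(nbits) * sizeof(long), GFP_KERNEL) */
	return bitmap_zalloc(nbits, GFP_KERNEL);
}

static void pd_bitmap_release(unsigned long *bm)
{
	/* was: kfree(bm) */
	bitmap_free(bm);
}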
+2 -15
drivers/infiniband/hw/ocrdma/ocrdma_main.c
··· 62 62 MODULE_AUTHOR("Emulex Corporation"); 63 63 MODULE_LICENSE("Dual BSD/GPL"); 64 64 65 - void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid) 66 - { 67 - u8 mac_addr[6]; 68 - 69 - memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN); 70 - guid[0] = mac_addr[0] ^ 2; 71 - guid[1] = mac_addr[1]; 72 - guid[2] = mac_addr[2]; 73 - guid[3] = 0xff; 74 - guid[4] = 0xfe; 75 - guid[5] = mac_addr[3]; 76 - guid[6] = mac_addr[4]; 77 - guid[7] = mac_addr[5]; 78 - } 79 65 static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, 80 66 u32 port_num) 81 67 { ··· 189 203 { 190 204 int ret; 191 205 192 - ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid); 206 + addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, 207 + dev->nic_info.mac_addr); 193 208 BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX); 194 209 memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC, 195 210 sizeof(OCRDMA_NODE_DESC));
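ocrdma_get_guid() was an open-coded MAC-48 to EUI-64 conversion, which the generic addrconf_addr_eui48() already implements. A runnable userspace copy of exactly the mapping the removed function performed:

#include <stdio.h>

/* MAC-48 -> EUI-64: flip the universal/local bit of the first octet and
 * splice in the 0xFF,0xFE filler (RFC 4291, appendix A). */
static void mac_to_eui64(unsigned char eui[8], const unsigned char mac[6])
{
	eui[0] = mac[0] ^ 2;
	eui[1] = mac[1];
	eui[2] = mac[2];
	eui[3] = 0xff;
	eui[4] = 0xfe;
	eui[5] = mac[3];
	eui[6] = mac[4];
	eui[7] = mac[5];
}

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	unsigned char eui[8];
	int i;

	mac_to_eui64(eui, mac);
	for (i = 0; i < 8; i++)
		printf("%02x%c", eui[i], i < 7 ? ':' : '\n');
	return 0;
}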
+8 -10
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 41 41 */ 42 42 43 43 #include <linux/dma-mapping.h> 44 + #include <net/addrconf.h> 44 45 #include <rdma/ib_verbs.h> 45 46 #include <rdma/ib_user_verbs.h> 46 47 #include <rdma/iw_cm.h> ··· 75 74 memset(attr, 0, sizeof *attr); 76 75 memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], 77 76 min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); 78 - ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); 77 + addrconf_addr_eui48((u8 *)&attr->sys_image_guid, 78 + dev->nic_info.mac_addr); 79 79 attr->max_mr_size = dev->attr.max_mr_size; 80 80 attr->page_size_cap = 0xffff000; 81 81 attr->vendor_id = dev->nic_info.pdev->vendor; ··· 247 245 static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) 248 246 { 249 247 u16 pd_bitmap_idx = 0; 250 - const unsigned long *pd_bitmap; 248 + unsigned long *pd_bitmap; 251 249 252 250 if (dpp_pool) { 253 251 pd_bitmap = dev->pd_mgr->pd_dpp_bitmap; 254 252 pd_bitmap_idx = find_first_zero_bit(pd_bitmap, 255 253 dev->pd_mgr->max_dpp_pd); 256 - __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap); 254 + __set_bit(pd_bitmap_idx, pd_bitmap); 257 255 dev->pd_mgr->pd_dpp_count++; 258 256 if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh) 259 257 dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count; ··· 261 259 pd_bitmap = dev->pd_mgr->pd_norm_bitmap; 262 260 pd_bitmap_idx = find_first_zero_bit(pd_bitmap, 263 261 dev->pd_mgr->max_normal_pd); 264 - __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap); 262 + __set_bit(pd_bitmap_idx, pd_bitmap); 265 263 dev->pd_mgr->pd_norm_count++; 266 264 if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh) 267 265 dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count; ··· 1846 1844 1847 1845 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) 1848 1846 { 1849 - int status; 1850 1847 struct ocrdma_srq *srq; 1851 1848 1852 1849 srq = get_ocrdma_srq(ibsrq); 1853 - status = ocrdma_mbx_query_srq(srq, srq_attr); 1854 - return status; 1850 + return ocrdma_mbx_query_srq(srq, srq_attr); 1855 1851 } 1856 1852 1857 1853 int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) ··· 1960 1960 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, 1961 1961 const struct ib_send_wr *wr) 1962 1962 { 1963 - int status; 1964 1963 struct ocrdma_sge *sge; 1965 1964 u32 wqe_size = sizeof(*hdr); 1966 1965 ··· 1971 1972 sge = (struct ocrdma_sge *)(hdr + 1); 1972 1973 } 1973 1974 1974 - status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); 1975 - return status; 1975 + return ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); 1976 1976 } 1977 1977 1978 1978 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
-1
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
··· 59 59 enum rdma_protocol_type 60 60 ocrdma_query_protocol(struct ib_device *device, u32 port_num); 61 61 62 - void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid); 63 62 int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey); 64 63 65 64 int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+2
drivers/infiniband/hw/qedr/verbs.c
··· 1931 1931 /* db offset was calculated in copy_qp_uresp, now set in the user q */ 1932 1932 if (qedr_qp_has_sq(qp)) { 1933 1933 qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset; 1934 + qp->sq.max_wr = attrs->cap.max_send_wr; 1934 1935 rc = qedr_db_recovery_add(dev, qp->usq.db_addr, 1935 1936 &qp->usq.db_rec_data->db_data, 1936 1937 DB_REC_WIDTH_32B, ··· 1942 1941 1943 1942 if (qedr_qp_has_rq(qp)) { 1944 1943 qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset; 1944 + qp->rq.max_wr = attrs->cap.max_recv_wr; 1945 1945 rc = qedr_db_recovery_add(dev, qp->urq.db_addr, 1946 1946 &qp->urq.db_rec_data->db_data, 1947 1947 DB_REC_WIDTH_32B,
+1 -1
drivers/infiniband/hw/qib/qib_iba6120.c
··· 3030 3030 3031 3031 /* Does read/modify/write to appropriate registers to 3032 3032 * set output and direction bits selected by mask. 3033 - * these are in their canonical postions (e.g. lsb of 3033 + * these are in their canonical positions (e.g. lsb of 3034 3034 * dir will end up in D48 of extctrl on existing chips). 3035 3035 * returns contents of GP Inputs. 3036 3036 */
+1 -1
drivers/infiniband/hw/qib/qib_iba7220.c
··· 3742 3742 /* 3743 3743 * Does read/modify/write to appropriate registers to 3744 3744 * set output and direction bits selected by mask. 3745 - * these are in their canonical postions (e.g. lsb of 3745 + * these are in their canonical positions (e.g. lsb of 3746 3746 * dir will end up in D48 of extctrl on existing chips). 3747 3747 * returns contents of GP Inputs. 3748 3748 */
+1 -1
drivers/infiniband/hw/qib/qib_iba7322.c
··· 5665 5665 /* 5666 5666 * Does read/modify/write to appropriate registers to 5667 5667 * set output and direction bits selected by mask. 5668 - * these are in their canonical postions (e.g. lsb of 5668 + * these are in their canonical positions (e.g. lsb of 5669 5669 * dir will end up in D48 of extctrl on existing chips). 5670 5670 * returns contents of GP Inputs. 5671 5671 */
+2 -1
drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
··· 243 243 &qpn_attr_summary.attr, 244 244 NULL 245 245 }; 246 + ATTRIBUTE_GROUPS(usnic_ib_qpn_default); 246 247 247 248 static struct kobj_type usnic_ib_qpn_type = { 248 249 .sysfs_ops = &usnic_ib_qpn_sysfs_ops, 249 - .default_attrs = usnic_ib_qpn_default_attrs 250 + .default_groups = usnic_ib_qpn_default_groups, 250 251 }; 251 252 252 253 int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
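struct kobj_type's default_attrs field is being retired in favor of attribute groups; ATTRIBUTE_GROUPS(usnic_ib_qpn_default) builds the group plumbing from the existing _attrs array. Roughly what the macro expands to here (an approximation, not the verbatim kernel macro):

static const struct attribute_group usnic_ib_qpn_default_group = {
	.attrs = usnic_ib_qpn_default_attrs,
};

static const struct attribute_group *usnic_ib_qpn_default_groups[] = {
	&usnic_ib_qpn_default_group,
	NULL,
};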
+3 -5
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
··· 442 442 int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) 443 443 { 444 444 struct usnic_ib_pd *pd = to_upd(ibpd); 445 - void *umem_pd; 446 445 447 - umem_pd = pd->umem_pd = usnic_uiom_alloc_pd(); 448 - if (IS_ERR_OR_NULL(umem_pd)) { 449 - return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM; 450 - } 446 + pd->umem_pd = usnic_uiom_alloc_pd(); 447 + if (IS_ERR(pd->umem_pd)) 448 + return PTR_ERR(pd->umem_pd); 451 449 452 450 return 0; 453 451 }
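usnic_uiom_alloc_pd() reports failure through ERR_PTR() and — judging by this cleanup — never returns NULL, so IS_ERR_OR_NULL() plus the NULL-to--ENOMEM ternary was dead code. The resulting idiom, schematically:

pd->umem_pd = usnic_uiom_alloc_pd();	/* valid pointer or ERR_PTR(-E...) */
if (IS_ERR(pd->umem_pd))
	return PTR_ERR(pd->umem_pd);	/* propagate the encoded errno */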
+5 -5
drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
··· 63 63 tbl->max = num; 64 64 tbl->mask = mask; 65 65 spin_lock_init(&tbl->lock); 66 - tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL); 66 + tbl->table = bitmap_zalloc(num, GFP_KERNEL); 67 67 if (!tbl->table) 68 68 return -ENOMEM; 69 69 70 70 /* 0th UAR is taken by the device. */ 71 - set_bit(0, tbl->table); 71 + __set_bit(0, tbl->table); 72 72 73 73 return 0; 74 74 } ··· 77 77 { 78 78 struct pvrdma_id_table *tbl = &dev->uar_table.tbl; 79 79 80 - kfree(tbl->table); 80 + bitmap_free(tbl->table); 81 81 } 82 82 83 83 int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) ··· 100 100 return -ENOMEM; 101 101 } 102 102 103 - set_bit(obj, tbl->table); 103 + __set_bit(obj, tbl->table); 104 104 obj |= tbl->top; 105 105 106 106 spin_unlock_irqrestore(&tbl->lock, flags); ··· 120 120 121 121 obj = uar->index & (tbl->max - 1); 122 122 spin_lock_irqsave(&tbl->lock, flags); 123 - clear_bit(obj, tbl->table); 123 + __clear_bit(obj, tbl->table); 124 124 tbl->last = min(tbl->last, obj); 125 125 tbl->top = (tbl->top + tbl->max) & tbl->mask; 126 126 spin_unlock_irqrestore(&tbl->lock, flags);
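set_bit()/clear_bit() are atomic read-modify-write operations; since every access to tbl->table here is already serialized by tbl->lock, the cheaper non-atomic __set_bit()/__clear_bit() variants are sufficient. A sketch of the pattern (field names as in this driver):

spin_lock_irqsave(&tbl->lock, flags);
obj = find_next_zero_bit(tbl->table, tbl->max, tbl->last);
if (obj < tbl->max)
	__set_bit(obj, tbl->table);	/* the lock already excludes other writers */
spin_unlock_irqrestore(&tbl->lock, flags);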
-1
drivers/infiniband/sw/rxe/Makefile
··· 22 22 rxe_mcast.o \ 23 23 rxe_task.o \ 24 24 rxe_net.o \ 25 - rxe_sysfs.o \ 26 25 rxe_hw_counters.o
-4
drivers/infiniband/sw/rxe/rxe.c
··· 13 13 MODULE_DESCRIPTION("Soft RDMA transport"); 14 14 MODULE_LICENSE("Dual BSD/GPL"); 15 15 16 - bool rxe_initialized; 17 - 18 16 /* free resources for a rxe device all objects created for this device must 19 17 * have been destroyed 20 18 */ ··· 288 290 return err; 289 291 290 292 rdma_link_register(&rxe_link_ops); 291 - rxe_initialized = true; 292 293 pr_info("loaded\n"); 293 294 return 0; 294 295 } ··· 298 301 ib_unregister_driver(RDMA_DRIVER_RXE); 299 302 rxe_net_exit(); 300 303 301 - rxe_initialized = false; 302 304 pr_info("unloaded\n"); 303 305 } 304 306
-2
drivers/infiniband/sw/rxe/rxe.h
··· 39 39 40 40 #define RXE_ROCE_V2_SPORT (0xc000) 41 41 42 - extern bool rxe_initialized; 43 - 44 42 void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); 45 43 46 44 int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
+3 -5
drivers/infiniband/sw/rxe/rxe_comp.c
··· 458 458 struct rxe_pkt_info *pkt, 459 459 struct rxe_send_wqe *wqe) 460 460 { 461 - unsigned long flags; 462 - 463 461 if (wqe->has_rd_atomic) { 464 462 wqe->has_rd_atomic = 0; 465 463 atomic_inc(&qp->req.rd_atomic); ··· 470 472 471 473 if (unlikely(qp->req.state == QP_STATE_DRAIN)) { 472 474 /* state_lock used by requester & completer */ 473 - spin_lock_irqsave(&qp->state_lock, flags); 475 + spin_lock_bh(&qp->state_lock); 474 476 if ((qp->req.state == QP_STATE_DRAIN) && 475 477 (qp->comp.psn == qp->req.psn)) { 476 478 qp->req.state = QP_STATE_DRAINED; 477 - spin_unlock_irqrestore(&qp->state_lock, flags); 479 + spin_unlock_bh(&qp->state_lock); 478 480 479 481 if (qp->ibqp.event_handler) { 480 482 struct ib_event ev; ··· 486 488 qp->ibqp.qp_context); 487 489 } 488 490 } else { 489 - spin_unlock_irqrestore(&qp->state_lock, flags); 491 + spin_unlock_bh(&qp->state_lock); 490 492 } 491 493 } 492 494
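The rxe patches drop the irqsave/irqrestore lock variants throughout, presumably because these locks are only ever taken from process context and tasklets (softirq), never from hard-IRQ context; disabling bottom halves is then enough, and there is no flags word to carry around. The substitution, schematically:

/* before */
spin_lock_irqsave(&qp->state_lock, flags);
spin_unlock_irqrestore(&qp->state_lock, flags);

/* after: no hard-IRQ users of the lock */
spin_lock_bh(&qp->state_lock);
spin_unlock_bh(&qp->state_lock);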
+10 -14
drivers/infiniband/sw/rxe/rxe_cq.c
··· 42 42 static void rxe_send_complete(struct tasklet_struct *t) 43 43 { 44 44 struct rxe_cq *cq = from_tasklet(cq, t, comp_task); 45 - unsigned long flags; 46 45 47 - spin_lock_irqsave(&cq->cq_lock, flags); 46 + spin_lock_bh(&cq->cq_lock); 48 47 if (cq->is_dying) { 49 - spin_unlock_irqrestore(&cq->cq_lock, flags); 48 + spin_unlock_bh(&cq->cq_lock); 50 49 return; 51 50 } 52 - spin_unlock_irqrestore(&cq->cq_lock, flags); 51 + spin_unlock_bh(&cq->cq_lock); 53 52 54 53 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); 55 54 } ··· 105 106 int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) 106 107 { 107 108 struct ib_event ev; 108 - unsigned long flags; 109 109 int full; 110 110 void *addr; 111 111 112 - spin_lock_irqsave(&cq->cq_lock, flags); 112 + spin_lock_bh(&cq->cq_lock); 113 113 114 114 full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT); 115 115 if (unlikely(full)) { 116 - spin_unlock_irqrestore(&cq->cq_lock, flags); 116 + spin_unlock_bh(&cq->cq_lock); 117 117 if (cq->ibcq.event_handler) { 118 118 ev.device = cq->ibcq.device; 119 119 ev.element.cq = &cq->ibcq; ··· 128 130 129 131 queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT); 130 132 131 - spin_unlock_irqrestore(&cq->cq_lock, flags); 133 + spin_unlock_bh(&cq->cq_lock); 132 134 133 135 if ((cq->notify == IB_CQ_NEXT_COMP) || 134 136 (cq->notify == IB_CQ_SOLICITED && solicited)) { ··· 141 143 142 144 void rxe_cq_disable(struct rxe_cq *cq) 143 145 { 144 - unsigned long flags; 145 - 146 - spin_lock_irqsave(&cq->cq_lock, flags); 146 + spin_lock_bh(&cq->cq_lock); 147 147 cq->is_dying = true; 148 - spin_unlock_irqrestore(&cq->cq_lock, flags); 148 + spin_unlock_bh(&cq->cq_lock); 149 149 } 150 150 151 - void rxe_cq_cleanup(struct rxe_pool_entry *arg) 151 + void rxe_cq_cleanup(struct rxe_pool_elem *elem) 152 152 { 153 - struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem); 153 + struct rxe_cq *cq = container_of(elem, typeof(*cq), elem); 154 154 155 155 if (cq->queue) 156 156 rxe_queue_cleanup(cq->queue);
+5 -5
drivers/infiniband/sw/rxe/rxe_loc.h
··· 37 37 38 38 void rxe_cq_disable(struct rxe_cq *cq); 39 39 40 - void rxe_cq_cleanup(struct rxe_pool_entry *arg); 40 + void rxe_cq_cleanup(struct rxe_pool_elem *arg); 41 41 42 42 /* rxe_mcast.c */ 43 43 int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, ··· 51 51 52 52 void rxe_drop_all_mcast_groups(struct rxe_qp *qp); 53 53 54 - void rxe_mc_cleanup(struct rxe_pool_entry *arg); 54 + void rxe_mc_cleanup(struct rxe_pool_elem *arg); 55 55 56 56 /* rxe_mmap.c */ 57 57 struct rxe_mmap_info { ··· 89 89 int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe); 90 90 int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr); 91 91 int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); 92 - void rxe_mr_cleanup(struct rxe_pool_entry *arg); 92 + void rxe_mr_cleanup(struct rxe_pool_elem *arg); 93 93 94 94 /* rxe_mw.c */ 95 95 int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata); ··· 97 97 int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe); 98 98 int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey); 99 99 struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey); 100 - void rxe_mw_cleanup(struct rxe_pool_entry *arg); 100 + void rxe_mw_cleanup(struct rxe_pool_elem *arg); 101 101 102 102 /* rxe_net.c */ 103 103 struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, ··· 131 131 132 132 void rxe_qp_destroy(struct rxe_qp *qp); 133 133 134 - void rxe_qp_cleanup(struct rxe_pool_entry *arg); 134 + void rxe_qp_cleanup(struct rxe_pool_elem *elem); 135 135 136 136 static inline int qp_num(struct rxe_qp *qp) 137 137 {
+5 -6
drivers/infiniband/sw/rxe/rxe_mcast.c
··· 40 40 int err; 41 41 struct rxe_mc_grp *grp; 42 42 struct rxe_pool *pool = &rxe->mc_grp_pool; 43 - unsigned long flags; 44 43 45 44 if (rxe->attr.max_mcast_qp_attach == 0) 46 45 return -EINVAL; 47 46 48 - write_lock_irqsave(&pool->pool_lock, flags); 47 + write_lock_bh(&pool->pool_lock); 49 48 50 49 grp = rxe_pool_get_key_locked(pool, mgid); 51 50 if (grp) ··· 52 53 53 54 grp = create_grp(rxe, pool, mgid); 54 55 if (IS_ERR(grp)) { 55 - write_unlock_irqrestore(&pool->pool_lock, flags); 56 + write_unlock_bh(&pool->pool_lock); 56 57 err = PTR_ERR(grp); 57 58 return err; 58 59 } 59 60 60 61 done: 61 - write_unlock_irqrestore(&pool->pool_lock, flags); 62 + write_unlock_bh(&pool->pool_lock); 62 63 *grp_p = grp; 63 64 return 0; 64 65 } ··· 168 169 } 169 170 } 170 171 171 - void rxe_mc_cleanup(struct rxe_pool_entry *arg) 172 + void rxe_mc_cleanup(struct rxe_pool_elem *elem) 172 173 { 173 - struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem); 174 + struct rxe_mc_grp *grp = container_of(elem, typeof(*grp), elem); 174 175 struct rxe_dev *rxe = grp->rxe; 175 176 176 177 rxe_drop_key(grp);
+3 -3
drivers/infiniband/sw/rxe/rxe_mr.c
··· 50 50 51 51 static void rxe_mr_init(int access, struct rxe_mr *mr) 52 52 { 53 - u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1); 53 + u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1); 54 54 u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0; 55 55 56 56 /* set ibmr->l/rkey and also copy into private l/rkey ··· 697 697 return 0; 698 698 } 699 699 700 - void rxe_mr_cleanup(struct rxe_pool_entry *arg) 700 + void rxe_mr_cleanup(struct rxe_pool_elem *elem) 701 701 { 702 - struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem); 702 + struct rxe_mr *mr = container_of(elem, typeof(*mr), elem); 703 703 704 704 ib_umem_release(mr->umem); 705 705
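The pelem-to-elem renames ripple out of the rxe_pool rework: each verbs object embeds a struct rxe_pool_elem, and the type-specific cleanup callback recovers the outer object with container_of(). A runnable userspace miniature of the pattern (all types are stand-ins):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pool_elem {
	int index;
};

struct mr {			/* stand-in for struct rxe_mr */
	int lkey;
	struct pool_elem elem;	/* the member formerly named pelem */
};

static void mr_cleanup(struct pool_elem *elem)
{
	struct mr *mr = container_of(elem, struct mr, elem);

	printf("cleaning mr, lkey=%d\n", mr->lkey);
}

int main(void)
{
	struct mr m = { .lkey = 42 };

	mr_cleanup(&m.elem);
	return 0;
}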
+9 -12
drivers/infiniband/sw/rxe/rxe_mw.c
··· 21 21 } 22 22 23 23 rxe_add_index(mw); 24 - mw->rkey = ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1); 24 + mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1); 25 25 mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ? 26 26 RXE_MW_STATE_FREE : RXE_MW_STATE_VALID; 27 27 spin_lock_init(&mw->lock); ··· 56 56 { 57 57 struct rxe_mw *mw = to_rmw(ibmw); 58 58 struct rxe_pd *pd = to_rpd(ibmw->pd); 59 - unsigned long flags; 60 59 61 - spin_lock_irqsave(&mw->lock, flags); 60 + spin_lock_bh(&mw->lock); 62 61 rxe_do_dealloc_mw(mw); 63 - spin_unlock_irqrestore(&mw->lock, flags); 62 + spin_unlock_bh(&mw->lock); 64 63 65 64 rxe_drop_ref(mw); 66 65 rxe_drop_ref(pd); ··· 196 197 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); 197 198 u32 mw_rkey = wqe->wr.wr.mw.mw_rkey; 198 199 u32 mr_lkey = wqe->wr.wr.mw.mr_lkey; 199 - unsigned long flags; 200 200 201 201 mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8); 202 202 if (unlikely(!mw)) { ··· 223 225 mr = NULL; 224 226 } 225 227 226 - spin_lock_irqsave(&mw->lock, flags); 228 + spin_lock_bh(&mw->lock); 227 229 228 230 ret = rxe_check_bind_mw(qp, wqe, mw, mr); 229 231 if (ret) ··· 231 233 232 234 rxe_do_bind_mw(qp, wqe, mw, mr); 233 235 err_unlock: 234 - spin_unlock_irqrestore(&mw->lock, flags); 236 + spin_unlock_bh(&mw->lock); 235 237 err_drop_mr: 236 238 if (mr) 237 239 rxe_drop_ref(mr); ··· 278 280 int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) 279 281 { 280 282 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); 281 - unsigned long flags; 282 283 struct rxe_mw *mw; 283 284 int ret; 284 285 ··· 292 295 goto err_drop_ref; 293 296 } 294 297 295 - spin_lock_irqsave(&mw->lock, flags); 298 + spin_lock_bh(&mw->lock); 296 299 297 300 ret = rxe_check_invalidate_mw(qp, mw); 298 301 if (ret) ··· 300 303 301 304 rxe_do_invalidate_mw(mw); 302 305 err_unlock: 303 - spin_unlock_irqrestore(&mw->lock, flags); 306 + spin_unlock_bh(&mw->lock); 304 307 err_drop_ref: 305 308 rxe_drop_ref(mw); 306 309 err: ··· 330 333 return mw; 331 334 } 332 335 333 - void rxe_mw_cleanup(struct rxe_pool_entry *elem) 336 + void rxe_mw_cleanup(struct rxe_pool_elem *elem) 334 337 { 335 - struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem); 338 + struct rxe_mw *mw = container_of(elem, typeof(*mw), elem); 336 339 337 340 rxe_drop_index(mw); 338 341 }
+2 -7
drivers/infiniband/sw/rxe/rxe_net.c
··· 22 22 23 23 int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid) 24 24 { 25 - int err; 26 25 unsigned char ll_addr[ETH_ALEN]; 27 26 28 27 ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); 29 - err = dev_mc_add(rxe->ndev, ll_addr); 30 28 31 - return err; 29 + return dev_mc_add(rxe->ndev, ll_addr); 32 30 } 33 31 34 32 int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid) 35 33 { 36 - int err; 37 34 unsigned char ll_addr[ETH_ALEN]; 38 35 39 36 ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); 40 - err = dev_mc_del(rxe->ndev, ll_addr); 41 37 42 - return err; 38 + return dev_mc_del(rxe->ndev, ll_addr); 43 39 } 44 40 45 41 static struct dst_entry *rxe_find_route4(struct net_device *ndev, ··· 440 444 else 441 445 err = rxe_send(skb, pkt); 442 446 if (err) { 443 - rxe->xmit_errors++; 444 447 rxe_counter_inc(rxe, RXE_CNT_SEND_ERR); 445 448 return err; 446 449 }
+370 -369
drivers/infiniband/sw/rxe/rxe_opcode.c
··· 108 108 struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { 109 109 [IB_OPCODE_RC_SEND_FIRST] = { 110 110 .name = "IB_OPCODE_RC_SEND_FIRST", 111 - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK 112 - | RXE_SEND_MASK | RXE_START_MASK, 111 + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK | 112 + RXE_SEND_MASK | RXE_START_MASK, 113 113 .length = RXE_BTH_BYTES, 114 114 .offset = { 115 115 [RXE_BTH] = 0, ··· 117 117 } 118 118 }, 119 119 [IB_OPCODE_RC_SEND_MIDDLE] = { 120 - .name = "IB_OPCODE_RC_SEND_MIDDLE]", 121 - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK 122 - | RXE_MIDDLE_MASK, 120 + .name = "IB_OPCODE_RC_SEND_MIDDLE", 121 + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK | 122 + RXE_MIDDLE_MASK, 123 123 .length = RXE_BTH_BYTES, 124 124 .offset = { 125 125 [RXE_BTH] = 0, ··· 128 128 }, 129 129 [IB_OPCODE_RC_SEND_LAST] = { 130 130 .name = "IB_OPCODE_RC_SEND_LAST", 131 - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK 132 - | RXE_SEND_MASK | RXE_END_MASK, 131 + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | 132 + RXE_SEND_MASK | RXE_END_MASK, 133 133 .length = RXE_BTH_BYTES, 134 134 .offset = { 135 135 [RXE_BTH] = 0, ··· 138 138 }, 139 139 [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = { 140 140 .name = "IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE", 141 - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 142 - | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, 141 + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 142 + RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, 143 143 .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, 144 144 .offset = { 145 145 [RXE_BTH] = 0, 146 146 [RXE_IMMDT] = RXE_BTH_BYTES, 147 - [RXE_PAYLOAD] = RXE_BTH_BYTES 148 - + RXE_IMMDT_BYTES, 147 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 148 + RXE_IMMDT_BYTES, 149 149 } 150 150 }, 151 151 [IB_OPCODE_RC_SEND_ONLY] = { 152 152 .name = "IB_OPCODE_RC_SEND_ONLY", 153 - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK 154 - | RXE_RWR_MASK | RXE_SEND_MASK 155 - | RXE_START_MASK | RXE_END_MASK, 153 + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | 154 + RXE_RWR_MASK | RXE_SEND_MASK | 155 + RXE_START_MASK | RXE_END_MASK, 156 156 .length = RXE_BTH_BYTES, 157 157 .offset = { 158 158 [RXE_BTH] = 0, ··· 161 161 }, 162 162 [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = { 163 163 .name = "IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE", 164 - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 165 - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK 166 - | RXE_START_MASK | RXE_END_MASK, 164 + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 165 + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | 166 + RXE_START_MASK | RXE_END_MASK, 167 167 .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, 168 168 .offset = { 169 169 [RXE_BTH] = 0, 170 170 [RXE_IMMDT] = RXE_BTH_BYTES, 171 - [RXE_PAYLOAD] = RXE_BTH_BYTES 172 - + RXE_IMMDT_BYTES, 171 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 172 + RXE_IMMDT_BYTES, 173 173 } 174 174 }, 175 175 [IB_OPCODE_RC_RDMA_WRITE_FIRST] = { 176 176 .name = "IB_OPCODE_RC_RDMA_WRITE_FIRST", 177 - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 178 - | RXE_WRITE_MASK | RXE_START_MASK, 177 + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 178 + RXE_WRITE_MASK | RXE_START_MASK, 179 179 .length = RXE_BTH_BYTES + RXE_RETH_BYTES, 180 180 .offset = { 181 181 [RXE_BTH] = 0, 182 182 [RXE_RETH] = RXE_BTH_BYTES, 183 - [RXE_PAYLOAD] = RXE_BTH_BYTES 184 - + RXE_RETH_BYTES, 183 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 184 + RXE_RETH_BYTES, 185 185 } 186 186 }, 187 187 
[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = { 188 188 .name = "IB_OPCODE_RC_RDMA_WRITE_MIDDLE", 189 - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK 190 - | RXE_MIDDLE_MASK, 189 + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | 190 + RXE_MIDDLE_MASK, 191 191 .length = RXE_BTH_BYTES, 192 192 .offset = { 193 193 [RXE_BTH] = 0, ··· 196 196 }, 197 197 [IB_OPCODE_RC_RDMA_WRITE_LAST] = { 198 198 .name = "IB_OPCODE_RC_RDMA_WRITE_LAST", 199 - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK 200 - | RXE_END_MASK, 199 + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | 200 + RXE_END_MASK, 201 201 .length = RXE_BTH_BYTES, 202 202 .offset = { 203 203 [RXE_BTH] = 0, ··· 206 206 }, 207 207 [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = { 208 208 .name = "IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE", 209 - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 210 - | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK 211 - | RXE_END_MASK, 209 + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 210 + RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK | 211 + RXE_END_MASK, 212 212 .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, 213 213 .offset = { 214 214 [RXE_BTH] = 0, 215 215 [RXE_IMMDT] = RXE_BTH_BYTES, 216 - [RXE_PAYLOAD] = RXE_BTH_BYTES 217 - + RXE_IMMDT_BYTES, 216 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 217 + RXE_IMMDT_BYTES, 218 218 } 219 219 }, 220 220 [IB_OPCODE_RC_RDMA_WRITE_ONLY] = { 221 221 .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY", 222 - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 223 - | RXE_WRITE_MASK | RXE_START_MASK 224 - | RXE_END_MASK, 222 + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 223 + RXE_WRITE_MASK | RXE_START_MASK | 224 + RXE_END_MASK, 225 225 .length = RXE_BTH_BYTES + RXE_RETH_BYTES, 226 226 .offset = { 227 227 [RXE_BTH] = 0, 228 228 [RXE_RETH] = RXE_BTH_BYTES, 229 - [RXE_PAYLOAD] = RXE_BTH_BYTES 230 - + RXE_RETH_BYTES, 229 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 230 + RXE_RETH_BYTES, 231 231 } 232 232 }, 233 233 [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = { 234 234 .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE", 235 - .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK 236 - | RXE_REQ_MASK | RXE_WRITE_MASK 237 - | RXE_COMP_MASK | RXE_RWR_MASK 238 - | RXE_START_MASK | RXE_END_MASK, 235 + .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | 236 + RXE_REQ_MASK | RXE_WRITE_MASK | 237 + RXE_COMP_MASK | RXE_RWR_MASK | 238 + RXE_START_MASK | RXE_END_MASK, 239 239 .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES, 240 240 .offset = { 241 241 [RXE_BTH] = 0, 242 242 [RXE_RETH] = RXE_BTH_BYTES, 243 - [RXE_IMMDT] = RXE_BTH_BYTES 244 - + RXE_RETH_BYTES, 245 - [RXE_PAYLOAD] = RXE_BTH_BYTES 246 - + RXE_RETH_BYTES 247 - + RXE_IMMDT_BYTES, 243 + [RXE_IMMDT] = RXE_BTH_BYTES + 244 + RXE_RETH_BYTES, 245 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 246 + RXE_RETH_BYTES + 247 + RXE_IMMDT_BYTES, 248 248 } 249 249 }, 250 250 [IB_OPCODE_RC_RDMA_READ_REQUEST] = { 251 251 .name = "IB_OPCODE_RC_RDMA_READ_REQUEST", 252 - .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK 253 - | RXE_START_MASK | RXE_END_MASK, 252 + .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK | 253 + RXE_START_MASK | RXE_END_MASK, 254 254 .length = RXE_BTH_BYTES + RXE_RETH_BYTES, 255 255 .offset = { 256 256 [RXE_BTH] = 0, 257 257 [RXE_RETH] = RXE_BTH_BYTES, 258 - [RXE_PAYLOAD] = RXE_BTH_BYTES 259 - + RXE_RETH_BYTES, 258 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 259 + RXE_RETH_BYTES, 260 260 } 261 261 }, 262 262 [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = { 263 263 .name = 
"IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST", 264 - .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK 265 - | RXE_START_MASK, 264 + .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | 265 + RXE_START_MASK, 266 266 .length = RXE_BTH_BYTES + RXE_AETH_BYTES, 267 267 .offset = { 268 268 [RXE_BTH] = 0, 269 269 [RXE_AETH] = RXE_BTH_BYTES, 270 - [RXE_PAYLOAD] = RXE_BTH_BYTES 271 - + RXE_AETH_BYTES, 270 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 271 + RXE_AETH_BYTES, 272 272 } 273 273 }, 274 274 [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = { ··· 282 282 }, 283 283 [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = { 284 284 .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST", 285 - .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK 286 - | RXE_END_MASK, 285 + .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | 286 + RXE_END_MASK, 287 287 .length = RXE_BTH_BYTES + RXE_AETH_BYTES, 288 288 .offset = { 289 289 [RXE_BTH] = 0, 290 290 [RXE_AETH] = RXE_BTH_BYTES, 291 - [RXE_PAYLOAD] = RXE_BTH_BYTES 292 - + RXE_AETH_BYTES, 291 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 292 + RXE_AETH_BYTES, 293 293 } 294 294 }, 295 295 [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = { 296 296 .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY", 297 - .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK 298 - | RXE_START_MASK | RXE_END_MASK, 297 + .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | 298 + RXE_START_MASK | RXE_END_MASK, 299 299 .length = RXE_BTH_BYTES + RXE_AETH_BYTES, 300 300 .offset = { 301 301 [RXE_BTH] = 0, 302 302 [RXE_AETH] = RXE_BTH_BYTES, 303 - [RXE_PAYLOAD] = RXE_BTH_BYTES 304 - + RXE_AETH_BYTES, 303 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 304 + RXE_AETH_BYTES, 305 305 } 306 306 }, 307 307 [IB_OPCODE_RC_ACKNOWLEDGE] = { 308 308 .name = "IB_OPCODE_RC_ACKNOWLEDGE", 309 - .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK 310 - | RXE_END_MASK, 309 + .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK | 310 + RXE_END_MASK, 311 311 .length = RXE_BTH_BYTES + RXE_AETH_BYTES, 312 312 .offset = { 313 313 [RXE_BTH] = 0, 314 314 [RXE_AETH] = RXE_BTH_BYTES, 315 - [RXE_PAYLOAD] = RXE_BTH_BYTES 316 - + RXE_AETH_BYTES, 315 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 316 + RXE_AETH_BYTES, 317 317 } 318 318 }, 319 319 [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = { 320 320 .name = "IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE", 321 - .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK 322 - | RXE_START_MASK | RXE_END_MASK, 321 + .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK | 322 + RXE_START_MASK | RXE_END_MASK, 323 323 .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES, 324 324 .offset = { 325 325 [RXE_BTH] = 0, 326 326 [RXE_AETH] = RXE_BTH_BYTES, 327 - [RXE_ATMACK] = RXE_BTH_BYTES 328 - + RXE_AETH_BYTES, 329 - [RXE_PAYLOAD] = RXE_BTH_BYTES 330 - + RXE_ATMACK_BYTES + RXE_AETH_BYTES, 327 + [RXE_ATMACK] = RXE_BTH_BYTES + 328 + RXE_AETH_BYTES, 329 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 330 + RXE_ATMACK_BYTES + 331 + RXE_AETH_BYTES, 331 332 } 332 333 }, 333 334 [IB_OPCODE_RC_COMPARE_SWAP] = { 334 335 .name = "IB_OPCODE_RC_COMPARE_SWAP", 335 - .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK 336 - | RXE_START_MASK | RXE_END_MASK, 336 + .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK | 337 + RXE_START_MASK | RXE_END_MASK, 337 338 .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES, 338 339 .offset = { 339 340 [RXE_BTH] = 0, 340 341 [RXE_ATMETH] = RXE_BTH_BYTES, 341 - [RXE_PAYLOAD] = RXE_BTH_BYTES 342 - + RXE_ATMETH_BYTES, 342 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 343 + RXE_ATMETH_BYTES, 343 344 } 344 345 }, 345 346 [IB_OPCODE_RC_FETCH_ADD] = { 346 347 
.name = "IB_OPCODE_RC_FETCH_ADD", 347 - .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK 348 - | RXE_START_MASK | RXE_END_MASK, 348 + .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK | 349 + RXE_START_MASK | RXE_END_MASK, 349 350 .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES, 350 351 .offset = { 351 352 [RXE_BTH] = 0, 352 353 [RXE_ATMETH] = RXE_BTH_BYTES, 353 - [RXE_PAYLOAD] = RXE_BTH_BYTES 354 - + RXE_ATMETH_BYTES, 354 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 355 + RXE_ATMETH_BYTES, 355 356 } 356 357 }, 357 358 [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = { 358 359 .name = "IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE", 359 - .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 360 - | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, 360 + .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 361 + RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, 361 362 .length = RXE_BTH_BYTES + RXE_IETH_BYTES, 362 363 .offset = { 363 364 [RXE_BTH] = 0, 364 365 [RXE_IETH] = RXE_BTH_BYTES, 365 - [RXE_PAYLOAD] = RXE_BTH_BYTES 366 - + RXE_IETH_BYTES, 366 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 367 + RXE_IETH_BYTES, 367 368 } 368 369 }, 369 370 [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = { 370 371 .name = "IB_OPCODE_RC_SEND_ONLY_INV", 371 - .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 372 - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK 373 - | RXE_END_MASK | RXE_START_MASK, 372 + .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 373 + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | 374 + RXE_END_MASK | RXE_START_MASK, 374 375 .length = RXE_BTH_BYTES + RXE_IETH_BYTES, 375 376 .offset = { 376 377 [RXE_BTH] = 0, 377 378 [RXE_IETH] = RXE_BTH_BYTES, 378 - [RXE_PAYLOAD] = RXE_BTH_BYTES 379 - + RXE_IETH_BYTES, 379 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 380 + RXE_IETH_BYTES, 380 381 } 381 382 }, 382 383 383 384 /* UC */ 384 385 [IB_OPCODE_UC_SEND_FIRST] = { 385 386 .name = "IB_OPCODE_UC_SEND_FIRST", 386 - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK 387 - | RXE_SEND_MASK | RXE_START_MASK, 387 + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK | 388 + RXE_SEND_MASK | RXE_START_MASK, 388 389 .length = RXE_BTH_BYTES, 389 390 .offset = { 390 391 [RXE_BTH] = 0, ··· 394 393 }, 395 394 [IB_OPCODE_UC_SEND_MIDDLE] = { 396 395 .name = "IB_OPCODE_UC_SEND_MIDDLE", 397 - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK 398 - | RXE_MIDDLE_MASK, 396 + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK | 397 + RXE_MIDDLE_MASK, 399 398 .length = RXE_BTH_BYTES, 400 399 .offset = { 401 400 [RXE_BTH] = 0, ··· 404 403 }, 405 404 [IB_OPCODE_UC_SEND_LAST] = { 406 405 .name = "IB_OPCODE_UC_SEND_LAST", 407 - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK 408 - | RXE_SEND_MASK | RXE_END_MASK, 406 + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | 407 + RXE_SEND_MASK | RXE_END_MASK, 409 408 .length = RXE_BTH_BYTES, 410 409 .offset = { 411 410 [RXE_BTH] = 0, ··· 414 413 }, 415 414 [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = { 416 415 .name = "IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE", 417 - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 418 - | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, 416 + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 417 + RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, 419 418 .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, 420 419 .offset = { 421 420 [RXE_BTH] = 0, 422 421 [RXE_IMMDT] = RXE_BTH_BYTES, 423 - [RXE_PAYLOAD] = RXE_BTH_BYTES 424 - + RXE_IMMDT_BYTES, 422 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 423 + RXE_IMMDT_BYTES, 425 424 } 426 425 }, 427 426 
[IB_OPCODE_UC_SEND_ONLY] = { 428 427 .name = "IB_OPCODE_UC_SEND_ONLY", 429 - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK 430 - | RXE_RWR_MASK | RXE_SEND_MASK 431 - | RXE_START_MASK | RXE_END_MASK, 428 + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | 429 + RXE_RWR_MASK | RXE_SEND_MASK | 430 + RXE_START_MASK | RXE_END_MASK, 432 431 .length = RXE_BTH_BYTES, 433 432 .offset = { 434 433 [RXE_BTH] = 0, ··· 437 436 }, 438 437 [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = { 439 438 .name = "IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE", 440 - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 441 - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK 442 - | RXE_START_MASK | RXE_END_MASK, 439 + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 440 + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | 441 + RXE_START_MASK | RXE_END_MASK, 443 442 .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, 444 443 .offset = { 445 444 [RXE_BTH] = 0, 446 445 [RXE_IMMDT] = RXE_BTH_BYTES, 447 - [RXE_PAYLOAD] = RXE_BTH_BYTES 448 - + RXE_IMMDT_BYTES, 446 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 447 + RXE_IMMDT_BYTES, 449 448 } 450 449 }, 451 450 [IB_OPCODE_UC_RDMA_WRITE_FIRST] = { 452 451 .name = "IB_OPCODE_UC_RDMA_WRITE_FIRST", 453 - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 454 - | RXE_WRITE_MASK | RXE_START_MASK, 452 + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 453 + RXE_WRITE_MASK | RXE_START_MASK, 455 454 .length = RXE_BTH_BYTES + RXE_RETH_BYTES, 456 455 .offset = { 457 456 [RXE_BTH] = 0, 458 457 [RXE_RETH] = RXE_BTH_BYTES, 459 - [RXE_PAYLOAD] = RXE_BTH_BYTES 460 - + RXE_RETH_BYTES, 458 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 459 + RXE_RETH_BYTES, 461 460 } 462 461 }, 463 462 [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = { 464 463 .name = "IB_OPCODE_UC_RDMA_WRITE_MIDDLE", 465 - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK 466 - | RXE_MIDDLE_MASK, 464 + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | 465 + RXE_MIDDLE_MASK, 467 466 .length = RXE_BTH_BYTES, 468 467 .offset = { 469 468 [RXE_BTH] = 0, ··· 472 471 }, 473 472 [IB_OPCODE_UC_RDMA_WRITE_LAST] = { 474 473 .name = "IB_OPCODE_UC_RDMA_WRITE_LAST", 475 - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK 476 - | RXE_END_MASK, 474 + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | 475 + RXE_END_MASK, 477 476 .length = RXE_BTH_BYTES, 478 477 .offset = { 479 478 [RXE_BTH] = 0, ··· 482 481 }, 483 482 [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = { 484 483 .name = "IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE", 485 - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 486 - | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK 487 - | RXE_END_MASK, 484 + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 485 + RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK | 486 + RXE_END_MASK, 488 487 .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, 489 488 .offset = { 490 489 [RXE_BTH] = 0, 491 490 [RXE_IMMDT] = RXE_BTH_BYTES, 492 - [RXE_PAYLOAD] = RXE_BTH_BYTES 493 - + RXE_IMMDT_BYTES, 491 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 492 + RXE_IMMDT_BYTES, 494 493 } 495 494 }, 496 495 [IB_OPCODE_UC_RDMA_WRITE_ONLY] = { 497 496 .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY", 498 - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 499 - | RXE_WRITE_MASK | RXE_START_MASK 500 - | RXE_END_MASK, 497 + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 498 + RXE_WRITE_MASK | RXE_START_MASK | 499 + RXE_END_MASK, 501 500 .length = RXE_BTH_BYTES + RXE_RETH_BYTES, 502 501 .offset = { 503 502 [RXE_BTH] = 0, 504 503 [RXE_RETH] = RXE_BTH_BYTES, 
505 - [RXE_PAYLOAD] = RXE_BTH_BYTES 506 - + RXE_RETH_BYTES, 504 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 505 + RXE_RETH_BYTES, 507 506 } 508 507 }, 509 508 [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = { 510 509 .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE", 511 - .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK 512 - | RXE_REQ_MASK | RXE_WRITE_MASK 513 - | RXE_COMP_MASK | RXE_RWR_MASK 514 - | RXE_START_MASK | RXE_END_MASK, 510 + .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | 511 + RXE_REQ_MASK | RXE_WRITE_MASK | 512 + RXE_COMP_MASK | RXE_RWR_MASK | 513 + RXE_START_MASK | RXE_END_MASK, 515 514 .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES, 516 515 .offset = { 517 516 [RXE_BTH] = 0, 518 517 [RXE_RETH] = RXE_BTH_BYTES, 519 - [RXE_IMMDT] = RXE_BTH_BYTES 520 - + RXE_RETH_BYTES, 521 - [RXE_PAYLOAD] = RXE_BTH_BYTES 522 - + RXE_RETH_BYTES 523 - + RXE_IMMDT_BYTES, 518 + [RXE_IMMDT] = RXE_BTH_BYTES + 519 + RXE_RETH_BYTES, 520 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 521 + RXE_RETH_BYTES + 522 + RXE_IMMDT_BYTES, 524 523 } 525 524 }, 526 525 527 526 /* RD */ 528 527 [IB_OPCODE_RD_SEND_FIRST] = { 529 528 .name = "IB_OPCODE_RD_SEND_FIRST", 530 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK 531 - | RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK 532 - | RXE_START_MASK, 529 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | 530 + RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK | 531 + RXE_START_MASK, 533 532 .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, 534 533 .offset = { 535 534 [RXE_BTH] = 0, 536 535 [RXE_RDETH] = RXE_BTH_BYTES, 537 - [RXE_DETH] = RXE_BTH_BYTES 538 - + RXE_RDETH_BYTES, 539 - [RXE_PAYLOAD] = RXE_BTH_BYTES 540 - + RXE_RDETH_BYTES 541 - + RXE_DETH_BYTES, 536 + [RXE_DETH] = RXE_BTH_BYTES + 537 + RXE_RDETH_BYTES, 538 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 539 + RXE_RDETH_BYTES + 540 + RXE_DETH_BYTES, 542 541 } 543 542 }, 544 543 [IB_OPCODE_RD_SEND_MIDDLE] = { 545 544 .name = "IB_OPCODE_RD_SEND_MIDDLE", 546 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK 547 - | RXE_REQ_MASK | RXE_SEND_MASK 548 - | RXE_MIDDLE_MASK, 545 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | 546 + RXE_REQ_MASK | RXE_SEND_MASK | 547 + RXE_MIDDLE_MASK, 549 548 .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, 550 549 .offset = { 551 550 [RXE_BTH] = 0, 552 551 [RXE_RDETH] = RXE_BTH_BYTES, 553 - [RXE_DETH] = RXE_BTH_BYTES 554 - + RXE_RDETH_BYTES, 555 - [RXE_PAYLOAD] = RXE_BTH_BYTES 556 - + RXE_RDETH_BYTES 557 - + RXE_DETH_BYTES, 552 + [RXE_DETH] = RXE_BTH_BYTES + 553 + RXE_RDETH_BYTES, 554 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 555 + RXE_RDETH_BYTES + 556 + RXE_DETH_BYTES, 558 557 } 559 558 }, 560 559 [IB_OPCODE_RD_SEND_LAST] = { 561 560 .name = "IB_OPCODE_RD_SEND_LAST", 562 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK 563 - | RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK 564 - | RXE_END_MASK, 561 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | 562 + RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK | 563 + RXE_END_MASK, 565 564 .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, 566 565 .offset = { 567 566 [RXE_BTH] = 0, 568 567 [RXE_RDETH] = RXE_BTH_BYTES, 569 - [RXE_DETH] = RXE_BTH_BYTES 570 - + RXE_RDETH_BYTES, 571 - [RXE_PAYLOAD] = RXE_BTH_BYTES 572 - + RXE_RDETH_BYTES 573 - + RXE_DETH_BYTES, 568 + [RXE_DETH] = RXE_BTH_BYTES + 569 + RXE_RDETH_BYTES, 570 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 571 + RXE_RDETH_BYTES + 572 + RXE_DETH_BYTES, 574 573 } 575 574 }, 576 575 [IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE] 
= { 577 576 .name = "IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE", 578 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK 579 - | RXE_PAYLOAD_MASK | RXE_REQ_MASK 580 - | RXE_COMP_MASK | RXE_SEND_MASK 581 - | RXE_END_MASK, 582 - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES 583 - + RXE_RDETH_BYTES, 577 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK | 578 + RXE_PAYLOAD_MASK | RXE_REQ_MASK | 579 + RXE_COMP_MASK | RXE_SEND_MASK | 580 + RXE_END_MASK, 581 + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES + 582 + RXE_RDETH_BYTES, 584 583 .offset = { 585 584 [RXE_BTH] = 0, 586 585 [RXE_RDETH] = RXE_BTH_BYTES, 587 - [RXE_DETH] = RXE_BTH_BYTES 588 - + RXE_RDETH_BYTES, 589 - [RXE_IMMDT] = RXE_BTH_BYTES 590 - + RXE_RDETH_BYTES 591 - + RXE_DETH_BYTES, 592 - [RXE_PAYLOAD] = RXE_BTH_BYTES 593 - + RXE_RDETH_BYTES 594 - + RXE_DETH_BYTES 595 - + RXE_IMMDT_BYTES, 586 + [RXE_DETH] = RXE_BTH_BYTES + 587 + RXE_RDETH_BYTES, 588 + [RXE_IMMDT] = RXE_BTH_BYTES + 589 + RXE_RDETH_BYTES + 590 + RXE_DETH_BYTES, 591 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 592 + RXE_RDETH_BYTES + 593 + RXE_DETH_BYTES + 594 + RXE_IMMDT_BYTES, 596 595 } 597 596 }, 598 597 [IB_OPCODE_RD_SEND_ONLY] = { 599 598 .name = "IB_OPCODE_RD_SEND_ONLY", 600 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK 601 - | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK 602 - | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, 599 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | 600 + RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK | 601 + RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, 603 602 .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, 604 603 .offset = { 605 604 [RXE_BTH] = 0, 606 605 [RXE_RDETH] = RXE_BTH_BYTES, 607 - [RXE_DETH] = RXE_BTH_BYTES 608 - + RXE_RDETH_BYTES, 609 - [RXE_PAYLOAD] = RXE_BTH_BYTES 610 - + RXE_RDETH_BYTES 611 - + RXE_DETH_BYTES, 606 + [RXE_DETH] = RXE_BTH_BYTES + 607 + RXE_RDETH_BYTES, 608 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 609 + RXE_RDETH_BYTES + 610 + RXE_DETH_BYTES, 612 611 } 613 612 }, 614 613 [IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE] = { 615 614 .name = "IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE", 616 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK 617 - | RXE_PAYLOAD_MASK | RXE_REQ_MASK 618 - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK 619 - | RXE_START_MASK | RXE_END_MASK, 620 - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES 621 - + RXE_RDETH_BYTES, 615 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK | 616 + RXE_PAYLOAD_MASK | RXE_REQ_MASK | 617 + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | 618 + RXE_START_MASK | RXE_END_MASK, 619 + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES + 620 + RXE_RDETH_BYTES, 622 621 .offset = { 623 622 [RXE_BTH] = 0, 624 623 [RXE_RDETH] = RXE_BTH_BYTES, 625 - [RXE_DETH] = RXE_BTH_BYTES 626 - + RXE_RDETH_BYTES, 627 - [RXE_IMMDT] = RXE_BTH_BYTES 628 - + RXE_RDETH_BYTES 629 - + RXE_DETH_BYTES, 630 - [RXE_PAYLOAD] = RXE_BTH_BYTES 631 - + RXE_RDETH_BYTES 632 - + RXE_DETH_BYTES 633 - + RXE_IMMDT_BYTES, 624 + [RXE_DETH] = RXE_BTH_BYTES + 625 + RXE_RDETH_BYTES, 626 + [RXE_IMMDT] = RXE_BTH_BYTES + 627 + RXE_RDETH_BYTES + 628 + RXE_DETH_BYTES, 629 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 630 + RXE_RDETH_BYTES + 631 + RXE_DETH_BYTES + 632 + RXE_IMMDT_BYTES, 634 633 } 635 634 }, 636 635 [IB_OPCODE_RD_RDMA_WRITE_FIRST] = { 637 636 .name = "IB_OPCODE_RD_RDMA_WRITE_FIRST", 638 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK 639 - | RXE_PAYLOAD_MASK | RXE_REQ_MASK 640 - | RXE_WRITE_MASK | RXE_START_MASK, 641 - 
.length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES 642 - + RXE_RDETH_BYTES, 637 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | 638 + RXE_PAYLOAD_MASK | RXE_REQ_MASK | 639 + RXE_WRITE_MASK | RXE_START_MASK, 640 + .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES + 641 + RXE_RDETH_BYTES, 643 642 .offset = { 644 643 [RXE_BTH] = 0, 645 644 [RXE_RDETH] = RXE_BTH_BYTES, 646 - [RXE_DETH] = RXE_BTH_BYTES 647 - + RXE_RDETH_BYTES, 648 - [RXE_RETH] = RXE_BTH_BYTES 649 - + RXE_RDETH_BYTES 650 - + RXE_DETH_BYTES, 651 - [RXE_PAYLOAD] = RXE_BTH_BYTES 652 - + RXE_RDETH_BYTES 653 - + RXE_DETH_BYTES 654 - + RXE_RETH_BYTES, 645 + [RXE_DETH] = RXE_BTH_BYTES + 646 + RXE_RDETH_BYTES, 647 + [RXE_RETH] = RXE_BTH_BYTES + 648 + RXE_RDETH_BYTES + 649 + RXE_DETH_BYTES, 650 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 651 + RXE_RDETH_BYTES + 652 + RXE_DETH_BYTES + 653 + RXE_RETH_BYTES, 655 654 } 656 655 }, 657 656 [IB_OPCODE_RD_RDMA_WRITE_MIDDLE] = { 658 657 .name = "IB_OPCODE_RD_RDMA_WRITE_MIDDLE", 659 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK 660 - | RXE_REQ_MASK | RXE_WRITE_MASK 661 - | RXE_MIDDLE_MASK, 658 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | 659 + RXE_REQ_MASK | RXE_WRITE_MASK | 660 + RXE_MIDDLE_MASK, 662 661 .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, 663 662 .offset = { 664 663 [RXE_BTH] = 0, 665 664 [RXE_RDETH] = RXE_BTH_BYTES, 666 - [RXE_DETH] = RXE_BTH_BYTES 667 - + RXE_RDETH_BYTES, 668 - [RXE_PAYLOAD] = RXE_BTH_BYTES 669 - + RXE_RDETH_BYTES 670 - + RXE_DETH_BYTES, 665 + [RXE_DETH] = RXE_BTH_BYTES + 666 + RXE_RDETH_BYTES, 667 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 668 + RXE_RDETH_BYTES + 669 + RXE_DETH_BYTES, 671 670 } 672 671 }, 673 672 [IB_OPCODE_RD_RDMA_WRITE_LAST] = { 674 673 .name = "IB_OPCODE_RD_RDMA_WRITE_LAST", 675 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK 676 - | RXE_REQ_MASK | RXE_WRITE_MASK 677 - | RXE_END_MASK, 674 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | 675 + RXE_REQ_MASK | RXE_WRITE_MASK | 676 + RXE_END_MASK, 678 677 .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, 679 678 .offset = { 680 679 [RXE_BTH] = 0, 681 680 [RXE_RDETH] = RXE_BTH_BYTES, 682 - [RXE_DETH] = RXE_BTH_BYTES 683 - + RXE_RDETH_BYTES, 684 - [RXE_PAYLOAD] = RXE_BTH_BYTES 685 - + RXE_RDETH_BYTES 686 - + RXE_DETH_BYTES, 681 + [RXE_DETH] = RXE_BTH_BYTES + 682 + RXE_RDETH_BYTES, 683 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 684 + RXE_RDETH_BYTES + 685 + RXE_DETH_BYTES, 687 686 } 688 687 }, 689 688 [IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE] = { 690 689 .name = "IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE", 691 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK 692 - | RXE_PAYLOAD_MASK | RXE_REQ_MASK 693 - | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK 694 - | RXE_END_MASK, 695 - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES 696 - + RXE_RDETH_BYTES, 690 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK | 691 + RXE_PAYLOAD_MASK | RXE_REQ_MASK | 692 + RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK | 693 + RXE_END_MASK, 694 + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES + 695 + RXE_RDETH_BYTES, 697 696 .offset = { 698 697 [RXE_BTH] = 0, 699 698 [RXE_RDETH] = RXE_BTH_BYTES, 700 - [RXE_DETH] = RXE_BTH_BYTES 701 - + RXE_RDETH_BYTES, 702 - [RXE_IMMDT] = RXE_BTH_BYTES 703 - + RXE_RDETH_BYTES 704 - + RXE_DETH_BYTES, 705 - [RXE_PAYLOAD] = RXE_BTH_BYTES 706 - + RXE_RDETH_BYTES 707 - + RXE_DETH_BYTES 708 - + RXE_IMMDT_BYTES, 699 + [RXE_DETH] = RXE_BTH_BYTES + 700 + RXE_RDETH_BYTES, 701 + 
[RXE_IMMDT] = RXE_BTH_BYTES + 702 + RXE_RDETH_BYTES + 703 + RXE_DETH_BYTES, 704 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 705 + RXE_RDETH_BYTES + 706 + RXE_DETH_BYTES + 707 + RXE_IMMDT_BYTES, 709 708 } 710 709 }, 711 710 [IB_OPCODE_RD_RDMA_WRITE_ONLY] = { 712 711 .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY", 713 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK 714 - | RXE_PAYLOAD_MASK | RXE_REQ_MASK 715 - | RXE_WRITE_MASK | RXE_START_MASK 716 - | RXE_END_MASK, 717 - .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES 718 - + RXE_RDETH_BYTES, 712 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | 713 + RXE_PAYLOAD_MASK | RXE_REQ_MASK | 714 + RXE_WRITE_MASK | RXE_START_MASK | 715 + RXE_END_MASK, 716 + .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES + 717 + RXE_RDETH_BYTES, 719 718 .offset = { 720 719 [RXE_BTH] = 0, 721 720 [RXE_RDETH] = RXE_BTH_BYTES, 722 - [RXE_DETH] = RXE_BTH_BYTES 723 - + RXE_RDETH_BYTES, 724 - [RXE_RETH] = RXE_BTH_BYTES 725 - + RXE_RDETH_BYTES 726 - + RXE_DETH_BYTES, 727 - [RXE_PAYLOAD] = RXE_BTH_BYTES 728 - + RXE_RDETH_BYTES 729 - + RXE_DETH_BYTES 730 - + RXE_RETH_BYTES, 721 + [RXE_DETH] = RXE_BTH_BYTES + 722 + RXE_RDETH_BYTES, 723 + [RXE_RETH] = RXE_BTH_BYTES + 724 + RXE_RDETH_BYTES + 725 + RXE_DETH_BYTES, 726 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 727 + RXE_RDETH_BYTES + 728 + RXE_DETH_BYTES + 729 + RXE_RETH_BYTES, 731 730 } 732 731 }, 733 732 [IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = { 734 733 .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE", 735 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK 736 - | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK 737 - | RXE_REQ_MASK | RXE_WRITE_MASK 738 - | RXE_COMP_MASK | RXE_RWR_MASK 739 - | RXE_START_MASK | RXE_END_MASK, 740 - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES 741 - + RXE_DETH_BYTES + RXE_RDETH_BYTES, 734 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | 735 + RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | 736 + RXE_REQ_MASK | RXE_WRITE_MASK | 737 + RXE_COMP_MASK | RXE_RWR_MASK | 738 + RXE_START_MASK | RXE_END_MASK, 739 + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES + 740 + RXE_DETH_BYTES + RXE_RDETH_BYTES, 742 741 .offset = { 743 742 [RXE_BTH] = 0, 744 743 [RXE_RDETH] = RXE_BTH_BYTES, 745 - [RXE_DETH] = RXE_BTH_BYTES 746 - + RXE_RDETH_BYTES, 747 - [RXE_RETH] = RXE_BTH_BYTES 748 - + RXE_RDETH_BYTES 749 - + RXE_DETH_BYTES, 750 - [RXE_IMMDT] = RXE_BTH_BYTES 751 - + RXE_RDETH_BYTES 752 - + RXE_DETH_BYTES 753 - + RXE_RETH_BYTES, 754 - [RXE_PAYLOAD] = RXE_BTH_BYTES 755 - + RXE_RDETH_BYTES 756 - + RXE_DETH_BYTES 757 - + RXE_RETH_BYTES 758 - + RXE_IMMDT_BYTES, 744 + [RXE_DETH] = RXE_BTH_BYTES + 745 + RXE_RDETH_BYTES, 746 + [RXE_RETH] = RXE_BTH_BYTES + 747 + RXE_RDETH_BYTES + 748 + RXE_DETH_BYTES, 749 + [RXE_IMMDT] = RXE_BTH_BYTES + 750 + RXE_RDETH_BYTES + 751 + RXE_DETH_BYTES + 752 + RXE_RETH_BYTES, 753 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 754 + RXE_RDETH_BYTES + 755 + RXE_DETH_BYTES + 756 + RXE_RETH_BYTES + 757 + RXE_IMMDT_BYTES, 759 758 } 760 759 }, 761 760 [IB_OPCODE_RD_RDMA_READ_REQUEST] = { 762 761 .name = "IB_OPCODE_RD_RDMA_READ_REQUEST", 763 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK 764 - | RXE_REQ_MASK | RXE_READ_MASK 765 - | RXE_START_MASK | RXE_END_MASK, 766 - .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES 767 - + RXE_RDETH_BYTES, 762 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | 763 + RXE_REQ_MASK | RXE_READ_MASK | 764 + RXE_START_MASK | RXE_END_MASK, 765 + .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES + 766 + 
RXE_RDETH_BYTES, 768 767 .offset = { 769 768 [RXE_BTH] = 0, 770 769 [RXE_RDETH] = RXE_BTH_BYTES, 771 - [RXE_DETH] = RXE_BTH_BYTES 772 - + RXE_RDETH_BYTES, 773 - [RXE_RETH] = RXE_BTH_BYTES 774 - + RXE_RDETH_BYTES 775 - + RXE_DETH_BYTES, 776 - [RXE_PAYLOAD] = RXE_BTH_BYTES 777 - + RXE_RETH_BYTES 778 - + RXE_DETH_BYTES 779 - + RXE_RDETH_BYTES, 770 + [RXE_DETH] = RXE_BTH_BYTES + 771 + RXE_RDETH_BYTES, 772 + [RXE_RETH] = RXE_BTH_BYTES + 773 + RXE_RDETH_BYTES + 774 + RXE_DETH_BYTES, 775 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 776 + RXE_RETH_BYTES + 777 + RXE_DETH_BYTES + 778 + RXE_RDETH_BYTES, 780 779 } 781 780 }, 782 781 [IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST] = { 783 782 .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST", 784 - .mask = RXE_RDETH_MASK | RXE_AETH_MASK 785 - | RXE_PAYLOAD_MASK | RXE_ACK_MASK 786 - | RXE_START_MASK, 783 + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | 784 + RXE_PAYLOAD_MASK | RXE_ACK_MASK | 785 + RXE_START_MASK, 787 786 .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, 788 787 .offset = { 789 788 [RXE_BTH] = 0, 790 789 [RXE_RDETH] = RXE_BTH_BYTES, 791 - [RXE_AETH] = RXE_BTH_BYTES 792 - + RXE_RDETH_BYTES, 793 - [RXE_PAYLOAD] = RXE_BTH_BYTES 794 - + RXE_RDETH_BYTES 795 - + RXE_AETH_BYTES, 790 + [RXE_AETH] = RXE_BTH_BYTES + 791 + RXE_RDETH_BYTES, 792 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 793 + RXE_RDETH_BYTES + 794 + RXE_AETH_BYTES, 796 795 } 797 796 }, 798 797 [IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE] = { 799 798 .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE", 800 - .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK 801 - | RXE_MIDDLE_MASK, 799 + .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | 800 + RXE_MIDDLE_MASK, 802 801 .length = RXE_BTH_BYTES + RXE_RDETH_BYTES, 803 802 .offset = { 804 803 [RXE_BTH] = 0, 805 804 [RXE_RDETH] = RXE_BTH_BYTES, 806 - [RXE_PAYLOAD] = RXE_BTH_BYTES 807 - + RXE_RDETH_BYTES, 805 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 806 + RXE_RDETH_BYTES, 808 807 } 809 808 }, 810 809 [IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST] = { 811 810 .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST", 812 - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK 813 - | RXE_ACK_MASK | RXE_END_MASK, 811 + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK | 812 + RXE_ACK_MASK | RXE_END_MASK, 814 813 .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, 815 814 .offset = { 816 815 [RXE_BTH] = 0, 817 816 [RXE_RDETH] = RXE_BTH_BYTES, 818 - [RXE_AETH] = RXE_BTH_BYTES 819 - + RXE_RDETH_BYTES, 820 - [RXE_PAYLOAD] = RXE_BTH_BYTES 821 - + RXE_RDETH_BYTES 822 - + RXE_AETH_BYTES, 817 + [RXE_AETH] = RXE_BTH_BYTES + 818 + RXE_RDETH_BYTES, 819 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 820 + RXE_RDETH_BYTES + 821 + RXE_AETH_BYTES, 823 822 } 824 823 }, 825 824 [IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY] = { 826 825 .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY", 827 - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK 828 - | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, 826 + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK | 827 + RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, 829 828 .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, 830 829 .offset = { 831 830 [RXE_BTH] = 0, 832 831 [RXE_RDETH] = RXE_BTH_BYTES, 833 - [RXE_AETH] = RXE_BTH_BYTES 834 - + RXE_RDETH_BYTES, 835 - [RXE_PAYLOAD] = RXE_BTH_BYTES 836 - + RXE_RDETH_BYTES 837 - + RXE_AETH_BYTES, 832 + [RXE_AETH] = RXE_BTH_BYTES + 833 + RXE_RDETH_BYTES, 834 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 835 + RXE_RDETH_BYTES + 836 + RXE_AETH_BYTES, 838 837 } 839 838 }, 840 839 [IB_OPCODE_RD_ACKNOWLEDGE] = { 841 
840 .name = "IB_OPCODE_RD_ACKNOWLEDGE", 842 - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK 843 - | RXE_START_MASK | RXE_END_MASK, 841 + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK | 842 + RXE_START_MASK | RXE_END_MASK, 844 843 .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, 845 844 .offset = { 846 845 [RXE_BTH] = 0, 847 846 [RXE_RDETH] = RXE_BTH_BYTES, 848 - [RXE_AETH] = RXE_BTH_BYTES 849 - + RXE_RDETH_BYTES, 847 + [RXE_AETH] = RXE_BTH_BYTES + 848 + RXE_RDETH_BYTES, 850 849 } 851 850 }, 852 851 [IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE] = { 853 852 .name = "IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE", 854 - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK 855 - | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, 856 - .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES 857 - + RXE_RDETH_BYTES, 853 + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK | 854 + RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, 855 + .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES + 856 + RXE_RDETH_BYTES, 858 857 .offset = { 859 858 [RXE_BTH] = 0, 860 859 [RXE_RDETH] = RXE_BTH_BYTES, 861 - [RXE_AETH] = RXE_BTH_BYTES 862 - + RXE_RDETH_BYTES, 863 - [RXE_ATMACK] = RXE_BTH_BYTES 864 - + RXE_RDETH_BYTES 865 - + RXE_AETH_BYTES, 860 + [RXE_AETH] = RXE_BTH_BYTES + 861 + RXE_RDETH_BYTES, 862 + [RXE_ATMACK] = RXE_BTH_BYTES + 863 + RXE_RDETH_BYTES + 864 + RXE_AETH_BYTES, 866 865 } 867 866 }, 868 867 [IB_OPCODE_RD_COMPARE_SWAP] = { 869 868 .name = "RD_COMPARE_SWAP", 870 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK 871 - | RXE_REQ_MASK | RXE_ATOMIC_MASK 872 - | RXE_START_MASK | RXE_END_MASK, 873 - .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES 874 - + RXE_RDETH_BYTES, 869 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK | 870 + RXE_REQ_MASK | RXE_ATOMIC_MASK | 871 + RXE_START_MASK | RXE_END_MASK, 872 + .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES + 873 + RXE_RDETH_BYTES, 875 874 .offset = { 876 875 [RXE_BTH] = 0, 877 876 [RXE_RDETH] = RXE_BTH_BYTES, 878 - [RXE_DETH] = RXE_BTH_BYTES 879 - + RXE_RDETH_BYTES, 880 - [RXE_ATMETH] = RXE_BTH_BYTES 881 - + RXE_RDETH_BYTES 882 - + RXE_DETH_BYTES, 877 + [RXE_DETH] = RXE_BTH_BYTES + 878 + RXE_RDETH_BYTES, 879 + [RXE_ATMETH] = RXE_BTH_BYTES + 880 + RXE_RDETH_BYTES + 881 + RXE_DETH_BYTES, 883 882 [RXE_PAYLOAD] = RXE_BTH_BYTES + 884 - + RXE_ATMETH_BYTES 885 - + RXE_DETH_BYTES + 886 - + RXE_RDETH_BYTES, 883 + RXE_ATMETH_BYTES + 884 + RXE_DETH_BYTES + 885 + RXE_RDETH_BYTES, 887 886 } 888 887 }, 889 888 [IB_OPCODE_RD_FETCH_ADD] = { 890 889 .name = "IB_OPCODE_RD_FETCH_ADD", 891 - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK 892 - | RXE_REQ_MASK | RXE_ATOMIC_MASK 893 - | RXE_START_MASK | RXE_END_MASK, 894 - .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES 895 - + RXE_RDETH_BYTES, 890 + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK | 891 + RXE_REQ_MASK | RXE_ATOMIC_MASK | 892 + RXE_START_MASK | RXE_END_MASK, 893 + .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES + 894 + RXE_RDETH_BYTES, 896 895 .offset = { 897 896 [RXE_BTH] = 0, 898 897 [RXE_RDETH] = RXE_BTH_BYTES, 899 - [RXE_DETH] = RXE_BTH_BYTES 900 - + RXE_RDETH_BYTES, 901 - [RXE_ATMETH] = RXE_BTH_BYTES 902 - + RXE_RDETH_BYTES 903 - + RXE_DETH_BYTES, 898 + [RXE_DETH] = RXE_BTH_BYTES + 899 + RXE_RDETH_BYTES, 900 + [RXE_ATMETH] = RXE_BTH_BYTES + 901 + RXE_RDETH_BYTES + 902 + RXE_DETH_BYTES, 904 903 [RXE_PAYLOAD] = RXE_BTH_BYTES + 905 - + RXE_ATMETH_BYTES 906 - + RXE_DETH_BYTES + 907 - + RXE_RDETH_BYTES, 904 + 
RXE_ATMETH_BYTES + 905 + RXE_DETH_BYTES + 906 + RXE_RDETH_BYTES, 908 907 } 909 908 }, 910 909 911 910 /* UD */ 912 911 [IB_OPCODE_UD_SEND_ONLY] = { 913 912 .name = "IB_OPCODE_UD_SEND_ONLY", 914 - .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK 915 - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK 916 - | RXE_START_MASK | RXE_END_MASK, 913 + .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | 914 + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | 915 + RXE_START_MASK | RXE_END_MASK, 917 916 .length = RXE_BTH_BYTES + RXE_DETH_BYTES, 918 917 .offset = { 919 918 [RXE_BTH] = 0, 920 919 [RXE_DETH] = RXE_BTH_BYTES, 921 - [RXE_PAYLOAD] = RXE_BTH_BYTES 922 - + RXE_DETH_BYTES, 920 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 921 + RXE_DETH_BYTES, 923 922 } 924 923 }, 925 924 [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = { 926 925 .name = "IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE", 927 - .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK 928 - | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK 929 - | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, 926 + .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | 927 + RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK | 928 + RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, 930 929 .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES, 931 930 .offset = { 932 931 [RXE_BTH] = 0, 933 932 [RXE_DETH] = RXE_BTH_BYTES, 934 - [RXE_IMMDT] = RXE_BTH_BYTES 935 - + RXE_DETH_BYTES, 936 - [RXE_PAYLOAD] = RXE_BTH_BYTES 937 - + RXE_DETH_BYTES 938 - + RXE_IMMDT_BYTES, 933 + [RXE_IMMDT] = RXE_BTH_BYTES + 934 + RXE_DETH_BYTES, 935 + [RXE_PAYLOAD] = RXE_BTH_BYTES + 936 + RXE_DETH_BYTES + 937 + RXE_IMMDT_BYTES, 939 938 } 940 939 }, 941 940
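Note on the opcode-table hunks above: they are continuation-style cleanups (the `|` and `+` operators move to end of line), and they also drop the stray doubled `+` that the old RD_COMPARE_SWAP and RD_FETCH_ADD payload offsets carried (a harmless unary plus, but confusing to read). For orientation, a minimal sketch of how a per-opcode offset table like this is consumed — the names here are illustrative, not the rxe driver's real API:

#include <stddef.h>

enum hdr_type { HDR_BTH, HDR_RDETH, HDR_DETH, HDR_AETH, HDR_PAYLOAD, NUM_HDR };

struct opcode_info {
	const char *name;
	size_t length;          /* header bytes preceding the payload */
	size_t offset[NUM_HDR]; /* byte offset of each header, 0 if absent */
};

/* each offset is precomputed as the sum of the sizes of the headers
 * before it, so locating any header in a received packet is a single
 * table lookup rather than a parse */
static void *hdr_ptr(void *pkt, const struct opcode_info *info,
		     enum hdr_type type)
{
	return (char *)pkt + info->offset[type];
}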
+84 -93
drivers/infiniband/sw/rxe/rxe_pool.c
··· 5 5 */ 6 6 7 7 #include "rxe.h" 8 - #include "rxe_loc.h" 8 + 9 + #define RXE_POOL_ALIGN (16) 9 10 10 11 static const struct rxe_type_info { 11 12 const char *name; 12 13 size_t size; 13 14 size_t elem_offset; 14 - void (*cleanup)(struct rxe_pool_entry *obj); 15 + void (*cleanup)(struct rxe_pool_elem *obj); 15 16 enum rxe_pool_flags flags; 16 17 u32 min_index; 17 18 u32 max_index; ··· 22 21 [RXE_TYPE_UC] = { 23 22 .name = "rxe-uc", 24 23 .size = sizeof(struct rxe_ucontext), 25 - .elem_offset = offsetof(struct rxe_ucontext, pelem), 24 + .elem_offset = offsetof(struct rxe_ucontext, elem), 26 25 .flags = RXE_POOL_NO_ALLOC, 27 26 }, 28 27 [RXE_TYPE_PD] = { 29 28 .name = "rxe-pd", 30 29 .size = sizeof(struct rxe_pd), 31 - .elem_offset = offsetof(struct rxe_pd, pelem), 30 + .elem_offset = offsetof(struct rxe_pd, elem), 32 31 .flags = RXE_POOL_NO_ALLOC, 33 32 }, 34 33 [RXE_TYPE_AH] = { 35 34 .name = "rxe-ah", 36 35 .size = sizeof(struct rxe_ah), 37 - .elem_offset = offsetof(struct rxe_ah, pelem), 36 + .elem_offset = offsetof(struct rxe_ah, elem), 38 37 .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, 39 38 .min_index = RXE_MIN_AH_INDEX, 40 39 .max_index = RXE_MAX_AH_INDEX, ··· 42 41 [RXE_TYPE_SRQ] = { 43 42 .name = "rxe-srq", 44 43 .size = sizeof(struct rxe_srq), 45 - .elem_offset = offsetof(struct rxe_srq, pelem), 44 + .elem_offset = offsetof(struct rxe_srq, elem), 46 45 .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, 47 46 .min_index = RXE_MIN_SRQ_INDEX, 48 47 .max_index = RXE_MAX_SRQ_INDEX, ··· 50 49 [RXE_TYPE_QP] = { 51 50 .name = "rxe-qp", 52 51 .size = sizeof(struct rxe_qp), 53 - .elem_offset = offsetof(struct rxe_qp, pelem), 52 + .elem_offset = offsetof(struct rxe_qp, elem), 54 53 .cleanup = rxe_qp_cleanup, 55 54 .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, 56 55 .min_index = RXE_MIN_QP_INDEX, ··· 59 58 [RXE_TYPE_CQ] = { 60 59 .name = "rxe-cq", 61 60 .size = sizeof(struct rxe_cq), 62 - .elem_offset = offsetof(struct rxe_cq, pelem), 61 + .elem_offset = offsetof(struct rxe_cq, elem), 63 62 .flags = RXE_POOL_NO_ALLOC, 64 63 .cleanup = rxe_cq_cleanup, 65 64 }, 66 65 [RXE_TYPE_MR] = { 67 66 .name = "rxe-mr", 68 67 .size = sizeof(struct rxe_mr), 69 - .elem_offset = offsetof(struct rxe_mr, pelem), 68 + .elem_offset = offsetof(struct rxe_mr, elem), 70 69 .cleanup = rxe_mr_cleanup, 71 70 .flags = RXE_POOL_INDEX, 72 71 .min_index = RXE_MIN_MR_INDEX, ··· 75 74 [RXE_TYPE_MW] = { 76 75 .name = "rxe-mw", 77 76 .size = sizeof(struct rxe_mw), 78 - .elem_offset = offsetof(struct rxe_mw, pelem), 77 + .elem_offset = offsetof(struct rxe_mw, elem), 79 78 .cleanup = rxe_mw_cleanup, 80 79 .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, 81 80 .min_index = RXE_MIN_MW_INDEX, ··· 84 83 [RXE_TYPE_MC_GRP] = { 85 84 .name = "rxe-mc_grp", 86 85 .size = sizeof(struct rxe_mc_grp), 87 - .elem_offset = offsetof(struct rxe_mc_grp, pelem), 86 + .elem_offset = offsetof(struct rxe_mc_grp, elem), 88 87 .cleanup = rxe_mc_cleanup, 89 88 .flags = RXE_POOL_KEY, 90 89 .key_offset = offsetof(struct rxe_mc_grp, mgid), ··· 93 92 [RXE_TYPE_MC_ELEM] = { 94 93 .name = "rxe-mc_elem", 95 94 .size = sizeof(struct rxe_mc_elem), 96 - .elem_offset = offsetof(struct rxe_mc_elem, pelem), 95 + .elem_offset = offsetof(struct rxe_mc_elem, elem), 97 96 }, 98 97 }; 99 - 100 - static inline const char *pool_name(struct rxe_pool *pool) 101 - { 102 - return rxe_type_info[pool->type].name; 103 - } 104 98 105 99 static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min) 106 100 { ··· 126 130 enum rxe_elem_type type, 127 131 unsigned int max_elem) 128 
132 { 133 + const struct rxe_type_info *info = &rxe_type_info[type]; 129 134 int err = 0; 130 - size_t size = rxe_type_info[type].size; 131 135 132 136 memset(pool, 0, sizeof(*pool)); 133 137 134 138 pool->rxe = rxe; 139 + pool->name = info->name; 135 140 pool->type = type; 136 141 pool->max_elem = max_elem; 137 - pool->elem_size = ALIGN(size, RXE_POOL_ALIGN); 138 - pool->flags = rxe_type_info[type].flags; 139 - pool->index.tree = RB_ROOT; 140 - pool->key.tree = RB_ROOT; 141 - pool->cleanup = rxe_type_info[type].cleanup; 142 + pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN); 143 + pool->elem_offset = info->elem_offset; 144 + pool->flags = info->flags; 145 + pool->cleanup = info->cleanup; 142 146 143 147 atomic_set(&pool->num_elem, 0); 144 148 145 149 rwlock_init(&pool->pool_lock); 146 150 147 - if (rxe_type_info[type].flags & RXE_POOL_INDEX) { 148 - err = rxe_pool_init_index(pool, 149 - rxe_type_info[type].max_index, 150 - rxe_type_info[type].min_index); 151 + if (pool->flags & RXE_POOL_INDEX) { 152 + pool->index.tree = RB_ROOT; 153 + err = rxe_pool_init_index(pool, info->max_index, 154 + info->min_index); 151 155 if (err) 152 156 goto out; 153 157 } 154 158 155 - if (rxe_type_info[type].flags & RXE_POOL_KEY) { 156 - pool->key.key_offset = rxe_type_info[type].key_offset; 157 - pool->key.key_size = rxe_type_info[type].key_size; 159 + if (pool->flags & RXE_POOL_KEY) { 160 + pool->key.tree = RB_ROOT; 161 + pool->key.key_offset = info->key_offset; 162 + pool->key.key_size = info->key_size; 158 163 } 159 164 160 165 out: ··· 166 169 { 167 170 if (atomic_read(&pool->num_elem) > 0) 168 171 pr_warn("%s pool destroyed with unfree'd elem\n", 169 - pool_name(pool)); 172 + pool->name); 170 173 171 - bitmap_free(pool->index.table); 174 + if (pool->flags & RXE_POOL_INDEX) 175 + bitmap_free(pool->index.table); 172 176 } 173 177 174 178 static u32 alloc_index(struct rxe_pool *pool) ··· 187 189 return index + pool->index.min_index; 188 190 } 189 191 190 - static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new) 192 + static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new) 191 193 { 192 194 struct rb_node **link = &pool->index.tree.rb_node; 193 195 struct rb_node *parent = NULL; 194 - struct rxe_pool_entry *elem; 196 + struct rxe_pool_elem *elem; 195 197 196 198 while (*link) { 197 199 parent = *link; 198 - elem = rb_entry(parent, struct rxe_pool_entry, index_node); 200 + elem = rb_entry(parent, struct rxe_pool_elem, index_node); 199 201 200 202 if (elem->index == new->index) { 201 203 pr_warn("element already exists!\n"); ··· 214 216 return 0; 215 217 } 216 218 217 - static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new) 219 + static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_elem *new) 218 220 { 219 221 struct rb_node **link = &pool->key.tree.rb_node; 220 222 struct rb_node *parent = NULL; 221 - struct rxe_pool_entry *elem; 223 + struct rxe_pool_elem *elem; 222 224 int cmp; 223 225 224 226 while (*link) { 225 227 parent = *link; 226 - elem = rb_entry(parent, struct rxe_pool_entry, key_node); 228 + elem = rb_entry(parent, struct rxe_pool_elem, key_node); 227 229 228 230 cmp = memcmp((u8 *)elem + pool->key.key_offset, 229 - (u8 *)new + pool->key.key_offset, pool->key.key_size); 231 + (u8 *)new + pool->key.key_offset, 232 + pool->key.key_size); 230 233 231 234 if (cmp == 0) { 232 235 pr_warn("key already exists!\n"); ··· 246 247 return 0; 247 248 } 248 249 249 - int __rxe_add_key_locked(struct rxe_pool_entry *elem, void 
*key) 250 + int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key) 250 251 { 251 252 struct rxe_pool *pool = elem->pool; 252 253 int err; ··· 257 258 return err; 258 259 } 259 260 260 - int __rxe_add_key(struct rxe_pool_entry *elem, void *key) 261 + int __rxe_add_key(struct rxe_pool_elem *elem, void *key) 261 262 { 262 263 struct rxe_pool *pool = elem->pool; 263 - unsigned long flags; 264 264 int err; 265 265 266 - write_lock_irqsave(&pool->pool_lock, flags); 266 + write_lock_bh(&pool->pool_lock); 267 267 err = __rxe_add_key_locked(elem, key); 268 - write_unlock_irqrestore(&pool->pool_lock, flags); 268 + write_unlock_bh(&pool->pool_lock); 269 269 270 270 return err; 271 271 } 272 272 273 - void __rxe_drop_key_locked(struct rxe_pool_entry *elem) 273 + void __rxe_drop_key_locked(struct rxe_pool_elem *elem) 274 274 { 275 275 struct rxe_pool *pool = elem->pool; 276 276 277 277 rb_erase(&elem->key_node, &pool->key.tree); 278 278 } 279 279 280 - void __rxe_drop_key(struct rxe_pool_entry *elem) 280 + void __rxe_drop_key(struct rxe_pool_elem *elem) 281 281 { 282 282 struct rxe_pool *pool = elem->pool; 283 - unsigned long flags; 284 283 285 - write_lock_irqsave(&pool->pool_lock, flags); 284 + write_lock_bh(&pool->pool_lock); 286 285 __rxe_drop_key_locked(elem); 287 - write_unlock_irqrestore(&pool->pool_lock, flags); 286 + write_unlock_bh(&pool->pool_lock); 288 287 } 289 288 290 - int __rxe_add_index_locked(struct rxe_pool_entry *elem) 289 + int __rxe_add_index_locked(struct rxe_pool_elem *elem) 291 290 { 292 291 struct rxe_pool *pool = elem->pool; 293 292 int err; ··· 296 299 return err; 297 300 } 298 301 299 - int __rxe_add_index(struct rxe_pool_entry *elem) 302 + int __rxe_add_index(struct rxe_pool_elem *elem) 300 303 { 301 304 struct rxe_pool *pool = elem->pool; 302 - unsigned long flags; 303 305 int err; 304 306 305 - write_lock_irqsave(&pool->pool_lock, flags); 307 + write_lock_bh(&pool->pool_lock); 306 308 err = __rxe_add_index_locked(elem); 307 - write_unlock_irqrestore(&pool->pool_lock, flags); 309 + write_unlock_bh(&pool->pool_lock); 308 310 309 311 return err; 310 312 } 311 313 312 - void __rxe_drop_index_locked(struct rxe_pool_entry *elem) 314 + void __rxe_drop_index_locked(struct rxe_pool_elem *elem) 313 315 { 314 316 struct rxe_pool *pool = elem->pool; 315 317 ··· 316 320 rb_erase(&elem->index_node, &pool->index.tree); 317 321 } 318 322 319 - void __rxe_drop_index(struct rxe_pool_entry *elem) 323 + void __rxe_drop_index(struct rxe_pool_elem *elem) 320 324 { 321 325 struct rxe_pool *pool = elem->pool; 322 - unsigned long flags; 323 326 324 - write_lock_irqsave(&pool->pool_lock, flags); 327 + write_lock_bh(&pool->pool_lock); 325 328 __rxe_drop_index_locked(elem); 326 - write_unlock_irqrestore(&pool->pool_lock, flags); 329 + write_unlock_bh(&pool->pool_lock); 327 330 } 328 331 329 332 void *rxe_alloc_locked(struct rxe_pool *pool) 330 333 { 331 - const struct rxe_type_info *info = &rxe_type_info[pool->type]; 332 - struct rxe_pool_entry *elem; 333 - u8 *obj; 334 + struct rxe_pool_elem *elem; 335 + void *obj; 334 336 335 337 if (atomic_inc_return(&pool->num_elem) > pool->max_elem) 336 338 goto out_cnt; 337 339 338 - obj = kzalloc(info->size, GFP_ATOMIC); 340 + obj = kzalloc(pool->elem_size, GFP_ATOMIC); 339 341 if (!obj) 340 342 goto out_cnt; 341 343 342 - elem = (struct rxe_pool_entry *)(obj + info->elem_offset); 344 + elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset); 343 345 344 346 elem->pool = pool; 347 + elem->obj = obj; 345 348 kref_init(&elem->ref_cnt); 346 349 
347 350 return obj; ··· 352 357 353 358 void *rxe_alloc(struct rxe_pool *pool) 354 359 { 355 - const struct rxe_type_info *info = &rxe_type_info[pool->type]; 356 - struct rxe_pool_entry *elem; 357 - u8 *obj; 360 + struct rxe_pool_elem *elem; 361 + void *obj; 358 362 359 363 if (atomic_inc_return(&pool->num_elem) > pool->max_elem) 360 364 goto out_cnt; 361 365 362 - obj = kzalloc(info->size, GFP_KERNEL); 366 + obj = kzalloc(pool->elem_size, GFP_KERNEL); 363 367 if (!obj) 364 368 goto out_cnt; 365 369 366 - elem = (struct rxe_pool_entry *)(obj + info->elem_offset); 370 + elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset); 367 371 368 372 elem->pool = pool; 373 + elem->obj = obj; 369 374 kref_init(&elem->ref_cnt); 370 375 371 376 return obj; ··· 375 380 return NULL; 376 381 } 377 382 378 - int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem) 383 + int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem) 379 384 { 380 385 if (atomic_inc_return(&pool->num_elem) > pool->max_elem) 381 386 goto out_cnt; 382 387 383 388 elem->pool = pool; 389 + elem->obj = (u8 *)elem - pool->elem_offset; 384 390 kref_init(&elem->ref_cnt); 385 391 386 392 return 0; ··· 393 397 394 398 void rxe_elem_release(struct kref *kref) 395 399 { 396 - struct rxe_pool_entry *elem = 397 - container_of(kref, struct rxe_pool_entry, ref_cnt); 400 + struct rxe_pool_elem *elem = 401 + container_of(kref, struct rxe_pool_elem, ref_cnt); 398 402 struct rxe_pool *pool = elem->pool; 399 - const struct rxe_type_info *info = &rxe_type_info[pool->type]; 400 - u8 *obj; 403 + void *obj; 401 404 402 405 if (pool->cleanup) 403 406 pool->cleanup(elem); 404 407 405 408 if (!(pool->flags & RXE_POOL_NO_ALLOC)) { 406 - obj = (u8 *)elem - info->elem_offset; 409 + obj = elem->obj; 407 410 kfree(obj); 408 411 } 409 412 ··· 411 416 412 417 void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index) 413 418 { 414 - const struct rxe_type_info *info = &rxe_type_info[pool->type]; 415 419 struct rb_node *node; 416 - struct rxe_pool_entry *elem; 417 - u8 *obj; 420 + struct rxe_pool_elem *elem; 421 + void *obj; 418 422 419 423 node = pool->index.tree.rb_node; 420 424 421 425 while (node) { 422 - elem = rb_entry(node, struct rxe_pool_entry, index_node); 426 + elem = rb_entry(node, struct rxe_pool_elem, index_node); 423 427 424 428 if (elem->index > index) 425 429 node = node->rb_left; ··· 430 436 431 437 if (node) { 432 438 kref_get(&elem->ref_cnt); 433 - obj = (u8 *)elem - info->elem_offset; 439 + obj = elem->obj; 434 440 } else { 435 441 obj = NULL; 436 442 } ··· 440 446 441 447 void *rxe_pool_get_index(struct rxe_pool *pool, u32 index) 442 448 { 443 - u8 *obj; 444 - unsigned long flags; 449 + void *obj; 445 450 446 - read_lock_irqsave(&pool->pool_lock, flags); 451 + read_lock_bh(&pool->pool_lock); 447 452 obj = rxe_pool_get_index_locked(pool, index); 448 - read_unlock_irqrestore(&pool->pool_lock, flags); 453 + read_unlock_bh(&pool->pool_lock); 449 454 450 455 return obj; 451 456 } 452 457 453 458 void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key) 454 459 { 455 - const struct rxe_type_info *info = &rxe_type_info[pool->type]; 456 460 struct rb_node *node; 457 - struct rxe_pool_entry *elem; 458 - u8 *obj; 461 + struct rxe_pool_elem *elem; 462 + void *obj; 459 463 int cmp; 460 464 461 465 node = pool->key.tree.rb_node; 462 466 463 467 while (node) { 464 - elem = rb_entry(node, struct rxe_pool_entry, key_node); 468 + elem = rb_entry(node, struct rxe_pool_elem, key_node); 465 469 466 470 cmp = 
memcmp((u8 *)elem + pool->key.key_offset, 467 471 key, pool->key.key_size); ··· 474 482 475 483 if (node) { 476 484 kref_get(&elem->ref_cnt); 477 - obj = (u8 *)elem - info->elem_offset; 485 + obj = elem->obj; 478 486 } else { 479 487 obj = NULL; 480 488 } ··· 484 492 485 493 void *rxe_pool_get_key(struct rxe_pool *pool, void *key) 486 494 { 487 - u8 *obj; 488 - unsigned long flags; 495 + void *obj; 489 496 490 - read_lock_irqsave(&pool->pool_lock, flags); 497 + read_lock_bh(&pool->pool_lock); 491 498 obj = rxe_pool_get_key_locked(pool, key); 492 - read_unlock_irqrestore(&pool->pool_lock, flags); 499 + read_unlock_bh(&pool->pool_lock); 493 500 494 501 return obj; 495 502 }
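The thread running through this file: the type-table fields (`name`, `elem_size`, `elem_offset`, `cleanup`) are captured in the pool at init time, and each element gains an `obj` back-pointer set once at alloc/add time, so the hot paths stop re-indexing `rxe_type_info[pool->type]`. A minimal sketch of the two pointer conversions, assuming simplified hypothetical types:

#include <stddef.h>

struct pool { size_t elem_offset; };
struct pool_elem { struct pool *pool; void *obj; };

/* object -> embedded element, used when the pool allocates the object */
static struct pool_elem *obj_to_elem(struct pool *pool, void *obj)
{
	return (struct pool_elem *)((char *)obj + pool->elem_offset);
}

/* element -> containing object, cached once so release and lookup
 * paths can simply read elem->obj instead of consulting a type table */
static void elem_cache_obj(struct pool *pool, struct pool_elem *elem)
{
	elem->obj = (char *)elem - pool->elem_offset;
}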
+26 -28
drivers/infiniband/sw/rxe/rxe_pool.h
··· 7 7 #ifndef RXE_POOL_H 8 8 #define RXE_POOL_H 9 9 10 - #define RXE_POOL_ALIGN (16) 11 - #define RXE_POOL_CACHE_FLAGS (0) 12 - 13 10 enum rxe_pool_flags { 14 11 RXE_POOL_INDEX = BIT(1), 15 12 RXE_POOL_KEY = BIT(2), ··· 27 30 RXE_NUM_TYPES, /* keep me last */ 28 31 }; 29 32 30 - struct rxe_pool_entry; 31 - 32 - struct rxe_pool_entry { 33 + struct rxe_pool_elem { 33 34 struct rxe_pool *pool; 35 + void *obj; 34 36 struct kref ref_cnt; 35 37 struct list_head list; 36 38 ··· 43 47 44 48 struct rxe_pool { 45 49 struct rxe_dev *rxe; 50 + const char *name; 46 51 rwlock_t pool_lock; /* protects pool add/del/search */ 47 - size_t elem_size; 48 - void (*cleanup)(struct rxe_pool_entry *obj); 52 + void (*cleanup)(struct rxe_pool_elem *obj); 49 53 enum rxe_pool_flags flags; 50 54 enum rxe_elem_type type; 51 55 52 56 unsigned int max_elem; 53 57 atomic_t num_elem; 58 + size_t elem_size; 59 + size_t elem_offset; 54 60 55 61 /* only used if indexed */ 56 62 struct { ··· 87 89 void *rxe_alloc(struct rxe_pool *pool); 88 90 89 91 /* connect already allocated object to pool */ 90 - int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem); 92 + int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem); 91 93 92 - #define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem) 94 + #define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem) 93 95 94 96 /* assign an index to an indexed object and insert object into 95 97 * pool's rb tree holding and not holding the pool_lock 96 98 */ 97 - int __rxe_add_index_locked(struct rxe_pool_entry *elem); 99 + int __rxe_add_index_locked(struct rxe_pool_elem *elem); 98 100 99 - #define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem) 101 + #define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->elem) 100 102 101 - int __rxe_add_index(struct rxe_pool_entry *elem); 103 + int __rxe_add_index(struct rxe_pool_elem *elem); 102 104 103 - #define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem) 105 + #define rxe_add_index(obj) __rxe_add_index(&(obj)->elem) 104 106 105 107 /* drop an index and remove object from rb tree 106 108 * holding and not holding the pool_lock 107 109 */ 108 - void __rxe_drop_index_locked(struct rxe_pool_entry *elem); 110 + void __rxe_drop_index_locked(struct rxe_pool_elem *elem); 109 111 110 - #define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem) 112 + #define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->elem) 111 113 112 - void __rxe_drop_index(struct rxe_pool_entry *elem); 114 + void __rxe_drop_index(struct rxe_pool_elem *elem); 113 115 114 - #define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem) 116 + #define rxe_drop_index(obj) __rxe_drop_index(&(obj)->elem) 115 117 116 118 /* assign a key to a keyed object and insert object into 117 119 * pool's rb tree holding and not holding pool_lock 118 120 */ 119 - int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key); 121 + int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key); 120 122 121 - #define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key) 123 + #define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->elem, key) 122 124 123 - int __rxe_add_key(struct rxe_pool_entry *elem, void *key); 125 + int __rxe_add_key(struct rxe_pool_elem *elem, void *key); 124 126 125 - #define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key) 127 + #define rxe_add_key(obj, key) __rxe_add_key(&(obj)->elem, key) 126 128 127 129 /* remove elem from rb tree 
holding and not holding the pool_lock */ 128 - void __rxe_drop_key_locked(struct rxe_pool_entry *elem); 130 + void __rxe_drop_key_locked(struct rxe_pool_elem *elem); 129 131 130 - #define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem) 132 + #define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->elem) 131 133 132 - void __rxe_drop_key(struct rxe_pool_entry *elem); 134 + void __rxe_drop_key(struct rxe_pool_elem *elem); 133 135 134 - #define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem) 136 + #define rxe_drop_key(obj) __rxe_drop_key(&(obj)->elem) 135 137 136 138 /* lookup an indexed object from index holding and not holding the pool_lock. 137 139 * takes a reference on object ··· 151 153 void rxe_elem_release(struct kref *kref); 152 154 153 155 /* take a reference on an object */ 154 - #define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt) 156 + #define rxe_add_ref(obj) kref_get(&(obj)->elem.ref_cnt) 155 157 156 158 /* drop a reference on an object */ 157 - #define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release) 159 + #define rxe_drop_ref(obj) kref_put(&(obj)->elem.ref_cnt, rxe_elem_release) 158 160 159 161 #endif /* RXE_POOL_H */
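All of the wrapper macros above share one idiom: callers hold the containing verbs object (a `rxe_qp`, `rxe_mr`, ...) and the macro forwards its embedded `elem` member to the `__rxe_*` worker, so the renamed member only has to be spelled once per call-site pattern. Outside the driver, the idiom looks like this sketch (hypothetical names, trivial stand-in body):

struct pool_elem { int index; };
struct widget { struct pool_elem elem; int payload; };

/* the worker takes the embedded element... */
static int __pool_add_index(struct pool_elem *elem)
{
	return elem->index;  /* stand-in for the real rb-tree insertion */
}

/* ...and the macro lets call sites pass the containing object */
#define pool_add_index(obj) __pool_add_index(&(obj)->elem)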
+3 -3
drivers/infiniband/sw/rxe/rxe_qp.c
··· 167 167 qp->attr.path_mtu = 1; 168 168 qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu); 169 169 170 - qpn = qp->pelem.index; 170 + qpn = qp->elem.index; 171 171 port = &rxe->port; 172 172 173 173 switch (init->qp_type) { ··· 832 832 } 833 833 834 834 /* called when the last reference to the qp is dropped */ 835 - void rxe_qp_cleanup(struct rxe_pool_entry *arg) 835 + void rxe_qp_cleanup(struct rxe_pool_elem *elem) 836 836 { 837 - struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); 837 + struct rxe_qp *qp = container_of(elem, typeof(*qp), elem); 838 838 839 839 execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work); 840 840 }
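Two details worth noting in this file: the QP number is simply the pool index (`qp->elem.index`), and rxe_qp_cleanup() is the standard container_of() round trip — the pool calls back with the embedded element and the driver recovers the outer QP. The generic shape, reusing the sketch types from the pool notes above:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pool_elem { int index; };
struct qp { struct pool_elem elem; int qpn; };

/* callback receives the embedded member; recover the outer object */
static void qp_cleanup(struct pool_elem *elem)
{
	struct qp *qp = container_of(elem, struct qp, elem);

	qp->qpn = 0;  /* stand-in for the real teardown work */
}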
+4 -5
drivers/infiniband/sw/rxe/rxe_queue.c
··· 151 151 struct rxe_queue *new_q; 152 152 unsigned int num_elem = *num_elem_p; 153 153 int err; 154 - unsigned long flags = 0, flags1; 155 154 156 155 new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type); 157 156 if (!new_q) ··· 164 165 goto err1; 165 166 } 166 167 167 - spin_lock_irqsave(consumer_lock, flags1); 168 + spin_lock_bh(consumer_lock); 168 169 169 170 if (producer_lock) { 170 - spin_lock_irqsave(producer_lock, flags); 171 + spin_lock_bh(producer_lock); 171 172 err = resize_finish(q, new_q, num_elem); 172 - spin_unlock_irqrestore(producer_lock, flags); 173 + spin_unlock_bh(producer_lock); 173 174 } else { 174 175 err = resize_finish(q, new_q, num_elem); 175 176 } 176 177 177 - spin_unlock_irqrestore(consumer_lock, flags1); 178 + spin_unlock_bh(consumer_lock); 178 179 179 180 rxe_queue_cleanup(new_q); /* new/old dep on err */ 180 181 if (err)
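This hunk is one instance of a conversion repeated across rxe in this series (also in rxe_pool.c, rxe_req.c, rxe_task.c, rxe_verbs.c): these locks are only ever taken from process or softirq (tasklet) context, never from hard-IRQ context, so disabling bottom halves is sufficient and cheaper, and the on-stack flags cookie disappears. In miniature:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void before(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags); /* also masks hard IRQs */
	/* critical section */
	spin_unlock_irqrestore(&example_lock, flags);
}

static void after(void)
{
	spin_lock_bh(&example_lock); /* masks bottom halves only */
	/* critical section */
	spin_unlock_bh(&example_lock);
}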
+5 -11
drivers/infiniband/sw/rxe/rxe_req.c
··· 110 110 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) 111 111 { 112 112 struct rxe_send_wqe *wqe; 113 - unsigned long flags; 114 113 struct rxe_queue *q = qp->sq.queue; 115 114 unsigned int index = qp->req.wqe_index; 116 115 unsigned int cons; ··· 123 124 /* check to see if we are drained; 124 125 * state_lock used by requester and completer 125 126 */ 126 - spin_lock_irqsave(&qp->state_lock, flags); 127 + spin_lock_bh(&qp->state_lock); 127 128 do { 128 129 if (qp->req.state != QP_STATE_DRAIN) { 129 130 /* comp just finished */ 130 - spin_unlock_irqrestore(&qp->state_lock, 131 - flags); 131 + spin_unlock_bh(&qp->state_lock); 132 132 break; 133 133 } 134 134 135 135 if (wqe && ((index != cons) || 136 136 (wqe->state != wqe_state_posted))) { 137 137 /* comp not done yet */ 138 - spin_unlock_irqrestore(&qp->state_lock, 139 - flags); 138 + spin_unlock_bh(&qp->state_lock); 140 139 break; 141 140 } 142 141 143 142 qp->req.state = QP_STATE_DRAINED; 144 - spin_unlock_irqrestore(&qp->state_lock, flags); 143 + spin_unlock_bh(&qp->state_lock); 145 144 146 145 if (qp->ibqp.event_handler) { 147 146 struct ib_event ev; ··· 369 372 int pad = (-payload) & 0x3; 370 373 int paylen; 371 374 int solicited; 372 - u16 pkey; 373 375 u32 qp_num; 374 376 int ack_req; 375 377 ··· 400 404 (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) == 401 405 (RXE_WRITE_MASK | RXE_IMMDT_MASK)); 402 406 403 - pkey = IB_DEFAULT_PKEY_FULL; 404 - 405 407 qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn : 406 408 qp->attr.dest_qp_num; 407 409 ··· 408 414 if (ack_req) 409 415 qp->req.noack_pkts = 0; 410 416 411 - bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num, 417 + bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num, 412 418 ack_req, pkt->psn); 413 419 414 420 /* init optional headers */
+1 -1
drivers/infiniband/sw/rxe/rxe_srq.c
··· 83 83 srq->ibsrq.event_handler = init->event_handler; 84 84 srq->ibsrq.srq_context = init->srq_context; 85 85 srq->limit = init->attr.srq_limit; 86 - srq->srq_num = srq->pelem.index; 86 + srq->srq_num = srq->elem.index; 87 87 srq->rq.max_wr = init->attr.max_wr; 88 88 srq->rq.max_sge = init->attr.max_sge; 89 89
+0 -119
drivers/infiniband/sw/rxe/rxe_sysfs.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 - /* 3 - * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. 4 - * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. 5 - */ 6 - 7 - #include "rxe.h" 8 - #include "rxe_net.h" 9 - 10 - /* Copy argument and remove trailing CR. Return the new length. */ 11 - static int sanitize_arg(const char *val, char *intf, int intf_len) 12 - { 13 - int len; 14 - 15 - if (!val) 16 - return 0; 17 - 18 - /* Remove newline. */ 19 - for (len = 0; len < intf_len - 1 && val[len] && val[len] != '\n'; len++) 20 - intf[len] = val[len]; 21 - intf[len] = 0; 22 - 23 - if (len == 0 || (val[len] != 0 && val[len] != '\n')) 24 - return 0; 25 - 26 - return len; 27 - } 28 - 29 - static int rxe_param_set_add(const char *val, const struct kernel_param *kp) 30 - { 31 - int len; 32 - int err = 0; 33 - char intf[32]; 34 - struct net_device *ndev; 35 - struct rxe_dev *exists; 36 - 37 - if (!rxe_initialized) { 38 - pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n"); 39 - return -EAGAIN; 40 - } 41 - 42 - len = sanitize_arg(val, intf, sizeof(intf)); 43 - if (!len) { 44 - pr_err("add: invalid interface name\n"); 45 - return -EINVAL; 46 - } 47 - 48 - ndev = dev_get_by_name(&init_net, intf); 49 - if (!ndev) { 50 - pr_err("interface %s not found\n", intf); 51 - return -EINVAL; 52 - } 53 - 54 - if (is_vlan_dev(ndev)) { 55 - pr_err("rxe creation allowed on top of a real device only\n"); 56 - err = -EPERM; 57 - goto err; 58 - } 59 - 60 - exists = rxe_get_dev_from_net(ndev); 61 - if (exists) { 62 - ib_device_put(&exists->ib_dev); 63 - pr_err("already configured on %s\n", intf); 64 - err = -EINVAL; 65 - goto err; 66 - } 67 - 68 - err = rxe_net_add("rxe%d", ndev); 69 - if (err) { 70 - pr_err("failed to add %s\n", intf); 71 - goto err; 72 - } 73 - 74 - err: 75 - dev_put(ndev); 76 - return err; 77 - } 78 - 79 - static int rxe_param_set_remove(const char *val, const struct kernel_param *kp) 80 - { 81 - int len; 82 - char intf[32]; 83 - struct ib_device *ib_dev; 84 - 85 - len = sanitize_arg(val, intf, sizeof(intf)); 86 - if (!len) { 87 - pr_err("add: invalid interface name\n"); 88 - return -EINVAL; 89 - } 90 - 91 - if (strncmp("all", intf, len) == 0) { 92 - pr_info("rxe_sys: remove all"); 93 - ib_unregister_driver(RDMA_DRIVER_RXE); 94 - return 0; 95 - } 96 - 97 - ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE); 98 - if (!ib_dev) { 99 - pr_err("not configured on %s\n", intf); 100 - return -EINVAL; 101 - } 102 - 103 - ib_unregister_device_and_put(ib_dev); 104 - 105 - return 0; 106 - } 107 - 108 - static const struct kernel_param_ops rxe_add_ops = { 109 - .set = rxe_param_set_add, 110 - }; 111 - 112 - static const struct kernel_param_ops rxe_remove_ops = { 113 - .set = rxe_param_set_remove, 114 - }; 115 - 116 - module_param_cb(add, &rxe_add_ops, NULL, 0200); 117 - MODULE_PARM_DESC(add, "DEPRECATED. Create RXE device over network interface"); 118 - module_param_cb(remove, &rxe_remove_ops, NULL, 0200); 119 - MODULE_PARM_DESC(remove, "DEPRECATED. Remove RXE device over network interface");
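With rxe_sysfs.c gone, the deprecated `add`/`remove` module parameters disappear entirely (the error text above was already steering users away from them). Device lifetime is managed through the rdma netlink interface instead; with iproute2 the replacement flow is roughly (interface and device names here are examples):

rdma link add rxe0 type rxe netdev eth0    # replaces writing to .../parameters/add
rdma link delete rxe0                      # replaces the 'remove' parameter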
+8 -10
drivers/infiniband/sw/rxe/rxe_task.c
··· 32 32 { 33 33 int cont; 34 34 int ret; 35 - unsigned long flags; 36 35 struct rxe_task *task = from_tasklet(task, t, tasklet); 37 36 38 - spin_lock_irqsave(&task->state_lock, flags); 37 + spin_lock_bh(&task->state_lock); 39 38 switch (task->state) { 40 39 case TASK_STATE_START: 41 40 task->state = TASK_STATE_BUSY; 42 - spin_unlock_irqrestore(&task->state_lock, flags); 41 + spin_unlock_bh(&task->state_lock); 43 42 break; 44 43 45 44 case TASK_STATE_BUSY: 46 45 task->state = TASK_STATE_ARMED; 47 46 fallthrough; 48 47 case TASK_STATE_ARMED: 49 - spin_unlock_irqrestore(&task->state_lock, flags); 48 + spin_unlock_bh(&task->state_lock); 50 49 return; 51 50 52 51 default: 53 - spin_unlock_irqrestore(&task->state_lock, flags); 52 + spin_unlock_bh(&task->state_lock); 54 53 pr_warn("%s failed with bad state %d\n", __func__, task->state); 55 54 return; 56 55 } ··· 58 59 cont = 0; 59 60 ret = task->func(task->arg); 60 61 61 - spin_lock_irqsave(&task->state_lock, flags); 62 + spin_lock_bh(&task->state_lock); 62 63 switch (task->state) { 63 64 case TASK_STATE_BUSY: 64 65 if (ret) ··· 80 81 pr_warn("%s failed with bad state %d\n", __func__, 81 82 task->state); 82 83 } 83 - spin_unlock_irqrestore(&task->state_lock, flags); 84 + spin_unlock_bh(&task->state_lock); 84 85 } while (cont); 85 86 86 87 task->ret = ret; ··· 105 106 106 107 void rxe_cleanup_task(struct rxe_task *task) 107 108 { 108 - unsigned long flags; 109 109 bool idle; 110 110 111 111 /* ··· 114 116 task->destroyed = true; 115 117 116 118 do { 117 - spin_lock_irqsave(&task->state_lock, flags); 119 + spin_lock_bh(&task->state_lock); 118 120 idle = (task->state == TASK_STATE_START); 119 - spin_unlock_irqrestore(&task->state_lock, flags); 121 + spin_unlock_bh(&task->state_lock); 120 122 } while (!idle); 121 123 122 124 tasklet_kill(&task->tasklet);
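The lock-flavor change above leaves the task state machine intact; its job is to guarantee that a rxe_run_task() racing with a running handler is not dropped. Stripped of the locking (which the real code keeps, as shown), the rearm loop is essentially:

enum task_state { T_START, T_BUSY, T_ARMED };

/* illustrative only: a kick that arrives while the handler runs flips
 * BUSY to ARMED, and the worker loops once more instead of losing it */
static void run_task(enum task_state *state, void (*handler)(void))
{
	int cont;

	do {
		cont = 0;
		handler();
		if (*state == T_ARMED) {
			*state = T_BUSY;
			cont = 1;
		} else {
			*state = T_START;
		}
	} while (cont);
}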
+17 -17
drivers/infiniband/sw/rxe/rxe_verbs.c
··· 182 182 183 183 /* create index > 0 */ 184 184 rxe_add_index(ah); 185 - ah->ah_num = ah->pelem.index; 185 + ah->ah_num = ah->elem.index; 186 186 187 187 if (uresp) { 188 188 /* only if new user provider */ ··· 383 383 const struct ib_recv_wr **bad_wr) 384 384 { 385 385 int err = 0; 386 - unsigned long flags; 387 386 struct rxe_srq *srq = to_rsrq(ibsrq); 388 387 389 - spin_lock_irqsave(&srq->rq.producer_lock, flags); 388 + spin_lock_bh(&srq->rq.producer_lock); 390 389 391 390 while (wr) { 392 391 err = post_one_recv(&srq->rq, wr); ··· 394 395 wr = wr->next; 395 396 } 396 397 397 - spin_unlock_irqrestore(&srq->rq.producer_lock, flags); 398 + spin_unlock_bh(&srq->rq.producer_lock); 398 399 399 400 if (err) 400 401 *bad_wr = wr; ··· 467 468 err = rxe_qp_from_attr(qp, attr, mask, udata); 468 469 if (err) 469 470 goto err1; 471 + 472 + if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH)) 473 + qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label, 474 + qp->ibqp.qp_num, 475 + qp->attr.dest_qp_num); 470 476 471 477 return 0; 472 478 ··· 638 634 int err; 639 635 struct rxe_sq *sq = &qp->sq; 640 636 struct rxe_send_wqe *send_wqe; 641 - unsigned long flags; 642 637 int full; 643 638 644 639 err = validate_send_wr(qp, ibwr, mask, length); 645 640 if (err) 646 641 return err; 647 642 648 - spin_lock_irqsave(&qp->sq.sq_lock, flags); 643 + spin_lock_bh(&qp->sq.sq_lock); 649 644 650 645 full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER); 651 646 652 647 if (unlikely(full)) { 653 - spin_unlock_irqrestore(&qp->sq.sq_lock, flags); 648 + spin_unlock_bh(&qp->sq.sq_lock); 654 649 return -ENOMEM; 655 650 } 656 651 ··· 658 655 659 656 queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER); 660 657 661 - spin_unlock_irqrestore(&qp->sq.sq_lock, flags); 658 + spin_unlock_bh(&qp->sq.sq_lock); 662 659 663 660 return 0; 664 661 } ··· 738 735 int err = 0; 739 736 struct rxe_qp *qp = to_rqp(ibqp); 740 737 struct rxe_rq *rq = &qp->rq; 741 - unsigned long flags; 742 738 743 739 if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) { 744 740 *bad_wr = wr; ··· 751 749 goto err1; 752 750 } 753 751 754 - spin_lock_irqsave(&rq->producer_lock, flags); 752 + spin_lock_bh(&rq->producer_lock); 755 753 756 754 while (wr) { 757 755 err = post_one_recv(rq, wr); ··· 762 760 wr = wr->next; 763 761 } 764 762 765 - spin_unlock_irqrestore(&rq->producer_lock, flags); 763 + spin_unlock_bh(&rq->producer_lock); 766 764 767 765 if (qp->resp.state == QP_STATE_ERROR) 768 766 rxe_run_task(&qp->resp.task, 1); ··· 843 841 int i; 844 842 struct rxe_cq *cq = to_rcq(ibcq); 845 843 struct rxe_cqe *cqe; 846 - unsigned long flags; 847 844 848 - spin_lock_irqsave(&cq->cq_lock, flags); 845 + spin_lock_bh(&cq->cq_lock); 849 846 for (i = 0; i < num_entries; i++) { 850 847 cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER); 851 848 if (!cqe) ··· 853 852 memcpy(wc++, &cqe->ibwc, sizeof(*wc)); 854 853 queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER); 855 854 } 856 - spin_unlock_irqrestore(&cq->cq_lock, flags); 855 + spin_unlock_bh(&cq->cq_lock); 857 856 858 857 return i; 859 858 } ··· 871 870 static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) 872 871 { 873 872 struct rxe_cq *cq = to_rcq(ibcq); 874 - unsigned long irq_flags; 875 873 int ret = 0; 876 874 int empty; 877 875 878 - spin_lock_irqsave(&cq->cq_lock, irq_flags); 876 + spin_lock_bh(&cq->cq_lock); 879 877 if (cq->notify != IB_CQ_NEXT_COMP) 880 878 cq->notify = flags & IB_CQ_SOLICITED_MASK; 881 879 ··· 883 883 if ((flags & 
IB_CQ_REPORT_MISSED_EVENTS) && !empty) 884 884 ret = 1; 885 885 886 - spin_unlock_irqrestore(&cq->cq_lock, irq_flags); 886 + spin_unlock_bh(&cq->cq_lock); 887 887 888 888 return ret; 889 889 }
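The one functional addition in this file is the rxe_modify_qp() hunk: when the AV changes and carries a GRH, the QP's RoCEv2 UDP source port is derived once from the flow label — or, when no flow label was supplied, from a hash of the local and remote QP numbers — via the new common helper. The folding step behaves roughly like this sketch (simplified; treat the details as approximate, the in-tree helper lives in include/rdma/ib_verbs.h):

#include <stdint.h>

#define ROCE_UDP_SPORT_MIN 0xC000u  /* RoCEv2 encap ports: 0xC000-0xFFFF */

/* fold a 20-bit flow label into the 14 free bits of the source port;
 * callers with no flow label first derive one from lqpn/rqpn */
static uint16_t flow_label_to_udp_sport(uint32_t fl)
{
	uint32_t lo = fl & 0x03fff;
	uint32_t hi = fl & 0xfc000;

	lo ^= hi >> 14;
	return (uint16_t)(lo | ROCE_UDP_SPORT_MIN);
}

Deriving the port from the flow label keeps a given flow on one ECMP path while still spreading distinct QP pairs across paths.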
+11 -13
drivers/infiniband/sw/rxe/rxe_verbs.h
··· 35 35 36 36 struct rxe_ucontext { 37 37 struct ib_ucontext ibuc; 38 - struct rxe_pool_entry pelem; 38 + struct rxe_pool_elem elem; 39 39 }; 40 40 41 41 struct rxe_pd { 42 42 struct ib_pd ibpd; 43 - struct rxe_pool_entry pelem; 43 + struct rxe_pool_elem elem; 44 44 }; 45 45 46 46 struct rxe_ah { 47 47 struct ib_ah ibah; 48 - struct rxe_pool_entry pelem; 48 + struct rxe_pool_elem elem; 49 49 struct rxe_av av; 50 50 bool is_user; 51 51 int ah_num; ··· 60 60 61 61 struct rxe_cq { 62 62 struct ib_cq ibcq; 63 - struct rxe_pool_entry pelem; 63 + struct rxe_pool_elem elem; 64 64 struct rxe_queue *queue; 65 65 spinlock_t cq_lock; 66 66 u8 notify; ··· 95 95 96 96 struct rxe_srq { 97 97 struct ib_srq ibsrq; 98 - struct rxe_pool_entry pelem; 98 + struct rxe_pool_elem elem; 99 99 struct rxe_pd *pd; 100 100 struct rxe_rq rq; 101 101 u32 srq_num; ··· 209 209 210 210 struct rxe_qp { 211 211 struct ib_qp ibqp; 212 - struct rxe_pool_entry pelem; 212 + struct rxe_pool_elem elem; 213 213 struct ib_qp_attr attr; 214 214 unsigned int valid; 215 215 unsigned int mtu; ··· 309 309 } 310 310 311 311 struct rxe_mr { 312 - struct rxe_pool_entry pelem; 312 + struct rxe_pool_elem elem; 313 313 struct ib_mr ibmr; 314 314 315 315 struct ib_umem *umem; ··· 342 342 343 343 struct rxe_mw { 344 344 struct ib_mw ibmw; 345 - struct rxe_pool_entry pelem; 345 + struct rxe_pool_elem elem; 346 346 spinlock_t lock; 347 347 enum rxe_mw_state state; 348 348 struct rxe_qp *qp; /* Type 2 only */ ··· 354 354 }; 355 355 356 356 struct rxe_mc_grp { 357 - struct rxe_pool_entry pelem; 357 + struct rxe_pool_elem elem; 358 358 spinlock_t mcg_lock; /* guard group */ 359 359 struct rxe_dev *rxe; 360 360 struct list_head qp_list; ··· 365 365 }; 366 366 367 367 struct rxe_mc_elem { 368 - struct rxe_pool_entry pelem; 368 + struct rxe_pool_elem elem; 369 369 struct list_head qp_list; 370 370 struct list_head grp_list; 371 371 struct rxe_qp *qp; ··· 391 391 struct mutex usdev_lock; 392 392 393 393 struct net_device *ndev; 394 - 395 - int xmit_errors; 396 394 397 395 struct rxe_pool uc_pool; 398 396 struct rxe_pool pd_pool; ··· 482 484 483 485 int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name); 484 486 485 - void rxe_mc_cleanup(struct rxe_pool_entry *arg); 487 + void rxe_mc_cleanup(struct rxe_pool_elem *elem); 486 488 487 489 #endif /* RXE_VERBS_H */
+4 -2
drivers/infiniband/sw/siw/siw_verbs.c
··· 8 8 #include <linux/uaccess.h> 9 9 #include <linux/vmalloc.h> 10 10 #include <linux/xarray.h> 11 + #include <net/addrconf.h> 11 12 12 13 #include <rdma/iw_cm.h> 13 14 #include <rdma/ib_verbs.h> ··· 156 155 attr->vendor_id = SIW_VENDOR_ID; 157 156 attr->vendor_part_id = sdev->vendor_part_id; 158 157 159 - memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6); 158 + addrconf_addr_eui48((u8 *)&attr->sys_image_guid, 159 + sdev->netdev->dev_addr); 160 160 161 161 return 0; 162 162 } ··· 662 660 kbuf += core_sge->length; 663 661 core_sge++; 664 662 } 665 - sqe->sge[0].length = bytes > 0 ? bytes : 0; 663 + sqe->sge[0].length = max(bytes, 0); 666 664 sqe->num_sge = bytes > 0 ? 1 : 0; 667 665 668 666 return bytes;
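The siw fix above matters because sys_image_guid is 8 bytes while a MAC address is only 6: the old memcpy did not produce a well-formed GUID. addrconf_addr_eui48() instead expands the MAC into a modified EUI-64, essentially:

#include <stdint.h>
#include <string.h>

/* MAC aa:bb:cc:dd:ee:ff -> (aa^2):bb:cc:ff:fe:dd:ee:ff */
static void mac_to_eui64(uint8_t eui[8], const uint8_t mac[6])
{
	memcpy(eui, mac, 3);
	eui[3] = 0xff;   /* fixed filler octets */
	eui[4] = 0xfe;
	memcpy(eui + 5, mac + 3, 3);
	eui[0] ^= 2;     /* flip the universal/local bit */
}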
+28 -48
drivers/infiniband/ulp/iser/iscsi_iser.c
··· 113 113 module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); 114 114 MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); 115 115 116 - int iser_pi_guard; 117 - module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO); 118 - MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]"); 119 - 120 116 static int iscsi_iser_set(const char *val, const struct kernel_param *kp) 121 117 { 122 118 int ret; ··· 135 139 * Notes: In case of data length errors or iscsi PDU completion failures 136 140 * this routine will signal iscsi layer of connection failure. 137 141 */ 138 - void 139 - iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 140 - char *rx_data, int rx_data_len) 142 + void iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 143 + char *rx_data, int rx_data_len) 141 144 { 142 145 int rc = 0; 143 146 int datalen; ··· 171 176 * Netes: This routine can't fail, just assign iscsi task 172 177 * hdr and max hdr size. 173 178 */ 174 - static int 175 - iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) 179 + static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) 176 180 { 177 181 struct iscsi_iser_task *iser_task = task->dd_data; 178 182 ··· 192 198 * state mutex to avoid dereferencing the IB device which 193 199 * may have already been terminated. 194 200 */ 195 - int 196 - iser_initialize_task_headers(struct iscsi_task *task, 197 - struct iser_tx_desc *tx_desc) 201 + int iser_initialize_task_headers(struct iscsi_task *task, 202 + struct iser_tx_desc *tx_desc) 198 203 { 199 204 struct iser_conn *iser_conn = task->conn->dd_data; 200 205 struct iser_device *device = iser_conn->ib_conn.device; ··· 230 237 * Return: Returns zero on success or -ENOMEM when failing 231 238 * to init task headers (dma mapping error). 232 239 */ 233 - static int 234 - iscsi_iser_task_init(struct iscsi_task *task) 240 + static int iscsi_iser_task_init(struct iscsi_task *task) 235 241 { 236 242 struct iscsi_iser_task *iser_task = task->dd_data; 237 243 int ret; ··· 264 272 * xmit. 265 273 * 266 274 **/ 267 - static int 268 - iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) 275 + static int iscsi_iser_mtask_xmit(struct iscsi_conn *conn, 276 + struct iscsi_task *task) 269 277 { 270 278 int error = 0; 271 279 ··· 282 290 return error; 283 291 } 284 292 285 - static int 286 - iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn, 287 - struct iscsi_task *task) 293 + static int iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn, 294 + struct iscsi_task *task) 288 295 { 289 296 struct iscsi_r2t_info *r2t = &task->unsol_r2t; 290 297 struct iscsi_data hdr; ··· 317 326 * 318 327 * Return: zero on success or escalates $error on failure. 319 328 */ 320 - static int 321 - iscsi_iser_task_xmit(struct iscsi_task *task) 329 + static int iscsi_iser_task_xmit(struct iscsi_task *task) 322 330 { 323 331 struct iscsi_conn *conn = task->conn; 324 332 struct iscsi_iser_task *iser_task = task->dd_data; ··· 400 410 * 401 411 * In addition the error sector is marked. 402 412 */ 403 - static u8 404 - iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) 413 + static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) 405 414 { 406 415 struct iscsi_iser_task *iser_task = task->dd_data; 407 416 enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ? ··· 449 460 * -EINVAL in case end-point doesn't exsits anymore or iser connection 450 461 * state is not UP (teardown already started). 
451 462 */ 452 - static int 453 - iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, 454 - struct iscsi_cls_conn *cls_conn, 455 - uint64_t transport_eph, 456 - int is_leading) 463 + static int iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, 464 + struct iscsi_cls_conn *cls_conn, 465 + uint64_t transport_eph, int is_leading) 457 466 { 458 467 struct iscsi_conn *conn = cls_conn->dd_data; 459 468 struct iser_conn *iser_conn; ··· 506 519 * from this point iscsi must call conn_stop in session/connection 507 520 * teardown so iser transport must wait for it. 508 521 */ 509 - static int 510 - iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) 522 + static int iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) 511 523 { 512 524 struct iscsi_conn *iscsi_conn; 513 525 struct iser_conn *iser_conn; ··· 528 542 * handle, so we call it under iser the state lock to protect against 529 543 * this kind of race. 530 544 */ 531 - static void 532 - iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 545 + static void iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 533 546 { 534 547 struct iscsi_conn *conn = cls_conn->dd_data; 535 548 struct iser_conn *iser_conn = conn->dd_data; ··· 563 578 * 564 579 * Removes and free iscsi host. 565 580 */ 566 - static void 567 - iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) 581 + static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) 568 582 { 569 583 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 570 584 ··· 572 588 iscsi_host_free(shost); 573 589 } 574 590 575 - static inline unsigned int 576 - iser_dif_prot_caps(int prot_caps) 591 + static inline unsigned int iser_dif_prot_caps(int prot_caps) 577 592 { 578 593 int ret = 0; 579 594 ··· 691 708 return NULL; 692 709 } 693 710 694 - static int 695 - iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn, 696 - enum iscsi_param param, char *buf, int buflen) 711 + static int iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn, 712 + enum iscsi_param param, char *buf, int buflen) 697 713 { 698 714 int value; 699 715 ··· 742 760 * 743 761 * Output connection statistics. 744 762 */ 745 - static void 746 - iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) 763 + static void iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, 764 + struct iscsi_stats *stats) 747 765 { 748 766 struct iscsi_conn *conn = cls_conn->dd_data; 749 767 ··· 794 812 * Return: iscsi_endpoint created by iscsi layer or ERR_PTR(error) 795 813 * if fails. 796 814 */ 797 - static struct iscsi_endpoint * 798 - iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, 799 - int non_blocking) 815 + static struct iscsi_endpoint *iscsi_iser_ep_connect(struct Scsi_Host *shost, 816 + struct sockaddr *dst_addr, 817 + int non_blocking) 800 818 { 801 819 int err; 802 820 struct iser_conn *iser_conn; ··· 839 857 * or more likely iser connection state transitioned to TEMINATING or 840 858 * DOWN during the wait period. 841 859 */ 842 - static int 843 - iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) 860 + static int iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) 844 861 { 845 862 struct iser_conn *iser_conn = ep->dd_data; 846 863 int rc; ··· 874 893 * and cleanup or actually call it immediately in case we didn't pass 875 894 * iscsi conn bind/start stage, thus it is safe. 
876 895 */ 877 - static void 878 - iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) 896 + static void iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) 879 897 { 880 898 struct iser_conn *iser_conn = ep->dd_data; 881 899
+3 -20
drivers/infiniband/ulp/iser/iscsi_iser.h
··· 119 119 120 120 #define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX) 121 121 122 - #define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2) 123 - 124 122 /* the max TX (send) WR supported by the iSER QP is defined by * 125 123 * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect * 126 124 * to have at max for SCSI command. The tx posting & completion handling code * ··· 145 147 - ISER_MAX_TX_MISC_PDUS \ 146 148 - ISER_MAX_RX_MISC_PDUS) / \ 147 149 (1 + ISER_INFLIGHT_DATAOUTS)) 148 - 149 - #define ISER_SIGNAL_CMD_COUNT 32 150 150 151 151 /* Constant PDU lengths calculations */ 152 152 #define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr)) ··· 362 366 * @qp: Connection Queue-pair 363 367 * @cq: Connection completion queue 364 368 * @cq_size: The number of max outstanding completions 365 - * @post_recv_buf_count: post receive counter 366 - * @sig_count: send work request signal count 367 - * @rx_wr: receive work request for batch posts 368 369 * @device: reference to iser device 369 370 * @fr_pool: connection fast registration poool 370 371 * @pi_support: Indicate device T10-PI support ··· 372 379 struct ib_qp *qp; 373 380 struct ib_cq *cq; 374 381 u32 cq_size; 375 - int post_recv_buf_count; 376 - u8 sig_count; 377 - struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; 378 382 struct iser_device *device; 379 383 struct iser_fr_pool fr_pool; 380 384 bool pi_support; ··· 387 397 * @state: connection logical state 388 398 * @qp_max_recv_dtos: maximum number of data outs, corresponds 389 399 * to max number of post recvs 390 - * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1) 391 - * @min_posted_rx: (qp_max_recv_dtos >> 2) 392 400 * @max_cmds: maximum cmds allowed for this connection 393 401 * @name: connection peer portal 394 402 * @release_work: deffered work for release job ··· 397 409 * (state is ISER_CONN_UP) 398 410 * @conn_list: entry in ig conn list 399 411 * @login_desc: login descriptor 400 - * @rx_desc_head: head of rx_descs cyclic buffer 401 412 * @rx_descs: rx buffers array (cyclic buffer) 402 413 * @num_rx_descs: number of rx descriptors 403 414 * @scsi_sg_tablesize: scsi host sg_tablesize ··· 409 422 struct iscsi_endpoint *ep; 410 423 enum iser_conn_state state; 411 424 unsigned qp_max_recv_dtos; 412 - unsigned qp_max_recv_dtos_mask; 413 - unsigned min_posted_rx; 414 425 u16 max_cmds; 415 426 char name[ISER_OBJECT_NAME_SIZE]; 416 427 struct work_struct release_work; ··· 418 433 struct completion up_completion; 419 434 struct list_head conn_list; 420 435 struct iser_login_desc login_desc; 421 - unsigned int rx_desc_head; 422 436 struct iser_rx_desc *rx_descs; 423 437 u32 num_rx_descs; 424 438 unsigned short scsi_sg_tablesize; ··· 470 486 extern struct iser_global ig; 471 487 extern int iser_debug_level; 472 488 extern bool iser_pi_enable; 473 - extern int iser_pi_guard; 474 489 extern unsigned int iser_max_sectors; 475 490 extern bool iser_always_reg; 476 491 ··· 526 543 int non_blocking); 527 544 528 545 int iser_post_recvl(struct iser_conn *iser_conn); 529 - int iser_post_recvm(struct iser_conn *iser_conn, int count); 530 - int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, 531 - bool signal); 546 + int iser_post_recvm(struct iser_conn *iser_conn, 547 + struct iser_rx_desc *rx_desc); 548 + int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc); 532 549 533 550 int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, 534 551 struct iser_data_buf *data,
+41 -65
drivers/infiniband/ulp/iser/iser_initiator.c
··· 95 95 * task->data[ISER_DIR_OUT].data_len, Protection size 96 96 * is stored at task->prot[ISER_DIR_OUT].data_len 97 97 */ 98 - static int 99 - iser_prepare_write_cmd(struct iscsi_task *task, 100 - unsigned int imm_sz, 101 - unsigned int unsol_sz, 102 - unsigned int edtl) 98 + static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz, 99 + unsigned int unsol_sz, unsigned int edtl) 103 100 { 104 101 struct iscsi_iser_task *iser_task = task->dd_data; 105 102 struct iser_mem_reg *mem_reg; ··· 157 160 } 158 161 159 162 /* creates a new tx descriptor and adds header regd buffer */ 160 - static void iser_create_send_desc(struct iser_conn *iser_conn, 161 - struct iser_tx_desc *tx_desc) 163 + static void iser_create_send_desc(struct iser_conn *iser_conn, 164 + struct iser_tx_desc *tx_desc) 162 165 { 163 166 struct iser_device *device = iser_conn->ib_conn.device; 164 167 ··· 244 247 struct iser_device *device = ib_conn->device; 245 248 246 249 iser_conn->qp_max_recv_dtos = session->cmds_max; 247 - iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */ 248 - iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2; 249 250 250 251 if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max, 251 252 iser_conn->pages_per_mr)) ··· 275 280 rx_sg->lkey = device->pd->local_dma_lkey; 276 281 } 277 282 278 - iser_conn->rx_desc_head = 0; 279 283 return 0; 280 284 281 285 rx_desc_dma_map_failed: ··· 316 322 static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) 317 323 { 318 324 struct iser_conn *iser_conn = conn->dd_data; 319 - struct ib_conn *ib_conn = &iser_conn->ib_conn; 320 325 struct iscsi_session *session = conn->session; 326 + int err = 0; 327 + int i; 321 328 322 329 iser_dbg("req op %x flags %x\n", req->opcode, req->flags); 323 330 /* check if this is the last login - going to full feature phase */ 324 331 if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE) 325 - return 0; 326 - 327 - /* 328 - * Check that there is one posted recv buffer 329 - * (for the last login response). 330 - */ 331 - WARN_ON(ib_conn->post_recv_buf_count != 1); 332 + goto out; 332 333 333 334 if (session->discovery_sess) { 334 335 iser_info("Discovery session, re-using login RX buffer\n"); 335 - return 0; 336 - } else 337 - iser_info("Normal session, posting batch of RX %d buffers\n", 338 - iser_conn->min_posted_rx); 336 + goto out; 337 + } 339 338 340 - /* Initial post receive buffers */ 341 - if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx)) 342 - return -ENOMEM; 339 + iser_info("Normal session, posting batch of RX %d buffers\n", 340 + iser_conn->qp_max_recv_dtos - 1); 343 341 344 - return 0; 345 - } 346 - 347 - static inline bool iser_signal_comp(u8 sig_count) 348 - { 349 - return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0); 342 + /* 343 + * Initial post receive buffers. 344 + * There is one already posted recv buffer (for the last login 345 + * response). Therefore, the first recv buffer is skipped here. 
346 + */ 347 + for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) { 348 + err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]); 349 + if (err) 350 + goto out; 351 + } 352 + out: 353 + return err; 350 354 } 351 355 352 356 /** ··· 352 360 * @conn: link to matching iscsi connection 353 361 * @task: SCSI command task 354 362 */ 355 - int iser_send_command(struct iscsi_conn *conn, 356 - struct iscsi_task *task) 363 + int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task) 357 364 { 358 365 struct iser_conn *iser_conn = conn->dd_data; 359 366 struct iscsi_iser_task *iser_task = task->dd_data; ··· 362 371 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; 363 372 struct scsi_cmnd *sc = task->sc; 364 373 struct iser_tx_desc *tx_desc = &iser_task->desc; 365 - u8 sig_count = ++iser_conn->ib_conn.sig_count; 366 374 367 375 edtl = ntohl(hdr->data_length); 368 376 ··· 408 418 409 419 iser_task->status = ISER_TASK_STATUS_STARTED; 410 420 411 - err = iser_post_send(&iser_conn->ib_conn, tx_desc, 412 - iser_signal_comp(sig_count)); 421 + err = iser_post_send(&iser_conn->ib_conn, tx_desc); 413 422 if (!err) 414 423 return 0; 415 424 ··· 423 434 * @task: SCSI command task 424 435 * @hdr: pointer to the LLD's iSCSI message header 425 436 */ 426 - int iser_send_data_out(struct iscsi_conn *conn, 427 - struct iscsi_task *task, 437 + int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task, 428 438 struct iscsi_data *hdr) 429 439 { 430 440 struct iser_conn *iser_conn = conn->dd_data; ··· 475 487 itt, buf_offset, data_seg_len); 476 488 477 489 478 - err = iser_post_send(&iser_conn->ib_conn, tx_desc, true); 490 + err = iser_post_send(&iser_conn->ib_conn, tx_desc); 479 491 if (!err) 480 492 return 0; 481 493 ··· 485 497 return err; 486 498 } 487 499 488 - int iser_send_control(struct iscsi_conn *conn, 489 - struct iscsi_task *task) 500 + int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task) 490 501 { 491 502 struct iser_conn *iser_conn = conn->dd_data; 492 503 struct iscsi_iser_task *iser_task = task->dd_data; ··· 537 550 goto send_control_error; 538 551 } 539 552 540 - err = iser_post_send(&iser_conn->ib_conn, mdesc, true); 553 + err = iser_post_send(&iser_conn->ib_conn, mdesc); 541 554 if (!err) 542 555 return 0; 543 556 ··· 577 590 desc->rsp_dma, ISER_RX_LOGIN_SIZE, 578 591 DMA_FROM_DEVICE); 579 592 580 - ib_conn->post_recv_buf_count--; 593 + if (iser_conn->iscsi_conn->session->discovery_sess) 594 + return; 595 + 596 + /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */ 597 + iser_post_recvm(iser_conn, iser_conn->rx_descs); 581 598 } 582 599 583 - static inline int 584 - iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) 600 + static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) 585 601 { 586 602 if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) || 587 603 (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) { ··· 597 607 return 0; 598 608 } 599 609 600 - static int 601 - iser_check_remote_inv(struct iser_conn *iser_conn, 602 - struct ib_wc *wc, 603 - struct iscsi_hdr *hdr) 610 + static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc, 611 + struct iscsi_hdr *hdr) 604 612 { 605 613 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { 606 614 struct iscsi_task *task; ··· 645 657 struct iser_conn *iser_conn = to_iser_conn(ib_conn); 646 658 struct iser_rx_desc *desc = iser_rx(wc->wr_cqe); 647 659 struct iscsi_hdr *hdr; 648 - int length; 649 - int outstanding, count, err; 660 + int 
length, err; 650 661 651 662 if (unlikely(wc->status != IB_WC_SUCCESS)) { 652 663 iser_err_comp(wc, "task_rsp"); ··· 674 687 desc->dma_addr, ISER_RX_PAYLOAD_SIZE, 675 688 DMA_FROM_DEVICE); 676 689 677 - /* decrementing conn->post_recv_buf_count only --after-- freeing the * 678 - * task eliminates the need to worry on tasks which are completed in * 679 - * parallel to the execution of iser_conn_term. So the code that waits * 680 - * for the posted rx bufs refcount to become zero handles everything */ 681 - ib_conn->post_recv_buf_count--; 682 - 683 - outstanding = ib_conn->post_recv_buf_count; 684 - if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) { 685 - count = min(iser_conn->qp_max_recv_dtos - outstanding, 686 - iser_conn->min_posted_rx); 687 - err = iser_post_recvm(iser_conn, count); 688 - if (err) 689 - iser_err("posting %d rx bufs err %d\n", count, err); 690 - } 690 + err = iser_post_recvm(iser_conn, desc); 691 + if (err) 692 + iser_err("posting rx buffer err %d\n", err); 691 693 } 692 694 693 695 void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
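Two behavioral changes meet in this file. First, send completions are no longer suppressed in batches of ISER_SIGNAL_CMD_COUNT — every send is signaled, so sig_count and iser_signal_comp() go away. Second, receive buffers are reposted 1:1: each rx completion reposts its own descriptor (and the login path reposts the single descriptor that iser_post_rx_bufs() skipped), which is what lets post_recv_buf_count, rx_desc_head, and the min_posted_rx batching logic be deleted. The new receive model reduces to roughly this sketch (stub types and functions, not the driver's real API):

struct conn { int unused; };
struct rx_desc { int unused; };

static void process_pdu(struct conn *c, struct rx_desc *d)
{
	(void)c; (void)d;  /* stand-in for PDU handling */
}

static int post_recv(struct conn *c, struct rx_desc *d)
{
	(void)c; (void)d;  /* stand-in for ib_post_recv() */
	return 0;
}

/* every completion hands its own buffer straight back, so no
 * outstanding-buffer counter or cyclic head index is needed */
static int on_rx_completion(struct conn *c, struct rx_desc *d)
{
	process_pdu(c, d);
	return post_recv(c, d);  /* the caller logs a failure */
}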
+24 -34
drivers/infiniband/ulp/iser/iser_memory.c
··· 44 44 iser_err_comp(wc, "memreg"); 45 45 } 46 46 47 - static struct iser_fr_desc * 48 - iser_reg_desc_get_fr(struct ib_conn *ib_conn) 47 + static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn) 49 48 { 50 49 struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; 51 50 struct iser_fr_desc *desc; ··· 59 60 return desc; 60 61 } 61 62 62 - static void 63 - iser_reg_desc_put_fr(struct ib_conn *ib_conn, 64 - struct iser_fr_desc *desc) 63 + static void iser_reg_desc_put_fr(struct ib_conn *ib_conn, 64 + struct iser_fr_desc *desc) 65 65 { 66 66 struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; 67 67 unsigned long flags; ··· 71 73 } 72 74 73 75 int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, 74 - struct iser_data_buf *data, 75 - enum iser_data_dir iser_dir, 76 - enum dma_data_direction dma_dir) 76 + struct iser_data_buf *data, 77 + enum iser_data_dir iser_dir, 78 + enum dma_data_direction dma_dir) 77 79 { 78 80 struct ib_device *dev; 79 81 ··· 98 100 ib_dma_unmap_sg(dev, data->sg, data->size, dir); 99 101 } 100 102 101 - static int 102 - iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem, 103 - struct iser_mem_reg *reg) 103 + static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem, 104 + struct iser_mem_reg *reg) 104 105 { 105 106 struct scatterlist *sg = mem->sg; 106 107 ··· 151 154 reg->mem_h = NULL; 152 155 } 153 156 154 - static void 155 - iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain) 157 + static void iser_set_dif_domain(struct scsi_cmnd *sc, 158 + struct ib_sig_domain *domain) 156 159 { 157 160 domain->sig_type = IB_SIG_TYPE_T10_DIF; 158 161 domain->sig.dif.pi_interval = scsi_prot_interval(sc); ··· 168 171 domain->sig.dif.ref_remap = true; 169 172 } 170 173 171 - static int 172 - iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs) 174 + static int iser_set_sig_attrs(struct scsi_cmnd *sc, 175 + struct ib_sig_attrs *sig_attrs) 173 176 { 174 177 switch (scsi_get_prot_op(sc)) { 175 178 case SCSI_PROT_WRITE_INSERT: ··· 202 205 return 0; 203 206 } 204 207 205 - static inline void 206 - iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) 208 + static inline void iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) 207 209 { 208 210 *mask = 0; 209 211 if (sc->prot_flags & SCSI_PROT_REF_CHECK) ··· 211 215 *mask |= IB_SIG_CHECK_GUARD; 212 216 } 213 217 214 - static inline void 215 - iser_inv_rkey(struct ib_send_wr *inv_wr, 216 - struct ib_mr *mr, 217 - struct ib_cqe *cqe, 218 - struct ib_send_wr *next_wr) 218 + static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr, 219 + struct ib_cqe *cqe, struct ib_send_wr *next_wr) 219 220 { 220 221 inv_wr->opcode = IB_WR_LOCAL_INV; 221 222 inv_wr->wr_cqe = cqe; ··· 222 229 inv_wr->next = next_wr; 223 230 } 224 231 225 - static int 226 - iser_reg_sig_mr(struct iscsi_iser_task *iser_task, 227 - struct iser_data_buf *mem, 228 - struct iser_data_buf *sig_mem, 229 - struct iser_reg_resources *rsc, 230 - struct iser_mem_reg *sig_reg) 232 + static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task, 233 + struct iser_data_buf *mem, 234 + struct iser_data_buf *sig_mem, 235 + struct iser_reg_resources *rsc, 236 + struct iser_mem_reg *sig_reg) 231 237 { 232 238 struct iser_tx_desc *tx_desc = &iser_task->desc; 233 239 struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe; ··· 327 335 return 0; 328 336 } 329 337 330 - static int 331 - iser_reg_data_sg(struct iscsi_iser_task *task, 332 - struct iser_data_buf *mem, 333 - struct 
iser_fr_desc *desc, 334 - bool use_dma_key, 335 - struct iser_mem_reg *reg) 338 + static int iser_reg_data_sg(struct iscsi_iser_task *task, 339 + struct iser_data_buf *mem, 340 + struct iser_fr_desc *desc, bool use_dma_key, 341 + struct iser_mem_reg *reg) 336 342 { 337 343 struct iser_device *device = task->iser_conn->ib_conn.device; 338 344
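
Note: the iser_memory.c hunks above change no logic; they only realign multi-line function definitions to the kernel's preferred style, moving the return type onto the same line as the function name and indenting continuation arguments to the opening parenthesis. A minimal illustration of the convention (hypothetical function, not a hunk from this patch):

	/* before: return type split onto its own line, arguments loosely indented */
	static int
	example_reg(struct iser_device *device, struct iser_data_buf *mem,
		struct iser_mem_reg *reg);

	/* after: type and name on one line, arguments aligned under the paren */
	static int example_reg(struct iser_device *device, struct iser_data_buf *mem,
			       struct iser_mem_reg *reg);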
+57 -81
drivers/infiniband/ulp/iser/iser_verbs.c
··· 265 265 memset(&init_attr, 0, sizeof(init_attr)); 266 266 267 267 init_attr.event_handler = iser_qp_event_callback; 268 - init_attr.qp_context = (void *)ib_conn; 269 - init_attr.send_cq = ib_conn->cq; 270 - init_attr.recv_cq = ib_conn->cq; 271 - init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; 268 + init_attr.qp_context = (void *)ib_conn; 269 + init_attr.send_cq = ib_conn->cq; 270 + init_attr.recv_cq = ib_conn->cq; 271 + init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; 272 272 init_attr.cap.max_send_sge = 2; 273 273 init_attr.cap.max_recv_sge = 1; 274 - init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 275 - init_attr.qp_type = IB_QPT_RC; 274 + init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 275 + init_attr.qp_type = IB_QPT_RC; 276 276 init_attr.cap.max_send_wr = max_send_wr; 277 277 if (ib_conn->pi_support) 278 278 init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; ··· 283 283 goto out_err; 284 284 285 285 ib_conn->qp = ib_conn->cma_id->qp; 286 - iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", 287 - ib_conn, ib_conn->cma_id, 288 - ib_conn->cma_id->qp, max_send_wr); 286 + iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn, 287 + ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr); 289 288 return ret; 290 289 291 290 out_err: ··· 312 313 goto inc_refcnt; 313 314 314 315 device = kzalloc(sizeof *device, GFP_KERNEL); 315 - if (device == NULL) 316 + if (!device) 316 317 goto out; 317 318 318 319 /* assign this device to the device */ ··· 391 392 * so the cm_id removal is out of here. It is Safe to 392 393 * be invoked multiple times. 393 394 */ 394 - static void iser_free_ib_conn_res(struct iser_conn *iser_conn, 395 - bool destroy) 395 + static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy) 396 396 { 397 397 struct ib_conn *ib_conn = &iser_conn->ib_conn; 398 398 struct iser_device *device = ib_conn->device; ··· 399 401 iser_info("freeing conn %p cma_id %p qp %p\n", 400 402 iser_conn, ib_conn->cma_id, ib_conn->qp); 401 403 402 - if (ib_conn->qp != NULL) { 404 + if (ib_conn->qp) { 403 405 rdma_destroy_qp(ib_conn->cma_id); 404 406 ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size); 405 407 ib_conn->qp = NULL; ··· 409 411 if (iser_conn->rx_descs) 410 412 iser_free_rx_descriptors(iser_conn); 411 413 412 - if (device != NULL) { 414 + if (device) { 413 415 iser_device_try_release(device); 414 416 ib_conn->device = NULL; 415 417 } ··· 443 445 iser_free_ib_conn_res(iser_conn, true); 444 446 mutex_unlock(&iser_conn->state_mutex); 445 447 446 - if (ib_conn->cma_id != NULL) { 448 + if (ib_conn->cma_id) { 447 449 rdma_destroy_id(ib_conn->cma_id); 448 450 ib_conn->cma_id = NULL; 449 451 } ··· 499 501 { 500 502 struct iser_conn *iser_conn; 501 503 502 - iser_conn = (struct iser_conn *)cma_id->context; 504 + iser_conn = cma_id->context; 503 505 iser_conn->state = ISER_CONN_TERMINATING; 504 506 } 505 507 506 - static void 507 - iser_calc_scsi_params(struct iser_conn *iser_conn, 508 - unsigned int max_sectors) 508 + static void iser_calc_scsi_params(struct iser_conn *iser_conn, 509 + unsigned int max_sectors) 509 510 { 510 511 struct iser_device *device = iser_conn->ib_conn.device; 511 512 struct ib_device_attr *attr = &device->ib_device->attrs; ··· 542 545 static void iser_addr_handler(struct rdma_cm_id *cma_id) 543 546 { 544 547 struct iser_device *device; 545 - struct iser_conn *iser_conn; 546 - struct ib_conn *ib_conn; 548 + struct iser_conn *iser_conn; 549 + struct ib_conn *ib_conn; 547 550 int ret; 548 551 549 - iser_conn = (struct iser_conn *)cma_id->context; 
552 + iser_conn = cma_id->context; 550 553 if (iser_conn->state != ISER_CONN_PENDING) 551 554 /* bailout */ 552 555 return; ··· 590 593 static void iser_route_handler(struct rdma_cm_id *cma_id) 591 594 { 592 595 struct rdma_conn_param conn_param; 593 - int ret; 596 + int ret; 594 597 struct iser_cm_hdr req_hdr; 595 - struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; 598 + struct iser_conn *iser_conn = cma_id->context; 596 599 struct ib_conn *ib_conn = &iser_conn->ib_conn; 597 600 struct ib_device *ib_dev = ib_conn->device->ib_device; 598 601 ··· 606 609 607 610 memset(&conn_param, 0, sizeof conn_param); 608 611 conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom; 609 - conn_param.initiator_depth = 1; 610 - conn_param.retry_count = 7; 611 - conn_param.rnr_retry_count = 6; 612 + conn_param.initiator_depth = 1; 613 + conn_param.retry_count = 7; 614 + conn_param.rnr_retry_count = 6; 612 615 613 616 memset(&req_hdr, 0, sizeof(req_hdr)); 614 617 req_hdr.flags = ISER_ZBVA_NOT_SUP; ··· 635 638 struct ib_qp_attr attr; 636 639 struct ib_qp_init_attr init_attr; 637 640 638 - iser_conn = (struct iser_conn *)cma_id->context; 641 + iser_conn = cma_id->context; 639 642 if (iser_conn->state != ISER_CONN_PENDING) 640 643 /* bailout */ 641 644 return; ··· 658 661 659 662 static void iser_disconnected_handler(struct rdma_cm_id *cma_id) 660 663 { 661 - struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; 664 + struct iser_conn *iser_conn = cma_id->context; 662 665 663 666 if (iser_conn_terminate(iser_conn)) { 664 667 if (iser_conn->iscsi_conn) ··· 672 675 static void iser_cleanup_handler(struct rdma_cm_id *cma_id, 673 676 bool destroy) 674 677 { 675 - struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; 678 + struct iser_conn *iser_conn = cma_id->context; 676 679 677 680 /* 678 681 * We are not guaranteed that we visited disconnected_handler ··· 684 687 complete(&iser_conn->ib_completion); 685 688 } 686 689 687 - static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 690 + static int iser_cma_handler(struct rdma_cm_id *cma_id, 691 + struct rdma_cm_event *event) 688 692 { 689 693 struct iser_conn *iser_conn; 690 694 int ret = 0; 691 695 692 - iser_conn = (struct iser_conn *)cma_id->context; 696 + iser_conn = cma_id->context; 693 697 iser_info("%s (%d): status %d conn %p id %p\n", 694 698 rdma_event_msg(event->event), event->event, 695 699 event->status, cma_id->context, cma_id); ··· 755 757 INIT_LIST_HEAD(&iser_conn->conn_list); 756 758 mutex_init(&iser_conn->state_mutex); 757 759 758 - ib_conn->post_recv_buf_count = 0; 759 760 ib_conn->reg_cqe.done = iser_reg_comp; 760 761 } 761 762 ··· 762 765 * starts the process of connecting to the target 763 766 * sleeps until the connection is established or rejected 764 767 */ 765 - int iser_connect(struct iser_conn *iser_conn, 766 - struct sockaddr *src_addr, 767 - struct sockaddr *dst_addr, 768 - int non_blocking) 768 + int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr, 769 + struct sockaddr *dst_addr, int non_blocking) 769 770 { 770 771 struct ib_conn *ib_conn = &iser_conn->ib_conn; 771 772 int err = 0; ··· 780 785 iser_conn->state = ISER_CONN_PENDING; 781 786 782 787 ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler, 783 - (void *)iser_conn, 784 - RDMA_PS_TCP, IB_QPT_RC); 788 + iser_conn, RDMA_PS_TCP, IB_QPT_RC); 785 789 if (IS_ERR(ib_conn->cma_id)) { 786 790 err = PTR_ERR(ib_conn->cma_id); 787 791 iser_err("rdma_create_id failed: %d\n", err); ··· 
823 829 struct ib_conn *ib_conn = &iser_conn->ib_conn; 824 830 struct iser_login_desc *desc = &iser_conn->login_desc; 825 831 struct ib_recv_wr wr; 826 - int ib_ret; 832 + int ret; 827 833 828 834 desc->sge.addr = desc->rsp_dma; 829 835 desc->sge.length = ISER_RX_LOGIN_SIZE; ··· 835 841 wr.num_sge = 1; 836 842 wr.next = NULL; 837 843 838 - ib_conn->post_recv_buf_count++; 839 - ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL); 840 - if (ib_ret) { 841 - iser_err("ib_post_recv failed ret=%d\n", ib_ret); 842 - ib_conn->post_recv_buf_count--; 843 - } 844 + ret = ib_post_recv(ib_conn->qp, &wr, NULL); 845 + if (unlikely(ret)) 846 + iser_err("ib_post_recv login failed ret=%d\n", ret); 844 847 845 - return ib_ret; 848 + return ret; 846 849 } 847 850 848 - int iser_post_recvm(struct iser_conn *iser_conn, int count) 851 + int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc) 849 852 { 850 853 struct ib_conn *ib_conn = &iser_conn->ib_conn; 851 - unsigned int my_rx_head = iser_conn->rx_desc_head; 852 - struct iser_rx_desc *rx_desc; 853 - struct ib_recv_wr *wr; 854 - int i, ib_ret; 854 + struct ib_recv_wr wr; 855 + int ret; 855 856 856 - for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) { 857 - rx_desc = &iser_conn->rx_descs[my_rx_head]; 858 - rx_desc->cqe.done = iser_task_rsp; 859 - wr->wr_cqe = &rx_desc->cqe; 860 - wr->sg_list = &rx_desc->rx_sg; 861 - wr->num_sge = 1; 862 - wr->next = wr + 1; 863 - my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask; 864 - } 857 + rx_desc->cqe.done = iser_task_rsp; 858 + wr.wr_cqe = &rx_desc->cqe; 859 + wr.sg_list = &rx_desc->rx_sg; 860 + wr.num_sge = 1; 861 + wr.next = NULL; 865 862 866 - wr--; 867 - wr->next = NULL; /* mark end of work requests list */ 863 + ret = ib_post_recv(ib_conn->qp, &wr, NULL); 864 + if (unlikely(ret)) 865 + iser_err("ib_post_recv failed ret=%d\n", ret); 868 866 869 - ib_conn->post_recv_buf_count += count; 870 - ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL); 871 - if (unlikely(ib_ret)) { 872 - iser_err("ib_post_recv failed ret=%d\n", ib_ret); 873 - ib_conn->post_recv_buf_count -= count; 874 - } else 875 - iser_conn->rx_desc_head = my_rx_head; 876 - 877 - return ib_ret; 867 + return ret; 878 868 } 879 869 880 870 ··· 866 888 * iser_post_send - Initiate a Send DTO operation 867 889 * @ib_conn: connection RDMA resources 868 890 * @tx_desc: iSER TX descriptor 869 - * @signal: true to send work request as SIGNALED 870 891 * 871 892 * Return: 0 on success, -1 on failure 872 893 */ 873 - int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, 874 - bool signal) 894 + int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc) 875 895 { 876 896 struct ib_send_wr *wr = &tx_desc->send_wr; 877 897 struct ib_send_wr *first_wr; 878 - int ib_ret; 898 + int ret; 879 899 880 900 ib_dma_sync_single_for_device(ib_conn->device->ib_device, 881 901 tx_desc->dma_addr, ISER_HEADERS_LEN, ··· 884 908 wr->sg_list = tx_desc->tx_sg; 885 909 wr->num_sge = tx_desc->num_sge; 886 910 wr->opcode = IB_WR_SEND; 887 - wr->send_flags = signal ? 
IB_SEND_SIGNALED : 0; 911 + wr->send_flags = IB_SEND_SIGNALED; 888 912 889 913 if (tx_desc->inv_wr.next) 890 914 first_wr = &tx_desc->inv_wr; ··· 893 917 else 894 918 first_wr = wr; 895 919 896 - ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL); 897 - if (unlikely(ib_ret)) 920 + ret = ib_post_send(ib_conn->qp, first_wr, NULL); 921 + if (unlikely(ret)) 898 922 iser_err("ib_post_send failed, ret:%d opcode:%d\n", 899 - ib_ret, wr->opcode); 923 + ret, wr->opcode); 900 924 901 - return ib_ret; 925 + return ret; 902 926 } 903 927 904 928 u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
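
Two behavioral changes stand out in the iser_verbs.c hunks beyond the cast removals and NULL-check cleanups. First, send completions are no longer suppressed: every send WR is posted with IB_SEND_SIGNALED, so each one produces a CQE and send-queue accounting becomes deterministic. Second, receive buffers are no longer posted in batches over a cached ring head (rx_desc_head and post_recv_buf_count are gone); a single descriptor is reposted per call. A minimal sketch of that one-WR repost pattern, with illustrative names rather than the driver's:

	static int repost_rx(struct ib_qp *qp, struct ib_cqe *cqe, struct ib_sge *sge)
	{
		/* one ib_recv_wr on the stack; no ring state to maintain */
		struct ib_recv_wr wr = {
			.wr_cqe  = cqe,		/* completion dispatched via CQE callback */
			.sg_list = sge,
			.num_sge = 1,
			.next    = NULL,
		};

		return ib_post_recv(qp, &wr, NULL);
	}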
+4 -4
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
··· 13 13 
 14 14 void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
 15 15 {
 16 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
 17 - struct rtrs_clt_stats *stats = sess->stats;
 16 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
 17 + struct rtrs_clt_stats *stats = clt_path->stats;
 18 18 struct rtrs_clt_stats_pcpu *s;
 19 19 int cpu;
 20 20 
··· 180 180 
 181 181 void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
 182 182 {
 183 183 struct rtrs_clt_con *con = req->con;
 184 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
 185 - struct rtrs_clt_stats *stats = sess->stats;
 183 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
 184 + struct rtrs_clt_stats *stats = clt_path->stats;
 185 185 unsigned int len;
 186 186 
 187 187 len = req->usr_len + req->data_len;
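
The rtrs-clt-stats.c change is part of the tree-wide sess -> path rename: the generic object hanging off each connection is now a struct rtrs_path, and the client-side cast helper follows suit. Judging by the clt_path->s.* accesses elsewhere in the series, to_clt_path() is presumably the usual container_of() accessor, along these lines (sketch, assuming the embedded generic member is still named "s"):

	static inline struct rtrs_clt_path *to_clt_path(struct rtrs_path *p)
	{
		/* struct rtrs_clt_path embeds its generic struct rtrs_path as ->s */
		return container_of(p, struct rtrs_clt_path, s);
	}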
+74 -71
drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
··· 16 16 #define MIN_MAX_RECONN_ATT -1 17 17 #define MAX_MAX_RECONN_ATT 9999 18 18 19 - static void rtrs_clt_sess_release(struct kobject *kobj) 19 + static void rtrs_clt_path_release(struct kobject *kobj) 20 20 { 21 - struct rtrs_clt_sess *sess; 21 + struct rtrs_clt_path *clt_path; 22 22 23 - sess = container_of(kobj, struct rtrs_clt_sess, kobj); 23 + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); 24 24 25 - free_sess(sess); 25 + free_path(clt_path); 26 26 } 27 27 28 28 static struct kobj_type ktype_sess = { 29 29 .sysfs_ops = &kobj_sysfs_ops, 30 - .release = rtrs_clt_sess_release 30 + .release = rtrs_clt_path_release 31 31 }; 32 32 33 - static void rtrs_clt_sess_stats_release(struct kobject *kobj) 33 + static void rtrs_clt_path_stats_release(struct kobject *kobj) 34 34 { 35 35 struct rtrs_clt_stats *stats; 36 36 ··· 43 43 44 44 static struct kobj_type ktype_stats = { 45 45 .sysfs_ops = &kobj_sysfs_ops, 46 - .release = rtrs_clt_sess_stats_release, 46 + .release = rtrs_clt_path_stats_release, 47 47 }; 48 48 49 49 static ssize_t max_reconnect_attempts_show(struct device *dev, 50 50 struct device_attribute *attr, 51 51 char *page) 52 52 { 53 - struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); 53 + struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, 54 + dev); 54 55 55 56 return sysfs_emit(page, "%d\n", 56 57 rtrs_clt_get_max_reconnect_attempts(clt)); ··· 64 63 { 65 64 int value; 66 65 int ret; 67 - struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); 66 + struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, 67 + dev); 68 68 69 69 ret = kstrtoint(buf, 10, &value); 70 70 if (ret) { ··· 92 90 struct device_attribute *attr, 93 91 char *page) 94 92 { 95 - struct rtrs_clt *clt; 93 + struct rtrs_clt_sess *clt; 96 94 97 - clt = container_of(dev, struct rtrs_clt, dev); 95 + clt = container_of(dev, struct rtrs_clt_sess, dev); 98 96 99 97 switch (clt->mp_policy) { 100 98 case MP_POLICY_RR: ··· 116 114 const char *buf, 117 115 size_t count) 118 116 { 119 - struct rtrs_clt *clt; 117 + struct rtrs_clt_sess *clt; 120 118 int value; 121 119 int ret; 122 120 size_t len = 0; 123 121 124 - clt = container_of(dev, struct rtrs_clt, dev); 122 + clt = container_of(dev, struct rtrs_clt_sess, dev); 125 123 126 124 ret = kstrtoint(buf, 10, &value); 127 125 if (!ret && (value == MP_POLICY_RR || ··· 171 169 .src = &srcaddr, 172 170 .dst = &dstaddr 173 171 }; 174 - struct rtrs_clt *clt; 172 + struct rtrs_clt_sess *clt; 175 173 const char *nl; 176 174 size_t len; 177 175 int err; 178 176 179 - clt = container_of(dev, struct rtrs_clt, dev); 177 + clt = container_of(dev, struct rtrs_clt_sess, dev); 180 178 181 179 nl = strchr(buf, '\n'); 182 180 if (nl) ··· 199 197 static ssize_t rtrs_clt_state_show(struct kobject *kobj, 200 198 struct kobj_attribute *attr, char *page) 201 199 { 202 - struct rtrs_clt_sess *sess; 200 + struct rtrs_clt_path *clt_path; 203 201 204 - sess = container_of(kobj, struct rtrs_clt_sess, kobj); 205 - if (sess->state == RTRS_CLT_CONNECTED) 202 + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); 203 + if (clt_path->state == RTRS_CLT_CONNECTED) 206 204 return sysfs_emit(page, "connected\n"); 207 205 208 206 return sysfs_emit(page, "disconnected\n"); ··· 221 219 struct kobj_attribute *attr, 222 220 const char *buf, size_t count) 223 221 { 224 - struct rtrs_clt_sess *sess; 222 + struct rtrs_clt_path *clt_path; 225 223 int ret; 226 224 227 - sess = container_of(kobj, struct rtrs_clt_sess, kobj); 225 + clt_path = container_of(kobj, 
struct rtrs_clt_path, kobj); 228 226 if (!sysfs_streq(buf, "1")) { 229 - rtrs_err(sess->clt, "%s: unknown value: '%s'\n", 227 + rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n", 230 228 attr->attr.name, buf); 231 229 return -EINVAL; 232 230 } 233 - ret = rtrs_clt_reconnect_from_sysfs(sess); 231 + ret = rtrs_clt_reconnect_from_sysfs(clt_path); 234 232 if (ret) 235 233 return ret; 236 234 ··· 251 249 struct kobj_attribute *attr, 252 250 const char *buf, size_t count) 253 251 { 254 - struct rtrs_clt_sess *sess; 252 + struct rtrs_clt_path *clt_path; 255 253 256 - sess = container_of(kobj, struct rtrs_clt_sess, kobj); 254 + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); 257 255 if (!sysfs_streq(buf, "1")) { 258 - rtrs_err(sess->clt, "%s: unknown value: '%s'\n", 256 + rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n", 259 257 attr->attr.name, buf); 260 258 return -EINVAL; 261 259 } 262 - rtrs_clt_close_conns(sess, true); 260 + rtrs_clt_close_conns(clt_path, true); 263 261 264 262 return count; 265 263 } ··· 278 276 struct kobj_attribute *attr, 279 277 const char *buf, size_t count) 280 278 { 281 - struct rtrs_clt_sess *sess; 279 + struct rtrs_clt_path *clt_path; 282 280 int ret; 283 281 284 - sess = container_of(kobj, struct rtrs_clt_sess, kobj); 282 + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); 285 283 if (!sysfs_streq(buf, "1")) { 286 - rtrs_err(sess->clt, "%s: unknown value: '%s'\n", 284 + rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n", 287 285 attr->attr.name, buf); 288 286 return -EINVAL; 289 287 } 290 - ret = rtrs_clt_remove_path_from_sysfs(sess, &attr->attr); 288 + ret = rtrs_clt_remove_path_from_sysfs(clt_path, &attr->attr); 291 289 if (ret) 292 290 return ret; 293 291 ··· 335 333 struct kobj_attribute *attr, 336 334 char *page) 337 335 { 338 - struct rtrs_clt_sess *sess; 336 + struct rtrs_clt_path *clt_path; 339 337 340 - sess = container_of(kobj, typeof(*sess), kobj); 338 + clt_path = container_of(kobj, typeof(*clt_path), kobj); 341 339 342 - return sysfs_emit(page, "%u\n", sess->hca_port); 340 + return sysfs_emit(page, "%u\n", clt_path->hca_port); 343 341 } 344 342 345 343 static struct kobj_attribute rtrs_clt_hca_port_attr = ··· 349 347 struct kobj_attribute *attr, 350 348 char *page) 351 349 { 352 - struct rtrs_clt_sess *sess; 350 + struct rtrs_clt_path *clt_path; 353 351 354 - sess = container_of(kobj, struct rtrs_clt_sess, kobj); 352 + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); 355 353 356 - return sysfs_emit(page, "%s\n", sess->hca_name); 354 + return sysfs_emit(page, "%s\n", clt_path->hca_name); 357 355 } 358 356 359 357 static struct kobj_attribute rtrs_clt_hca_name_attr = ··· 363 361 struct kobj_attribute *attr, 364 362 char *page) 365 363 { 366 - struct rtrs_clt_sess *sess; 364 + struct rtrs_clt_path *clt_path; 367 365 368 - sess = container_of(kobj, struct rtrs_clt_sess, kobj); 366 + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); 369 367 370 368 return sysfs_emit(page, "%lld ns\n", 371 - ktime_to_ns(sess->s.hb_cur_latency)); 369 + ktime_to_ns(clt_path->s.hb_cur_latency)); 372 370 } 373 371 374 372 static struct kobj_attribute rtrs_clt_cur_latency_attr = ··· 378 376 struct kobj_attribute *attr, 379 377 char *page) 380 378 { 381 - struct rtrs_clt_sess *sess; 379 + struct rtrs_clt_path *clt_path; 382 380 int len; 383 381 384 - sess = container_of(kobj, struct rtrs_clt_sess, kobj); 385 - len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page, 382 + clt_path = container_of(kobj, struct rtrs_clt_path, 
kobj); 383 + len = sockaddr_to_str((struct sockaddr *)&clt_path->s.src_addr, page, 386 384 PAGE_SIZE); 387 385 len += sysfs_emit_at(page, len, "\n"); 388 386 return len; ··· 395 393 struct kobj_attribute *attr, 396 394 char *page) 397 395 { 398 - struct rtrs_clt_sess *sess; 396 + struct rtrs_clt_path *clt_path; 399 397 int len; 400 398 401 - sess = container_of(kobj, struct rtrs_clt_sess, kobj); 402 - len = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, page, 399 + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); 400 + len = sockaddr_to_str((struct sockaddr *)&clt_path->s.dst_addr, page, 403 401 PAGE_SIZE); 404 402 len += sysfs_emit_at(page, len, "\n"); 405 403 return len; ··· 408 406 static struct kobj_attribute rtrs_clt_dst_addr_attr = 409 407 __ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL); 410 408 411 - static struct attribute *rtrs_clt_sess_attrs[] = { 409 + static struct attribute *rtrs_clt_path_attrs[] = { 412 410 &rtrs_clt_hca_name_attr.attr, 413 411 &rtrs_clt_hca_port_attr.attr, 414 412 &rtrs_clt_src_addr_attr.attr, ··· 421 419 NULL, 422 420 }; 423 421 424 - static const struct attribute_group rtrs_clt_sess_attr_group = { 425 - .attrs = rtrs_clt_sess_attrs, 422 + static const struct attribute_group rtrs_clt_path_attr_group = { 423 + .attrs = rtrs_clt_path_attrs, 426 424 }; 427 425 428 - int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess) 426 + int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path) 429 427 { 430 - struct rtrs_clt *clt = sess->clt; 428 + struct rtrs_clt_sess *clt = clt_path->clt; 431 429 char str[NAME_MAX]; 432 430 int err; 433 431 struct rtrs_addr path = { 434 - .src = &sess->s.src_addr, 435 - .dst = &sess->s.dst_addr, 432 + .src = &clt_path->s.src_addr, 433 + .dst = &clt_path->s.dst_addr, 436 434 }; 437 435 438 436 rtrs_addr_to_str(&path, str, sizeof(str)); 439 - err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths, 437 + err = kobject_init_and_add(&clt_path->kobj, &ktype_sess, 438 + clt->kobj_paths, 440 439 "%s", str); 441 440 if (err) { 442 441 pr_err("kobject_init_and_add: %d\n", err); 443 - kobject_put(&sess->kobj); 442 + kobject_put(&clt_path->kobj); 444 443 return err; 445 444 } 446 - err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group); 445 + err = sysfs_create_group(&clt_path->kobj, &rtrs_clt_path_attr_group); 447 446 if (err) { 448 447 pr_err("sysfs_create_group(): %d\n", err); 449 448 goto put_kobj; 450 449 } 451 - err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats, 452 - &sess->kobj, "stats"); 450 + err = kobject_init_and_add(&clt_path->stats->kobj_stats, &ktype_stats, 451 + &clt_path->kobj, "stats"); 453 452 if (err) { 454 453 pr_err("kobject_init_and_add: %d\n", err); 455 - kobject_put(&sess->stats->kobj_stats); 454 + kobject_put(&clt_path->stats->kobj_stats); 456 455 goto remove_group; 457 456 } 458 457 459 - err = sysfs_create_group(&sess->stats->kobj_stats, 458 + err = sysfs_create_group(&clt_path->stats->kobj_stats, 460 459 &rtrs_clt_stats_attr_group); 461 460 if (err) { 462 461 pr_err("failed to create stats sysfs group, err: %d\n", err); ··· 467 464 return 0; 468 465 469 466 put_kobj_stats: 470 - kobject_del(&sess->stats->kobj_stats); 471 - kobject_put(&sess->stats->kobj_stats); 467 + kobject_del(&clt_path->stats->kobj_stats); 468 + kobject_put(&clt_path->stats->kobj_stats); 472 469 remove_group: 473 - sysfs_remove_group(&sess->kobj, &rtrs_clt_sess_attr_group); 470 + sysfs_remove_group(&clt_path->kobj, &rtrs_clt_path_attr_group); 474 471 put_kobj: 475 - 
kobject_del(&sess->kobj); 476 - kobject_put(&sess->kobj); 472 + kobject_del(&clt_path->kobj); 473 + kobject_put(&clt_path->kobj); 477 474 478 475 return err; 479 476 } 480 477 481 - void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess, 478 + void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path, 482 479 const struct attribute *sysfs_self) 483 480 { 484 - kobject_del(&sess->stats->kobj_stats); 485 - kobject_put(&sess->stats->kobj_stats); 481 + kobject_del(&clt_path->stats->kobj_stats); 482 + kobject_put(&clt_path->stats->kobj_stats); 486 483 if (sysfs_self) 487 - sysfs_remove_file_self(&sess->kobj, sysfs_self); 488 - kobject_del(&sess->kobj); 484 + sysfs_remove_file_self(&clt_path->kobj, sysfs_self); 485 + kobject_del(&clt_path->kobj); 489 486 } 490 487 491 488 static struct attribute *rtrs_clt_attrs[] = { ··· 499 496 .attrs = rtrs_clt_attrs, 500 497 }; 501 498 502 - int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt) 499 + int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt) 503 500 { 504 501 return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group); 505 502 } 506 503 507 - void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt) 504 + void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt) 508 505 { 509 506 sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group); 510 507
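
The sysfs hunks carry the same rename, and they preserve a kobject lifetime rule worth calling out: once kobject_init_and_add() has run, the object may only be released through kobject_put(), even when the add itself failed, so that the ktype's ->release callback (here rtrs_clt_path_release(), which calls free_path()) remains the single place the path is freed. The general shape (hypothetical names):

	err = kobject_init_and_add(&obj->kobj, &obj_ktype, parent, "%s", name);
	if (err) {
		/* never kfree(obj) directly here: put() drops the last ref
		 * and invokes ->release, which does the actual freeing */
		kobject_put(&obj->kobj);
		return err;
	}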
+553 -534
drivers/infiniband/ulp/rtrs/rtrs-clt.c
··· 46 46 static struct workqueue_struct *rtrs_wq; 47 47 static struct class *rtrs_clt_dev_class; 48 48 49 - static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt) 49 + static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt) 50 50 { 51 - struct rtrs_clt_sess *sess; 51 + struct rtrs_clt_path *clt_path; 52 52 bool connected = false; 53 53 54 54 rcu_read_lock(); 55 - list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) 56 - connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED; 55 + list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) 56 + connected |= READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED; 57 57 rcu_read_unlock(); 58 58 59 59 return connected; 60 60 } 61 61 62 62 static struct rtrs_permit * 63 - __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type) 63 + __rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type) 64 64 { 65 65 size_t max_depth = clt->queue_depth; 66 66 struct rtrs_permit *permit; ··· 87 87 return permit; 88 88 } 89 89 90 - static inline void __rtrs_put_permit(struct rtrs_clt *clt, 90 + static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt, 91 91 struct rtrs_permit *permit) 92 92 { 93 93 clear_bit_unlock(permit->mem_id, clt->permits_map); ··· 107 107 * Context: 108 108 * Can sleep if @wait == RTRS_PERMIT_WAIT 109 109 */ 110 - struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt, 110 + struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt, 111 111 enum rtrs_clt_con_type con_type, 112 112 enum wait_type can_wait) 113 113 { ··· 142 142 * Context: 143 143 * Does not matter 144 144 */ 145 - void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit) 145 + void rtrs_clt_put_permit(struct rtrs_clt_sess *clt, 146 + struct rtrs_permit *permit) 146 147 { 147 148 if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map))) 148 149 return; ··· 164 163 165 164 /** 166 165 * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit 167 - * @sess: client session pointer 166 + * @clt_path: client path pointer 168 167 * @permit: permit for the allocation of the RDMA buffer 169 168 * Note: 170 169 * IO connection starts from 1. 171 170 * 0 connection is for user messages. 172 171 */ 173 172 static 174 - struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess, 173 + struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path, 175 174 struct rtrs_permit *permit) 176 175 { 177 176 int id = 0; 178 177 179 178 if (permit->con_type == RTRS_IO_CON) 180 - id = (permit->cpu_id % (sess->s.irq_con_num - 1)) + 1; 179 + id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1; 181 180 182 - return to_clt_con(sess->s.con[id]); 181 + return to_clt_con(clt_path->s.con[id]); 183 182 } 184 183 185 184 /** 186 185 * rtrs_clt_change_state() - change the session state through session state 187 186 * machine. 188 187 * 189 - * @sess: client session to change the state of. 188 + * @clt_path: client path to change the state of. 190 189 * @new_state: state to change to. 191 190 * 192 191 * returns true if sess's state is changed to new state, otherwise return false. ··· 194 193 * Locks: 195 194 * state_wq lock must be hold. 
196 195 */ 197 - static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess, 196 + static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path, 198 197 enum rtrs_clt_state new_state) 199 198 { 200 199 enum rtrs_clt_state old_state; 201 200 bool changed = false; 202 201 203 - lockdep_assert_held(&sess->state_wq.lock); 202 + lockdep_assert_held(&clt_path->state_wq.lock); 204 203 205 - old_state = sess->state; 204 + old_state = clt_path->state; 206 205 switch (new_state) { 207 206 case RTRS_CLT_CONNECTING: 208 207 switch (old_state) { ··· 276 275 break; 277 276 } 278 277 if (changed) { 279 - sess->state = new_state; 280 - wake_up_locked(&sess->state_wq); 278 + clt_path->state = new_state; 279 + wake_up_locked(&clt_path->state_wq); 281 280 } 282 281 283 282 return changed; 284 283 } 285 284 286 - static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess, 285 + static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path, 287 286 enum rtrs_clt_state old_state, 288 287 enum rtrs_clt_state new_state) 289 288 { 290 289 bool changed = false; 291 290 292 - spin_lock_irq(&sess->state_wq.lock); 293 - if (sess->state == old_state) 294 - changed = rtrs_clt_change_state(sess, new_state); 295 - spin_unlock_irq(&sess->state_wq.lock); 291 + spin_lock_irq(&clt_path->state_wq.lock); 292 + if (clt_path->state == old_state) 293 + changed = rtrs_clt_change_state(clt_path, new_state); 294 + spin_unlock_irq(&clt_path->state_wq.lock); 296 295 297 296 return changed; 298 297 } 299 298 300 299 static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con) 301 300 { 302 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 301 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 303 302 304 - if (rtrs_clt_change_state_from_to(sess, 303 + if (rtrs_clt_change_state_from_to(clt_path, 305 304 RTRS_CLT_CONNECTED, 306 305 RTRS_CLT_RECONNECTING)) { 307 - struct rtrs_clt *clt = sess->clt; 306 + struct rtrs_clt_sess *clt = clt_path->clt; 308 307 unsigned int delay_ms; 309 308 310 309 /* 311 310 * Normal scenario, reconnect if we were successfully connected 312 311 */ 313 312 delay_ms = clt->reconnect_delay_sec * 1000; 314 - queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 313 + queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 315 314 msecs_to_jiffies(delay_ms + 316 315 prandom_u32() % RTRS_RECONNECT_SEED)); 317 316 } else { ··· 320 319 * so notify waiter with error state, waiter is responsible 321 320 * for cleaning the rest and reconnect if needed. 
322 321 */ 323 - rtrs_clt_change_state_from_to(sess, 322 + rtrs_clt_change_state_from_to(clt_path, 324 323 RTRS_CLT_CONNECTING, 325 324 RTRS_CLT_CONNECTING_ERR); 326 325 } ··· 331 330 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); 332 331 333 332 if (wc->status != IB_WC_SUCCESS) { 334 - rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n", 333 + rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n", 335 334 ib_wc_status_msg(wc->status)); 336 335 rtrs_rdma_error_recovery(con); 337 336 } ··· 351 350 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); 352 351 353 352 if (wc->status != IB_WC_SUCCESS) { 354 - rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n", 353 + rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n", 355 354 ib_wc_status_msg(wc->status)); 356 355 rtrs_rdma_error_recovery(con); 357 356 } ··· 381 380 bool notify, bool can_wait) 382 381 { 383 382 struct rtrs_clt_con *con = req->con; 384 - struct rtrs_clt_sess *sess; 383 + struct rtrs_clt_path *clt_path; 385 384 int err; 386 385 387 386 if (WARN_ON(!req->in_use)) 388 387 return; 389 388 if (WARN_ON(!req->con)) 390 389 return; 391 - sess = to_clt_sess(con->c.sess); 390 + clt_path = to_clt_path(con->c.path); 392 391 393 392 if (req->sg_cnt) { 394 393 if (req->dir == DMA_FROM_DEVICE && req->need_inv) { ··· 418 417 refcount_inc(&req->ref); 419 418 err = rtrs_inv_rkey(req); 420 419 if (err) { 421 - rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n", 420 + rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n", 422 421 req->mr->rkey, err); 423 422 } else if (can_wait) { 424 423 wait_for_completion(&req->inv_comp); ··· 434 433 if (!refcount_dec_and_test(&req->ref)) 435 434 return; 436 435 } 437 - ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, 436 + ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, 438 437 req->sg_cnt, req->dir); 439 438 } 440 439 if (!refcount_dec_and_test(&req->ref)) 441 440 return; 442 441 if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) 443 - atomic_dec(&sess->stats->inflight); 442 + atomic_dec(&clt_path->stats->inflight); 444 443 445 444 req->in_use = false; 446 445 req->con = NULL; 447 446 448 447 if (errno) { 449 - rtrs_err_rl(con->c.sess, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n", 450 - errno, kobject_name(&sess->kobj), sess->hca_name, 451 - sess->hca_port, notify); 448 + rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n", 449 + errno, kobject_name(&clt_path->kobj), clt_path->hca_name, 450 + clt_path->hca_port, notify); 452 451 } 453 452 454 453 if (notify) ··· 460 459 struct rtrs_rbuf *rbuf, u32 off, 461 460 u32 imm, struct ib_send_wr *wr) 462 461 { 463 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 462 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 464 463 enum ib_send_flags flags; 465 464 struct ib_sge sge; 466 465 467 466 if (!req->sg_size) { 468 - rtrs_wrn(con->c.sess, 467 + rtrs_wrn(con->c.path, 469 468 "Doing RDMA Write failed, no data supplied\n"); 470 469 return -EINVAL; 471 470 } ··· 473 472 /* user data and user message in the first list element */ 474 473 sge.addr = req->iu->dma_addr; 475 474 sge.length = req->sg_size; 476 - sge.lkey = sess->s.dev->ib_pd->local_dma_lkey; 475 + sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey; 477 476 478 477 /* 479 478 * From time to time we have to post signalled sends, 480 479 * or send queue will fill up and only QP reset can help. 481 480 */ 482 - flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ? 
481 + flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? 483 482 0 : IB_SEND_SIGNALED; 484 483 485 - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr, 484 + ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, 485 + req->iu->dma_addr, 486 486 req->sg_size, DMA_TO_DEVICE); 487 487 488 488 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1, ··· 491 489 imm, flags, wr, NULL); 492 490 } 493 491 494 - static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id, 492 + static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id, 495 493 s16 errno, bool w_inval) 496 494 { 497 495 struct rtrs_clt_io_req *req; 498 496 499 - if (WARN_ON(msg_id >= sess->queue_depth)) 497 + if (WARN_ON(msg_id >= clt_path->queue_depth)) 500 498 return; 501 499 502 - req = &sess->reqs[msg_id]; 500 + req = &clt_path->reqs[msg_id]; 503 501 /* Drop need_inv if server responded with send with invalidation */ 504 502 req->need_inv &= !w_inval; 505 503 complete_rdma_req(req, errno, true, false); ··· 509 507 { 510 508 struct rtrs_iu *iu; 511 509 int err; 512 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 510 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 513 511 514 - WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0); 512 + WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0); 515 513 iu = container_of(wc->wr_cqe, struct rtrs_iu, 516 514 cqe); 517 515 err = rtrs_iu_post_recv(&con->c, iu); 518 516 if (err) { 519 - rtrs_err(con->c.sess, "post iu failed %d\n", err); 517 + rtrs_err(con->c.path, "post iu failed %d\n", err); 520 518 rtrs_rdma_error_recovery(con); 521 519 } 522 520 } 523 521 524 522 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) 525 523 { 526 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 524 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 527 525 struct rtrs_msg_rkey_rsp *msg; 528 526 u32 imm_type, imm_payload; 529 527 bool w_inval = false; ··· 531 529 u32 buf_id; 532 530 int err; 533 531 534 - WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0); 532 + WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0); 535 533 536 534 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); 537 535 538 536 if (wc->byte_len < sizeof(*msg)) { 539 - rtrs_err(con->c.sess, "rkey response is malformed: size %d\n", 537 + rtrs_err(con->c.path, "rkey response is malformed: size %d\n", 540 538 wc->byte_len); 541 539 goto out; 542 540 } 543 - ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, 541 + ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr, 544 542 iu->size, DMA_FROM_DEVICE); 545 543 msg = iu->buf; 546 544 if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) { 547 - rtrs_err(sess->clt, "rkey response is malformed: type %d\n", 545 + rtrs_err(clt_path->clt, 546 + "rkey response is malformed: type %d\n", 548 547 le16_to_cpu(msg->type)); 549 548 goto out; 550 549 } 551 550 buf_id = le16_to_cpu(msg->buf_id); 552 - if (WARN_ON(buf_id >= sess->queue_depth)) 551 + if (WARN_ON(buf_id >= clt_path->queue_depth)) 553 552 goto out; 554 553 555 554 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload); ··· 563 560 564 561 if (WARN_ON(buf_id != msg_id)) 565 562 goto out; 566 - sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey); 567 - process_io_rsp(sess, msg_id, err, w_inval); 563 + clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey); 564 + process_io_rsp(clt_path, msg_id, err, w_inval); 568 565 } 569 - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr, 
566 + ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr, 570 567 iu->size, DMA_FROM_DEVICE); 571 568 return rtrs_clt_recv_done(con, wc); 572 569 out: ··· 603 600 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) 604 601 { 605 602 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); 606 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 603 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 607 604 u32 imm_type, imm_payload; 608 605 bool w_inval = false; 609 606 int err; 610 607 611 608 if (wc->status != IB_WC_SUCCESS) { 612 609 if (wc->status != IB_WC_WR_FLUSH_ERR) { 613 - rtrs_err(sess->clt, "RDMA failed: %s\n", 610 + rtrs_err(clt_path->clt, "RDMA failed: %s\n", 614 611 ib_wc_status_msg(wc->status)); 615 612 rtrs_rdma_error_recovery(con); 616 613 } ··· 635 632 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM); 636 633 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err); 637 634 638 - process_io_rsp(sess, msg_id, err, w_inval); 635 + process_io_rsp(clt_path, msg_id, err, w_inval); 639 636 } else if (imm_type == RTRS_HB_MSG_IMM) { 640 637 WARN_ON(con->c.cid); 641 - rtrs_send_hb_ack(&sess->s); 642 - if (sess->flags & RTRS_MSG_NEW_RKEY_F) 638 + rtrs_send_hb_ack(&clt_path->s); 639 + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) 643 640 return rtrs_clt_recv_done(con, wc); 644 641 } else if (imm_type == RTRS_HB_ACK_IMM) { 645 642 WARN_ON(con->c.cid); 646 - sess->s.hb_missed_cnt = 0; 647 - sess->s.hb_cur_latency = 648 - ktime_sub(ktime_get(), sess->s.hb_last_sent); 649 - if (sess->flags & RTRS_MSG_NEW_RKEY_F) 643 + clt_path->s.hb_missed_cnt = 0; 644 + clt_path->s.hb_cur_latency = 645 + ktime_sub(ktime_get(), clt_path->s.hb_last_sent); 646 + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) 650 647 return rtrs_clt_recv_done(con, wc); 651 648 } else { 652 - rtrs_wrn(con->c.sess, "Unknown IMM type %u\n", 649 + rtrs_wrn(con->c.path, "Unknown IMM type %u\n", 653 650 imm_type); 654 651 } 655 652 if (w_inval) ··· 661 658 else 662 659 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); 663 660 if (err) { 664 - rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n", 661 + rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n", 665 662 err); 666 663 rtrs_rdma_error_recovery(con); 667 664 } ··· 673 670 WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE || 674 671 wc->wc_flags & IB_WC_WITH_IMM)); 675 672 WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done); 676 - if (sess->flags & RTRS_MSG_NEW_RKEY_F) { 673 + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) { 677 674 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) 678 675 return rtrs_clt_recv_done(con, wc); 679 676 ··· 688 685 break; 689 686 690 687 default: 691 - rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode); 688 + rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode); 692 689 return; 693 690 } 694 691 } ··· 696 693 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size) 697 694 { 698 695 int err, i; 699 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 696 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 700 697 701 698 for (i = 0; i < q_size; i++) { 702 - if (sess->flags & RTRS_MSG_NEW_RKEY_F) { 699 + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) { 703 700 struct rtrs_iu *iu = &con->rsp_ius[i]; 704 701 705 702 err = rtrs_iu_post_recv(&con->c, iu); ··· 713 710 return 0; 714 711 } 715 712 716 - static int post_recv_sess(struct rtrs_clt_sess *sess) 713 + static int post_recv_path(struct rtrs_clt_path *clt_path) 717 714 { 718 715 size_t q_size = 0; 719 716 int err, cid; 720 717 721 - for (cid = 0; 
cid < sess->s.con_num; cid++) { 718 + for (cid = 0; cid < clt_path->s.con_num; cid++) { 722 719 if (cid == 0) 723 720 q_size = SERVICE_CON_QUEUE_DEPTH; 724 721 else 725 - q_size = sess->queue_depth; 722 + q_size = clt_path->queue_depth; 726 723 727 724 /* 728 725 * x2 for RDMA read responses + FR key invalidations, ··· 730 727 */ 731 728 q_size *= 2; 732 729 733 - err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size); 730 + err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size); 734 731 if (err) { 735 - rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err); 732 + rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n", 733 + err); 736 734 return err; 737 735 } 738 736 } ··· 744 740 struct path_it { 745 741 int i; 746 742 struct list_head skip_list; 747 - struct rtrs_clt *clt; 748 - struct rtrs_clt_sess *(*next_path)(struct path_it *it); 743 + struct rtrs_clt_sess *clt; 744 + struct rtrs_clt_path *(*next_path)(struct path_it *it); 749 745 }; 750 746 751 747 /** ··· 777 773 * Locks: 778 774 * rcu_read_lock() must be hold. 779 775 */ 780 - static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it) 776 + static struct rtrs_clt_path *get_next_path_rr(struct path_it *it) 781 777 { 782 - struct rtrs_clt_sess __rcu **ppcpu_path; 783 - struct rtrs_clt_sess *path; 784 - struct rtrs_clt *clt; 778 + struct rtrs_clt_path __rcu **ppcpu_path; 779 + struct rtrs_clt_path *path; 780 + struct rtrs_clt_sess *clt; 785 781 786 782 clt = it->clt; 787 783 ··· 815 811 * Locks: 816 812 * rcu_read_lock() must be hold. 817 813 */ 818 - static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it) 814 + static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it) 819 815 { 820 - struct rtrs_clt_sess *min_path = NULL; 821 - struct rtrs_clt *clt = it->clt; 822 - struct rtrs_clt_sess *sess; 816 + struct rtrs_clt_path *min_path = NULL; 817 + struct rtrs_clt_sess *clt = it->clt; 818 + struct rtrs_clt_path *clt_path; 823 819 int min_inflight = INT_MAX; 824 820 int inflight; 825 821 826 - list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) { 827 - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) 822 + list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { 823 + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) 828 824 continue; 829 825 830 - if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry))) 826 + if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry))) 831 827 continue; 832 828 833 - inflight = atomic_read(&sess->stats->inflight); 829 + inflight = atomic_read(&clt_path->stats->inflight); 834 830 835 831 if (inflight < min_inflight) { 836 832 min_inflight = inflight; 837 - min_path = sess; 833 + min_path = clt_path; 838 834 } 839 835 } 840 836 ··· 866 862 * Therefore the caller MUST check the returned 867 863 * path is NULL and trigger the IO error. 
868 864 */ 869 - static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it) 865 + static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it) 870 866 { 871 - struct rtrs_clt_sess *min_path = NULL; 872 - struct rtrs_clt *clt = it->clt; 873 - struct rtrs_clt_sess *sess; 874 - ktime_t min_latency = INT_MAX; 867 + struct rtrs_clt_path *min_path = NULL; 868 + struct rtrs_clt_sess *clt = it->clt; 869 + struct rtrs_clt_path *clt_path; 870 + ktime_t min_latency = KTIME_MAX; 875 871 ktime_t latency; 876 872 877 - list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) { 878 - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) 873 + list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { 874 + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) 879 875 continue; 880 876 881 - if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry))) 877 + if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry))) 882 878 continue; 883 879 884 - latency = sess->s.hb_cur_latency; 880 + latency = clt_path->s.hb_cur_latency; 885 881 886 882 if (latency < min_latency) { 887 883 min_latency = latency; 888 - min_path = sess; 884 + min_path = clt_path; 889 885 } 890 886 } 891 887 ··· 899 895 return min_path; 900 896 } 901 897 902 - static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt) 898 + static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt) 903 899 { 904 900 INIT_LIST_HEAD(&it->skip_list); 905 901 it->clt = clt; ··· 932 928 * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will 933 929 * also hold the control message of rtrs. 934 930 * @req: an io request holding information about IO. 935 - * @sess: client session 931 + * @clt_path: client path 936 932 * @conf: conformation callback function to notify upper layer. 937 933 * @permit: permit for allocation of RDMA remote buffer 938 934 * @priv: private pointer ··· 944 940 * @dir: direction of the IO. 
945 941 */ 946 942 static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, 947 - struct rtrs_clt_sess *sess, 943 + struct rtrs_clt_path *clt_path, 948 944 void (*conf)(void *priv, int errno), 949 945 struct rtrs_permit *permit, void *priv, 950 946 const struct kvec *vec, size_t usr_len, ··· 962 958 req->sg_cnt = sg_cnt; 963 959 req->priv = priv; 964 960 req->dir = dir; 965 - req->con = rtrs_permit_to_clt_con(sess, permit); 961 + req->con = rtrs_permit_to_clt_con(clt_path, permit); 966 962 req->conf = conf; 967 963 req->need_inv = false; 968 964 req->need_inv_comp = false; 969 965 req->inv_errno = 0; 970 966 refcount_set(&req->ref, 1); 971 - req->mp_policy = sess->clt->mp_policy; 967 + req->mp_policy = clt_path->clt->mp_policy; 972 968 973 969 iov_iter_kvec(&iter, READ, vec, 1, usr_len); 974 970 len = _copy_from_iter(req->iu->buf, usr_len, &iter); ··· 978 974 } 979 975 980 976 static struct rtrs_clt_io_req * 981 - rtrs_clt_get_req(struct rtrs_clt_sess *sess, 977 + rtrs_clt_get_req(struct rtrs_clt_path *clt_path, 982 978 void (*conf)(void *priv, int errno), 983 979 struct rtrs_permit *permit, void *priv, 984 980 const struct kvec *vec, size_t usr_len, ··· 987 983 { 988 984 struct rtrs_clt_io_req *req; 989 985 990 - req = &sess->reqs[permit->mem_id]; 991 - rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len, 986 + req = &clt_path->reqs[permit->mem_id]; 987 + rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len, 992 988 sg, sg_cnt, data_len, dir); 993 989 return req; 994 990 } 995 991 996 992 static struct rtrs_clt_io_req * 997 - rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess, 993 + rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path, 998 994 struct rtrs_clt_io_req *fail_req) 999 995 { 1000 996 struct rtrs_clt_io_req *req; ··· 1003 999 .iov_len = fail_req->usr_len 1004 1000 }; 1005 1001 1006 - req = &alive_sess->reqs[fail_req->permit->mem_id]; 1007 - rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit, 1002 + req = &alive_path->reqs[fail_req->permit->mem_id]; 1003 + rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit, 1008 1004 fail_req->priv, &vec, fail_req->usr_len, 1009 1005 fail_req->sglist, fail_req->sg_cnt, 1010 1006 fail_req->data_len, fail_req->dir); ··· 1017 1013 u32 size, u32 imm, struct ib_send_wr *wr, 1018 1014 struct ib_send_wr *tail) 1019 1015 { 1020 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1016 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 1021 1017 struct ib_sge *sge = req->sge; 1022 1018 enum ib_send_flags flags; 1023 1019 struct scatterlist *sg; ··· 1037 1033 for_each_sg(req->sglist, sg, req->sg_cnt, i) { 1038 1034 sge[i].addr = sg_dma_address(sg); 1039 1035 sge[i].length = sg_dma_len(sg); 1040 - sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey; 1036 + sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; 1041 1037 } 1042 1038 num_sge = 1 + req->sg_cnt; 1043 1039 } 1044 1040 sge[i].addr = req->iu->dma_addr; 1045 1041 sge[i].length = size; 1046 - sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey; 1042 + sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; 1047 1043 1048 1044 /* 1049 1045 * From time to time we have to post signalled sends, 1050 1046 * or send queue will fill up and only QP reset can help. 1051 1047 */ 1052 - flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ? 1048 + flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? 
1053 1049 0 : IB_SEND_SIGNALED; 1054 1050 1055 - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr, 1051 + ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, 1052 + req->iu->dma_addr, 1056 1053 size, DMA_TO_DEVICE); 1057 1054 1058 1055 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge, ··· 1079 1074 static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) 1080 1075 { 1081 1076 struct rtrs_clt_con *con = req->con; 1082 - struct rtrs_sess *s = con->c.sess; 1083 - struct rtrs_clt_sess *sess = to_clt_sess(s); 1077 + struct rtrs_path *s = con->c.path; 1078 + struct rtrs_clt_path *clt_path = to_clt_path(s); 1084 1079 struct rtrs_msg_rdma_write *msg; 1085 1080 1086 1081 struct rtrs_rbuf *rbuf; ··· 1093 1088 1094 1089 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; 1095 1090 1096 - if (tsize > sess->chunk_size) { 1091 + if (tsize > clt_path->chunk_size) { 1097 1092 rtrs_wrn(s, "Write request failed, size too big %zu > %d\n", 1098 - tsize, sess->chunk_size); 1093 + tsize, clt_path->chunk_size); 1099 1094 return -EMSGSIZE; 1100 1095 } 1101 1096 if (req->sg_cnt) { 1102 - count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist, 1097 + count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist, 1103 1098 req->sg_cnt, req->dir); 1104 1099 if (!count) { 1105 1100 rtrs_wrn(s, "Write request failed, map failed\n"); ··· 1116 1111 imm = rtrs_to_io_req_imm(imm); 1117 1112 buf_id = req->permit->mem_id; 1118 1113 req->sg_size = tsize; 1119 - rbuf = &sess->rbufs[buf_id]; 1114 + rbuf = &clt_path->rbufs[buf_id]; 1120 1115 1121 1116 if (count) { 1122 1117 ret = rtrs_map_sg_fr(req, count); ··· 1124 1119 rtrs_err_rl(s, 1125 1120 "Write request failed, failed to map fast reg. data, err: %d\n", 1126 1121 ret); 1127 - ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, 1122 + ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, 1128 1123 req->sg_cnt, req->dir); 1129 1124 return ret; 1130 1125 } ··· 1158 1153 if (ret) { 1159 1154 rtrs_err_rl(s, 1160 1155 "Write request failed: error=%d path=%s [%s:%u]\n", 1161 - ret, kobject_name(&sess->kobj), sess->hca_name, 1162 - sess->hca_port); 1156 + ret, kobject_name(&clt_path->kobj), clt_path->hca_name, 1157 + clt_path->hca_port); 1163 1158 if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) 1164 - atomic_dec(&sess->stats->inflight); 1159 + atomic_dec(&clt_path->stats->inflight); 1165 1160 if (req->sg_cnt) 1166 - ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, 1161 + ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, 1167 1162 req->sg_cnt, req->dir); 1168 1163 } 1169 1164 ··· 1173 1168 static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) 1174 1169 { 1175 1170 struct rtrs_clt_con *con = req->con; 1176 - struct rtrs_sess *s = con->c.sess; 1177 - struct rtrs_clt_sess *sess = to_clt_sess(s); 1171 + struct rtrs_path *s = con->c.path; 1172 + struct rtrs_clt_path *clt_path = to_clt_path(s); 1178 1173 struct rtrs_msg_rdma_read *msg; 1179 - struct rtrs_ib_dev *dev = sess->s.dev; 1174 + struct rtrs_ib_dev *dev = clt_path->s.dev; 1180 1175 1181 1176 struct ib_reg_wr rwr; 1182 1177 struct ib_send_wr *wr = NULL; ··· 1186 1181 1187 1182 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; 1188 1183 1189 - if (tsize > sess->chunk_size) { 1184 + if (tsize > clt_path->chunk_size) { 1190 1185 rtrs_wrn(s, 1191 1186 "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n", 1192 - tsize, sess->chunk_size); 1187 + tsize, clt_path->chunk_size); 1193 1188 return -EMSGSIZE; 1194 1189 } 1195 1190 ··· 1259 1254 */ 1260 
1255 rtrs_clt_update_all_stats(req, READ); 1261 1256 1262 - ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id], 1257 + ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id], 1263 1258 req->data_len, imm, wr); 1264 1259 if (ret) { 1265 1260 rtrs_err_rl(s, 1266 1261 "Read request failed: error=%d path=%s [%s:%u]\n", 1267 - ret, kobject_name(&sess->kobj), sess->hca_name, 1268 - sess->hca_port); 1262 + ret, kobject_name(&clt_path->kobj), clt_path->hca_name, 1263 + clt_path->hca_port); 1269 1264 if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) 1270 - atomic_dec(&sess->stats->inflight); 1265 + atomic_dec(&clt_path->stats->inflight); 1271 1266 req->need_inv = false; 1272 1267 if (req->sg_cnt) 1273 1268 ib_dma_unmap_sg(dev->ib_dev, req->sglist, ··· 1282 1277 * @clt: clt context 1283 1278 * @fail_req: a failed io request. 1284 1279 */ 1285 - static int rtrs_clt_failover_req(struct rtrs_clt *clt, 1280 + static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt, 1286 1281 struct rtrs_clt_io_req *fail_req) 1287 1282 { 1288 - struct rtrs_clt_sess *alive_sess; 1283 + struct rtrs_clt_path *alive_path; 1289 1284 struct rtrs_clt_io_req *req; 1290 1285 int err = -ECONNABORTED; 1291 1286 struct path_it it; 1292 1287 1293 1288 rcu_read_lock(); 1294 1289 for (path_it_init(&it, clt); 1295 - (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num; 1290 + (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num; 1296 1291 it.i++) { 1297 - if (READ_ONCE(alive_sess->state) != RTRS_CLT_CONNECTED) 1292 + if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED) 1298 1293 continue; 1299 - req = rtrs_clt_get_copy_req(alive_sess, fail_req); 1294 + req = rtrs_clt_get_copy_req(alive_path, fail_req); 1300 1295 if (req->dir == DMA_TO_DEVICE) 1301 1296 err = rtrs_clt_write_req(req); 1302 1297 else ··· 1306 1301 continue; 1307 1302 } 1308 1303 /* Success path */ 1309 - rtrs_clt_inc_failover_cnt(alive_sess->stats); 1304 + rtrs_clt_inc_failover_cnt(alive_path->stats); 1310 1305 break; 1311 1306 } 1312 1307 path_it_deinit(&it); ··· 1315 1310 return err; 1316 1311 } 1317 1312 1318 - static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess) 1313 + static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path) 1319 1314 { 1320 - struct rtrs_clt *clt = sess->clt; 1315 + struct rtrs_clt_sess *clt = clt_path->clt; 1321 1316 struct rtrs_clt_io_req *req; 1322 1317 int i, err; 1323 1318 1324 - if (!sess->reqs) 1319 + if (!clt_path->reqs) 1325 1320 return; 1326 - for (i = 0; i < sess->queue_depth; ++i) { 1327 - req = &sess->reqs[i]; 1321 + for (i = 0; i < clt_path->queue_depth; ++i) { 1322 + req = &clt_path->reqs[i]; 1328 1323 if (!req->in_use) 1329 1324 continue; 1330 1325 ··· 1342 1337 } 1343 1338 } 1344 1339 1345 - static void free_sess_reqs(struct rtrs_clt_sess *sess) 1340 + static void free_path_reqs(struct rtrs_clt_path *clt_path) 1346 1341 { 1347 1342 struct rtrs_clt_io_req *req; 1348 1343 int i; 1349 1344 1350 - if (!sess->reqs) 1345 + if (!clt_path->reqs) 1351 1346 return; 1352 - for (i = 0; i < sess->queue_depth; ++i) { 1353 - req = &sess->reqs[i]; 1347 + for (i = 0; i < clt_path->queue_depth; ++i) { 1348 + req = &clt_path->reqs[i]; 1354 1349 if (req->mr) 1355 1350 ib_dereg_mr(req->mr); 1356 1351 kfree(req->sge); 1357 - rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1); 1352 + rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1); 1358 1353 } 1359 - kfree(sess->reqs); 1360 - sess->reqs = NULL; 1354 + kfree(clt_path->reqs); 1355 + clt_path->reqs = NULL; 1361 1356 } 1362 1357 1363 - static int 
alloc_sess_reqs(struct rtrs_clt_sess *sess) 1358 + static int alloc_path_reqs(struct rtrs_clt_path *clt_path) 1364 1359 { 1365 1360 struct rtrs_clt_io_req *req; 1366 1361 int i, err = -ENOMEM; 1367 1362 1368 - sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs), 1369 - GFP_KERNEL); 1370 - if (!sess->reqs) 1363 + clt_path->reqs = kcalloc(clt_path->queue_depth, 1364 + sizeof(*clt_path->reqs), 1365 + GFP_KERNEL); 1366 + if (!clt_path->reqs) 1371 1367 return -ENOMEM; 1372 1368 1373 - for (i = 0; i < sess->queue_depth; ++i) { 1374 - req = &sess->reqs[i]; 1375 - req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL, 1376 - sess->s.dev->ib_dev, 1369 + for (i = 0; i < clt_path->queue_depth; ++i) { 1370 + req = &clt_path->reqs[i]; 1371 + req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL, 1372 + clt_path->s.dev->ib_dev, 1377 1373 DMA_TO_DEVICE, 1378 1374 rtrs_clt_rdma_done); 1379 1375 if (!req->iu) ··· 1384 1378 if (!req->sge) 1385 1379 goto out; 1386 1380 1387 - req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, 1388 - sess->max_pages_per_mr); 1381 + req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd, 1382 + IB_MR_TYPE_MEM_REG, 1383 + clt_path->max_pages_per_mr); 1389 1384 if (IS_ERR(req->mr)) { 1390 1385 err = PTR_ERR(req->mr); 1391 1386 req->mr = NULL; 1392 - pr_err("Failed to alloc sess->max_pages_per_mr %d\n", 1393 - sess->max_pages_per_mr); 1387 + pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n", 1388 + clt_path->max_pages_per_mr); 1394 1389 goto out; 1395 1390 } 1396 1391 ··· 1401 1394 return 0; 1402 1395 1403 1396 out: 1404 - free_sess_reqs(sess); 1397 + free_path_reqs(clt_path); 1405 1398 1406 1399 return err; 1407 1400 } 1408 1401 1409 - static int alloc_permits(struct rtrs_clt *clt) 1402 + static int alloc_permits(struct rtrs_clt_sess *clt) 1410 1403 { 1411 1404 unsigned int chunk_bits; 1412 1405 int err, i; ··· 1440 1433 return err; 1441 1434 } 1442 1435 1443 - static void free_permits(struct rtrs_clt *clt) 1436 + static void free_permits(struct rtrs_clt_sess *clt) 1444 1437 { 1445 1438 if (clt->permits_map) { 1446 1439 size_t sz = clt->queue_depth; ··· 1454 1447 clt->permits = NULL; 1455 1448 } 1456 1449 1457 - static void query_fast_reg_mode(struct rtrs_clt_sess *sess) 1450 + static void query_fast_reg_mode(struct rtrs_clt_path *clt_path) 1458 1451 { 1459 1452 struct ib_device *ib_dev; 1460 1453 u64 max_pages_per_mr; 1461 1454 int mr_page_shift; 1462 1455 1463 - ib_dev = sess->s.dev->ib_dev; 1456 + ib_dev = clt_path->s.dev->ib_dev; 1464 1457 1465 1458 /* 1466 1459 * Use the smallest page size supported by the HCA, down to a ··· 1470 1463 mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1); 1471 1464 max_pages_per_mr = ib_dev->attrs.max_mr_size; 1472 1465 do_div(max_pages_per_mr, (1ull << mr_page_shift)); 1473 - sess->max_pages_per_mr = 1474 - min3(sess->max_pages_per_mr, (u32)max_pages_per_mr, 1466 + clt_path->max_pages_per_mr = 1467 + min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr, 1475 1468 ib_dev->attrs.max_fast_reg_page_list_len); 1476 - sess->clt->max_segments = 1477 - min(sess->max_pages_per_mr, sess->clt->max_segments); 1469 + clt_path->clt->max_segments = 1470 + min(clt_path->max_pages_per_mr, clt_path->clt->max_segments); 1478 1471 } 1479 1472 1480 - static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess, 1473 + static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path, 1481 1474 enum rtrs_clt_state new_state, 1482 1475 enum rtrs_clt_state *old_state) 1483 1476 { 1484 1477 bool changed; 1485 
1478 1486 - spin_lock_irq(&sess->state_wq.lock); 1479 + spin_lock_irq(&clt_path->state_wq.lock); 1487 1480 if (old_state) 1488 - *old_state = sess->state; 1489 - changed = rtrs_clt_change_state(sess, new_state); 1490 - spin_unlock_irq(&sess->state_wq.lock); 1481 + *old_state = clt_path->state; 1482 + changed = rtrs_clt_change_state(clt_path, new_state); 1483 + spin_unlock_irq(&clt_path->state_wq.lock); 1491 1484 1492 1485 return changed; 1493 1486 } ··· 1499 1492 rtrs_rdma_error_recovery(con); 1500 1493 } 1501 1494 1502 - static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess) 1495 + static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path) 1503 1496 { 1504 - rtrs_init_hb(&sess->s, &io_comp_cqe, 1497 + rtrs_init_hb(&clt_path->s, &io_comp_cqe, 1505 1498 RTRS_HB_INTERVAL_MS, 1506 1499 RTRS_HB_MISSED_MAX, 1507 1500 rtrs_clt_hb_err_handler, ··· 1511 1504 static void rtrs_clt_reconnect_work(struct work_struct *work); 1512 1505 static void rtrs_clt_close_work(struct work_struct *work); 1513 1506 1514 - static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt, 1507 + static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt, 1515 1508 const struct rtrs_addr *path, 1516 1509 size_t con_num, u32 nr_poll_queues) 1517 1510 { 1518 - struct rtrs_clt_sess *sess; 1511 + struct rtrs_clt_path *clt_path; 1519 1512 int err = -ENOMEM; 1520 1513 int cpu; 1521 1514 size_t total_con; 1522 1515 1523 - sess = kzalloc(sizeof(*sess), GFP_KERNEL); 1524 - if (!sess) 1516 + clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL); 1517 + if (!clt_path) 1525 1518 goto err; 1526 1519 1527 1520 /* ··· 1529 1522 * +1: Extra connection for user messages 1530 1523 */ 1531 1524 total_con = con_num + nr_poll_queues + 1; 1532 - sess->s.con = kcalloc(total_con, sizeof(*sess->s.con), GFP_KERNEL); 1533 - if (!sess->s.con) 1534 - goto err_free_sess; 1525 + clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con), 1526 + GFP_KERNEL); 1527 + if (!clt_path->s.con) 1528 + goto err_free_path; 1535 1529 1536 - sess->s.con_num = total_con; 1537 - sess->s.irq_con_num = con_num + 1; 1530 + clt_path->s.con_num = total_con; 1531 + clt_path->s.irq_con_num = con_num + 1; 1538 1532 1539 - sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL); 1540 - if (!sess->stats) 1533 + clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL); 1534 + if (!clt_path->stats) 1541 1535 goto err_free_con; 1542 1536 1543 - mutex_init(&sess->init_mutex); 1544 - uuid_gen(&sess->s.uuid); 1545 - memcpy(&sess->s.dst_addr, path->dst, 1537 + mutex_init(&clt_path->init_mutex); 1538 + uuid_gen(&clt_path->s.uuid); 1539 + memcpy(&clt_path->s.dst_addr, path->dst, 1546 1540 rdma_addr_size((struct sockaddr *)path->dst)); 1547 1541 1548 1542 /* ··· 1552 1544 * the sess->src_addr will contain only zeros, which is then fine. 
1553 1545 */ 1554 1546 if (path->src) 1555 - memcpy(&sess->s.src_addr, path->src, 1547 + memcpy(&clt_path->s.src_addr, path->src, 1556 1548 rdma_addr_size((struct sockaddr *)path->src)); 1557 - strscpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname)); 1558 - sess->clt = clt; 1559 - sess->max_pages_per_mr = RTRS_MAX_SEGMENTS; 1560 - init_waitqueue_head(&sess->state_wq); 1561 - sess->state = RTRS_CLT_CONNECTING; 1562 - atomic_set(&sess->connected_cnt, 0); 1563 - INIT_WORK(&sess->close_work, rtrs_clt_close_work); 1564 - INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work); 1565 - rtrs_clt_init_hb(sess); 1549 + strscpy(clt_path->s.sessname, clt->sessname, 1550 + sizeof(clt_path->s.sessname)); 1551 + clt_path->clt = clt; 1552 + clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS; 1553 + init_waitqueue_head(&clt_path->state_wq); 1554 + clt_path->state = RTRS_CLT_CONNECTING; 1555 + atomic_set(&clt_path->connected_cnt, 0); 1556 + INIT_WORK(&clt_path->close_work, rtrs_clt_close_work); 1557 + INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work); 1558 + rtrs_clt_init_hb(clt_path); 1566 1559 1567 - sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry)); 1568 - if (!sess->mp_skip_entry) 1560 + clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry)); 1561 + if (!clt_path->mp_skip_entry) 1569 1562 goto err_free_stats; 1570 1563 1571 1564 for_each_possible_cpu(cpu) 1572 - INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu)); 1565 + INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu)); 1573 1566 1574 - err = rtrs_clt_init_stats(sess->stats); 1567 + err = rtrs_clt_init_stats(clt_path->stats); 1575 1568 if (err) 1576 1569 goto err_free_percpu; 1577 1570 1578 - return sess; 1571 + return clt_path; 1579 1572 1580 1573 err_free_percpu: 1581 - free_percpu(sess->mp_skip_entry); 1574 + free_percpu(clt_path->mp_skip_entry); 1582 1575 err_free_stats: 1583 - kfree(sess->stats); 1576 + kfree(clt_path->stats); 1584 1577 err_free_con: 1585 - kfree(sess->s.con); 1586 - err_free_sess: 1587 - kfree(sess); 1578 + kfree(clt_path->s.con); 1579 + err_free_path: 1580 + kfree(clt_path); 1588 1581 err: 1589 1582 return ERR_PTR(err); 1590 1583 } 1591 1584 1592 - void free_sess(struct rtrs_clt_sess *sess) 1585 + void free_path(struct rtrs_clt_path *clt_path) 1593 1586 { 1594 - free_percpu(sess->mp_skip_entry); 1595 - mutex_destroy(&sess->init_mutex); 1596 - kfree(sess->s.con); 1597 - kfree(sess->rbufs); 1598 - kfree(sess); 1587 + free_percpu(clt_path->mp_skip_entry); 1588 + mutex_destroy(&clt_path->init_mutex); 1589 + kfree(clt_path->s.con); 1590 + kfree(clt_path->rbufs); 1591 + kfree(clt_path); 1599 1592 } 1600 1593 1601 - static int create_con(struct rtrs_clt_sess *sess, unsigned int cid) 1594 + static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid) 1602 1595 { 1603 1596 struct rtrs_clt_con *con; 1604 1597 ··· 1610 1601 /* Map first two connections to the first CPU */ 1611 1602 con->cpu = (cid ? 
cid - 1 : 0) % nr_cpu_ids; 1612 1603 con->c.cid = cid; 1613 - con->c.sess = &sess->s; 1604 + con->c.path = &clt_path->s; 1614 1605 /* Align with srv, init as 1 */ 1615 1606 atomic_set(&con->c.wr_cnt, 1); 1616 1607 mutex_init(&con->con_mutex); 1617 1608 1618 - sess->s.con[cid] = &con->c; 1609 + clt_path->s.con[cid] = &con->c; 1619 1610 1620 1611 return 0; 1621 1612 } 1622 1613 1623 1614 static void destroy_con(struct rtrs_clt_con *con) 1624 1615 { 1625 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1616 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 1626 1617 1627 - sess->s.con[con->c.cid] = NULL; 1618 + clt_path->s.con[con->c.cid] = NULL; 1628 1619 mutex_destroy(&con->con_mutex); 1629 1620 kfree(con); 1630 1621 } 1631 1622 1632 1623 static int create_con_cq_qp(struct rtrs_clt_con *con) 1633 1624 { 1634 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1625 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 1635 1626 u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit; 1636 1627 int err, cq_vector; 1637 1628 struct rtrs_msg_rkey_rsp *rsp; ··· 1640 1631 if (con->c.cid == 0) { 1641 1632 max_send_sge = 1; 1642 1633 /* We must be the first here */ 1643 - if (WARN_ON(sess->s.dev)) 1634 + if (WARN_ON(clt_path->s.dev)) 1644 1635 return -EINVAL; 1645 1636 1646 1637 /* ··· 1648 1639 * Be careful not to close user connection before ib dev 1649 1640 * is gracefully put. 1650 1641 */ 1651 - sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device, 1642 + clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device, 1652 1643 &dev_pd); 1653 - if (!sess->s.dev) { 1654 - rtrs_wrn(sess->clt, 1644 + if (!clt_path->s.dev) { 1645 + rtrs_wrn(clt_path->clt, 1655 1646 "rtrs_ib_dev_find_get_or_add(): no memory\n"); 1656 1647 return -ENOMEM; 1657 1648 } 1658 - sess->s.dev_ref = 1; 1659 - query_fast_reg_mode(sess); 1660 - wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr; 1649 + clt_path->s.dev_ref = 1; 1650 + query_fast_reg_mode(clt_path); 1651 + wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr; 1661 1652 /* 1662 1653 * Two (request + registration) completion for send 1663 1654 * Two for recv if always_invalidate is set on server ··· 1674 1665 * This is always true if user connection (cid == 0) is 1675 1666 * established first. 
1676 1667 */ 1677 - if (WARN_ON(!sess->s.dev)) 1668 + if (WARN_ON(!clt_path->s.dev)) 1678 1669 return -EINVAL; 1679 - if (WARN_ON(!sess->queue_depth)) 1670 + if (WARN_ON(!clt_path->queue_depth)) 1680 1671 return -EINVAL; 1681 1672 1682 - wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr; 1673 + wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr; 1683 1674 /* Shared between connections */ 1684 - sess->s.dev_ref++; 1675 + clt_path->s.dev_ref++; 1685 1676 max_send_wr = min_t(int, wr_limit, 1686 1677 /* QD * (REQ + RSP + FR REGS or INVS) + drain */ 1687 - sess->queue_depth * 3 + 1); 1678 + clt_path->queue_depth * 3 + 1); 1688 1679 max_recv_wr = min_t(int, wr_limit, 1689 - sess->queue_depth * 3 + 1); 1680 + clt_path->queue_depth * 3 + 1); 1690 1681 max_send_sge = 2; 1691 1682 } 1692 1683 atomic_set(&con->c.sq_wr_avail, max_send_wr); 1693 1684 cq_num = max_send_wr + max_recv_wr; 1694 1685 /* alloc iu to recv new rkey reply when server reports flags set */ 1695 - if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { 1686 + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { 1696 1687 con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp), 1697 - GFP_KERNEL, sess->s.dev->ib_dev, 1688 + GFP_KERNEL, 1689 + clt_path->s.dev->ib_dev, 1698 1690 DMA_FROM_DEVICE, 1699 1691 rtrs_clt_rdma_done); 1700 1692 if (!con->rsp_ius) ··· 1703 1693 con->queue_num = cq_num; 1704 1694 } 1705 1695 cq_num = max_send_wr + max_recv_wr; 1706 - cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors; 1707 - if (con->c.cid >= sess->s.irq_con_num) 1708 - err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge, 1696 + cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors; 1697 + if (con->c.cid >= clt_path->s.irq_con_num) 1698 + err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, 1709 1699 cq_vector, cq_num, max_send_wr, 1710 1700 max_recv_wr, IB_POLL_DIRECT); 1711 1701 else 1712 - err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge, 1702 + err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, 1713 1703 cq_vector, cq_num, max_send_wr, 1714 1704 max_recv_wr, IB_POLL_SOFTIRQ); 1715 1705 /* ··· 1721 1711 1722 1712 static void destroy_con_cq_qp(struct rtrs_clt_con *con) 1723 1713 { 1724 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1714 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 1725 1715 1726 1716 /* 1727 1717 * Be careful here: destroy_con_cq_qp() can be called even ··· 1730 1720 lockdep_assert_held(&con->con_mutex); 1731 1721 rtrs_cq_qp_destroy(&con->c); 1732 1722 if (con->rsp_ius) { 1733 - rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_num); 1723 + rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev, 1724 + con->queue_num); 1734 1725 con->rsp_ius = NULL; 1735 1726 con->queue_num = 0; 1736 1727 } 1737 - if (sess->s.dev_ref && !--sess->s.dev_ref) { 1738 - rtrs_ib_dev_put(sess->s.dev); 1739 - sess->s.dev = NULL; 1728 + if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) { 1729 + rtrs_ib_dev_put(clt_path->s.dev); 1730 + clt_path->s.dev = NULL; 1740 1731 } 1741 1732 } 1742 1733 ··· 1756 1745 1757 1746 static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con) 1758 1747 { 1759 - struct rtrs_sess *s = con->c.sess; 1748 + struct rtrs_path *s = con->c.path; 1760 1749 int err; 1761 1750 1762 1751 mutex_lock(&con->con_mutex); ··· 1775 1764 1776 1765 static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con) 1777 1766 { 1778 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1779 - struct rtrs_clt *clt = sess->clt; 1767 + struct rtrs_clt_path 
*clt_path = to_clt_path(con->c.path); 1768 + struct rtrs_clt_sess *clt = clt_path->clt; 1780 1769 struct rtrs_msg_conn_req msg; 1781 1770 struct rdma_conn_param param; 1782 1771 ··· 1793 1782 .magic = cpu_to_le16(RTRS_MAGIC), 1794 1783 .version = cpu_to_le16(RTRS_PROTO_VER), 1795 1784 .cid = cpu_to_le16(con->c.cid), 1796 - .cid_num = cpu_to_le16(sess->s.con_num), 1797 - .recon_cnt = cpu_to_le16(sess->s.recon_cnt), 1785 + .cid_num = cpu_to_le16(clt_path->s.con_num), 1786 + .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt), 1798 1787 }; 1799 - msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0; 1800 - uuid_copy(&msg.sess_uuid, &sess->s.uuid); 1788 + msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0; 1789 + uuid_copy(&msg.sess_uuid, &clt_path->s.uuid); 1801 1790 uuid_copy(&msg.paths_uuid, &clt->paths_uuid); 1802 1791 1803 1792 err = rdma_connect_locked(con->c.cm_id, &param); ··· 1810 1799 static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, 1811 1800 struct rdma_cm_event *ev) 1812 1801 { 1813 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1814 - struct rtrs_clt *clt = sess->clt; 1802 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 1803 + struct rtrs_clt_sess *clt = clt_path->clt; 1815 1804 const struct rtrs_msg_conn_rsp *msg; 1816 1805 u16 version, queue_depth; 1817 1806 int errno; ··· 1842 1831 if (con->c.cid == 0) { 1843 1832 queue_depth = le16_to_cpu(msg->queue_depth); 1844 1833 1845 - if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) { 1834 + if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) { 1846 1835 rtrs_err(clt, "Error: queue depth changed\n"); 1847 1836 1848 1837 /* 1849 1838 * Stop any more reconnection attempts 1850 1839 */ 1851 - sess->reconnect_attempts = -1; 1840 + clt_path->reconnect_attempts = -1; 1852 1841 rtrs_err(clt, 1853 1842 "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n"); 1854 1843 return -ECONNRESET; 1855 1844 } 1856 1845 1857 - if (!sess->rbufs) { 1858 - sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs), 1859 - GFP_KERNEL); 1860 - if (!sess->rbufs) 1846 + if (!clt_path->rbufs) { 1847 + clt_path->rbufs = kcalloc(queue_depth, 1848 + sizeof(*clt_path->rbufs), 1849 + GFP_KERNEL); 1850 + if (!clt_path->rbufs) 1861 1851 return -ENOMEM; 1862 1852 } 1863 - sess->queue_depth = queue_depth; 1864 - sess->s.signal_interval = min_not_zero(queue_depth, 1853 + clt_path->queue_depth = queue_depth; 1854 + clt_path->s.signal_interval = min_not_zero(queue_depth, 1865 1855 (unsigned short) SERVICE_CON_QUEUE_DEPTH); 1866 - sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size); 1867 - sess->max_io_size = le32_to_cpu(msg->max_io_size); 1868 - sess->flags = le32_to_cpu(msg->flags); 1869 - sess->chunk_size = sess->max_io_size + sess->max_hdr_size; 1856 + clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size); 1857 + clt_path->max_io_size = le32_to_cpu(msg->max_io_size); 1858 + clt_path->flags = le32_to_cpu(msg->flags); 1859 + clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size; 1870 1860 1871 1861 /* 1872 1862 * Global IO size is always a minimum. ··· 1878 1866 * connections in parallel, use lock. 
1879 1867 */ 1880 1868 mutex_lock(&clt->paths_mutex); 1881 - clt->queue_depth = sess->queue_depth; 1882 - clt->max_io_size = min_not_zero(sess->max_io_size, 1869 + clt->queue_depth = clt_path->queue_depth; 1870 + clt->max_io_size = min_not_zero(clt_path->max_io_size, 1883 1871 clt->max_io_size); 1884 1872 mutex_unlock(&clt->paths_mutex); 1885 1873 1886 1874 /* 1887 1875 * Cache the hca_port and hca_name for sysfs 1888 1876 */ 1889 - sess->hca_port = con->c.cm_id->port_num; 1890 - scnprintf(sess->hca_name, sizeof(sess->hca_name), 1891 - sess->s.dev->ib_dev->name); 1892 - sess->s.src_addr = con->c.cm_id->route.addr.src_addr; 1877 + clt_path->hca_port = con->c.cm_id->port_num; 1878 + scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name), 1879 + clt_path->s.dev->ib_dev->name); 1880 + clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr; 1893 1881 /* set for_new_clt, to allow future reconnect on any path */ 1894 - sess->for_new_clt = 1; 1882 + clt_path->for_new_clt = 1; 1895 1883 } 1896 1884 1897 1885 return 0; ··· 1899 1887 1900 1888 static inline void flag_success_on_conn(struct rtrs_clt_con *con) 1901 1889 { 1902 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1890 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 1903 1891 1904 - atomic_inc(&sess->connected_cnt); 1892 + atomic_inc(&clt_path->connected_cnt); 1905 1893 con->cm_err = 1; 1906 1894 } 1907 1895 1908 1896 static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con, 1909 1897 struct rdma_cm_event *ev) 1910 1898 { 1911 - struct rtrs_sess *s = con->c.sess; 1899 + struct rtrs_path *s = con->c.path; 1912 1900 const struct rtrs_msg_conn_rsp *msg; 1913 1901 const char *rej_msg; 1914 1902 int status, errno; ··· 1936 1924 return -ECONNRESET; 1937 1925 } 1938 1926 1939 - void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait) 1927 + void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait) 1940 1928 { 1941 - if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL)) 1942 - queue_work(rtrs_wq, &sess->close_work); 1929 + if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL)) 1930 + queue_work(rtrs_wq, &clt_path->close_work); 1943 1931 if (wait) 1944 - flush_work(&sess->close_work); 1932 + flush_work(&clt_path->close_work); 1945 1933 } 1946 1934 1947 1935 static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err) 1948 1936 { 1949 1937 if (con->cm_err == 1) { 1950 - struct rtrs_clt_sess *sess; 1938 + struct rtrs_clt_path *clt_path; 1951 1939 1952 - sess = to_clt_sess(con->c.sess); 1953 - if (atomic_dec_and_test(&sess->connected_cnt)) 1940 + clt_path = to_clt_path(con->c.path); 1941 + if (atomic_dec_and_test(&clt_path->connected_cnt)) 1954 1942 1955 - wake_up(&sess->state_wq); 1943 + wake_up(&clt_path->state_wq); 1956 1944 } 1957 1945 con->cm_err = cm_err; 1958 1946 } ··· 1961 1949 struct rdma_cm_event *ev) 1962 1950 { 1963 1951 struct rtrs_clt_con *con = cm_id->context; 1964 - struct rtrs_sess *s = con->c.sess; 1965 - struct rtrs_clt_sess *sess = to_clt_sess(s); 1952 + struct rtrs_path *s = con->c.path; 1953 + struct rtrs_clt_path *clt_path = to_clt_path(s); 1966 1954 int cm_err = 0; 1967 1955 1968 1956 switch (ev->event) { ··· 1980 1968 * i.e. wake up without state change, but we set cm_err. 1981 1969 */ 1982 1970 flag_success_on_conn(con); 1983 - wake_up(&sess->state_wq); 1971 + wake_up(&clt_path->state_wq); 1984 1972 return 0; 1985 1973 } 1986 1974 break; ··· 2009 1997 /* 2010 1998 * Device removal is a special case. Queue close and return 0. 
2011 1999 */ 2012 - rtrs_clt_close_conns(sess, false); 2000 + rtrs_clt_close_conns(clt_path, false); 2013 2001 return 0; 2014 2002 default: 2015 2003 rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n", ··· 2032 2020 2033 2021 static int create_cm(struct rtrs_clt_con *con) 2034 2022 { 2035 - struct rtrs_sess *s = con->c.sess; 2036 - struct rtrs_clt_sess *sess = to_clt_sess(s); 2023 + struct rtrs_path *s = con->c.path; 2024 + struct rtrs_clt_path *clt_path = to_clt_path(s); 2037 2025 struct rdma_cm_id *cm_id; 2038 2026 int err; 2039 2027 2040 2028 cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con, 2041 - sess->s.dst_addr.ss_family == AF_IB ? 2029 + clt_path->s.dst_addr.ss_family == AF_IB ? 2042 2030 RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC); 2043 2031 if (IS_ERR(cm_id)) { 2044 2032 err = PTR_ERR(cm_id); ··· 2054 2042 rtrs_err(s, "Set address reuse failed, err: %d\n", err); 2055 2043 goto destroy_cm; 2056 2044 } 2057 - err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr, 2058 - (struct sockaddr *)&sess->s.dst_addr, 2045 + err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr, 2046 + (struct sockaddr *)&clt_path->s.dst_addr, 2059 2047 RTRS_CONNECT_TIMEOUT_MS); 2060 2048 if (err) { 2061 2049 rtrs_err(s, "Failed to resolve address, err: %d\n", err); ··· 2067 2055 * or session state was really changed to error by device removal. 2068 2056 */ 2069 2057 err = wait_event_interruptible_timeout( 2070 - sess->state_wq, 2071 - con->cm_err || sess->state != RTRS_CLT_CONNECTING, 2058 + clt_path->state_wq, 2059 + con->cm_err || clt_path->state != RTRS_CLT_CONNECTING, 2072 2060 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS)); 2073 2061 if (err == 0 || err == -ERESTARTSYS) { 2074 2062 if (err == 0) ··· 2080 2068 err = con->cm_err; 2081 2069 goto errr; 2082 2070 } 2083 - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) { 2071 + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) { 2084 2072 /* Device removal */ 2085 2073 err = -ECONNABORTED; 2086 2074 goto errr; ··· 2099 2087 return err; 2100 2088 } 2101 2089 2102 - static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess) 2090 + static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path) 2103 2091 { 2104 - struct rtrs_clt *clt = sess->clt; 2092 + struct rtrs_clt_sess *clt = clt_path->clt; 2105 2093 int up; 2106 2094 2107 2095 /* ··· 2125 2113 mutex_unlock(&clt->paths_ev_mutex); 2126 2114 2127 2115 /* Mark session as established */ 2128 - sess->established = true; 2129 - sess->reconnect_attempts = 0; 2130 - sess->stats->reconnects.successful_cnt++; 2116 + clt_path->established = true; 2117 + clt_path->reconnect_attempts = 0; 2118 + clt_path->stats->reconnects.successful_cnt++; 2131 2119 } 2132 2120 2133 - static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess) 2121 + static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path) 2134 2122 { 2135 - struct rtrs_clt *clt = sess->clt; 2123 + struct rtrs_clt_sess *clt = clt_path->clt; 2136 2124 2137 - if (!sess->established) 2125 + if (!clt_path->established) 2138 2126 return; 2139 2127 2140 - sess->established = false; 2128 + clt_path->established = false; 2141 2129 mutex_lock(&clt->paths_ev_mutex); 2142 2130 WARN_ON(!clt->paths_up); 2143 2131 if (--clt->paths_up == 0) ··· 2145 2133 mutex_unlock(&clt->paths_ev_mutex); 2146 2134 } 2147 2135 2148 - static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) 2136 + static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path) 2149 2137 { 2150 2138 struct rtrs_clt_con *con; 2151 
2139 unsigned int cid; 2152 2140 2153 - WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED); 2141 + WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED); 2154 2142 2155 2143 /* 2156 2144 * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes 2157 2145 * exactly in between. Start destroying after it finishes. 2158 2146 */ 2159 - mutex_lock(&sess->init_mutex); 2160 - mutex_unlock(&sess->init_mutex); 2147 + mutex_lock(&clt_path->init_mutex); 2148 + mutex_unlock(&clt_path->init_mutex); 2161 2149 2162 2150 /* 2163 2151 * All IO paths must observe !CONNECTED state before we ··· 2165 2153 */ 2166 2154 synchronize_rcu(); 2167 2155 2168 - rtrs_stop_hb(&sess->s); 2156 + rtrs_stop_hb(&clt_path->s); 2169 2157 2170 2158 /* 2171 2159 * The order is utterly crucial: firstly disconnect and complete all ··· 2174 2162 * eventually notify upper layer about session disconnection. 2175 2163 */ 2176 2164 2177 - for (cid = 0; cid < sess->s.con_num; cid++) { 2178 - if (!sess->s.con[cid]) 2165 + for (cid = 0; cid < clt_path->s.con_num; cid++) { 2166 + if (!clt_path->s.con[cid]) 2179 2167 break; 2180 - con = to_clt_con(sess->s.con[cid]); 2168 + con = to_clt_con(clt_path->s.con[cid]); 2181 2169 stop_cm(con); 2182 2170 } 2183 - fail_all_outstanding_reqs(sess); 2184 - free_sess_reqs(sess); 2185 - rtrs_clt_sess_down(sess); 2171 + fail_all_outstanding_reqs(clt_path); 2172 + free_path_reqs(clt_path); 2173 + rtrs_clt_path_down(clt_path); 2186 2174 2187 2175 /* 2188 2176 * Wait for graceful shutdown, namely when peer side invokes ··· 2192 2180 * since CM does not fire anything. That is fine, we are not in a 2193 2181 * hurry. 2194 2182 */ 2195 - wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt), 2183 + wait_event_timeout(clt_path->state_wq, 2184 + !atomic_read(&clt_path->connected_cnt), 2196 2185 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS)); 2197 2186 2198 - for (cid = 0; cid < sess->s.con_num; cid++) { 2199 - if (!sess->s.con[cid]) 2187 + for (cid = 0; cid < clt_path->s.con_num; cid++) { 2188 + if (!clt_path->s.con[cid]) 2200 2189 break; 2201 - con = to_clt_con(sess->s.con[cid]); 2190 + con = to_clt_con(clt_path->s.con[cid]); 2202 2191 mutex_lock(&con->con_mutex); 2203 2192 destroy_con_cq_qp(con); 2204 2193 mutex_unlock(&con->con_mutex); ··· 2208 2195 } 2209 2196 } 2210 2197 2211 - static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path, 2212 - struct rtrs_clt_sess *sess, 2213 - struct rtrs_clt_sess *next) 2198 + static inline bool xchg_paths(struct rtrs_clt_path __rcu **rcu_ppcpu_path, 2199 + struct rtrs_clt_path *clt_path, 2200 + struct rtrs_clt_path *next) 2214 2201 { 2215 - struct rtrs_clt_sess **ppcpu_path; 2202 + struct rtrs_clt_path **ppcpu_path; 2216 2203 2217 2204 /* Call cmpxchg() without sparse warnings */ 2218 2205 ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path; 2219 - return sess == cmpxchg(ppcpu_path, sess, next); 2206 + return clt_path == cmpxchg(ppcpu_path, clt_path, next); 2220 2207 } 2221 2208 2222 - static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) 2209 + static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path) 2223 2210 { 2224 - struct rtrs_clt *clt = sess->clt; 2225 - struct rtrs_clt_sess *next; 2211 + struct rtrs_clt_sess *clt = clt_path->clt; 2212 + struct rtrs_clt_path *next; 2226 2213 bool wait_for_grace = false; 2227 2214 int cpu; 2228 2215 2229 2216 mutex_lock(&clt->paths_mutex); 2230 - list_del_rcu(&sess->s.entry); 2217 + list_del_rcu(&clt_path->s.entry); 2231 2218 2232 2219 /* Make sure everybody 
observes path removal. */ 2233 2220 synchronize_rcu(); ··· 2268 2255 * removed. If @sess is the last element, then @next is NULL. 2269 2256 */ 2270 2257 rcu_read_lock(); 2271 - next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry, 2258 + next = list_next_or_null_rr_rcu(&clt->paths_list, &clt_path->s.entry, 2272 2259 typeof(*next), s.entry); 2273 2260 rcu_read_unlock(); 2274 2261 ··· 2277 2264 * removed, so change the pointer manually. 2278 2265 */ 2279 2266 for_each_possible_cpu(cpu) { 2280 - struct rtrs_clt_sess __rcu **ppcpu_path; 2267 + struct rtrs_clt_path __rcu **ppcpu_path; 2281 2268 2282 2269 ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu); 2283 2270 if (rcu_dereference_protected(*ppcpu_path, 2284 - lockdep_is_held(&clt->paths_mutex)) != sess) 2271 + lockdep_is_held(&clt->paths_mutex)) != clt_path) 2285 2272 /* 2286 2273 * synchronize_rcu() was called just after deleting 2287 2274 * entry from the list, thus IO code path cannot ··· 2294 2281 * We race with IO code path, which also changes pointer, 2295 2282 * thus we have to be careful not to overwrite it. 2296 2283 */ 2297 - if (xchg_sessions(ppcpu_path, sess, next)) 2284 + if (xchg_paths(ppcpu_path, clt_path, next)) 2298 2285 /* 2299 2286 * @ppcpu_path was successfully replaced with @next, 2300 2287 * that means that someone could also pick up the ··· 2309 2296 mutex_unlock(&clt->paths_mutex); 2310 2297 } 2311 2298 2312 - static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess) 2299 + static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path) 2313 2300 { 2314 - struct rtrs_clt *clt = sess->clt; 2301 + struct rtrs_clt_sess *clt = clt_path->clt; 2315 2302 2316 2303 mutex_lock(&clt->paths_mutex); 2317 2304 clt->paths_num++; 2318 2305 2319 - list_add_tail_rcu(&sess->s.entry, &clt->paths_list); 2306 + list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); 2320 2307 mutex_unlock(&clt->paths_mutex); 2321 2308 } 2322 2309 2323 2310 static void rtrs_clt_close_work(struct work_struct *work) 2324 2311 { 2325 - struct rtrs_clt_sess *sess; 2312 + struct rtrs_clt_path *clt_path; 2326 2313 2327 - sess = container_of(work, struct rtrs_clt_sess, close_work); 2314 + clt_path = container_of(work, struct rtrs_clt_path, close_work); 2328 2315 2329 - cancel_delayed_work_sync(&sess->reconnect_dwork); 2330 - rtrs_clt_stop_and_destroy_conns(sess); 2331 - rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSED, NULL); 2316 + cancel_delayed_work_sync(&clt_path->reconnect_dwork); 2317 + rtrs_clt_stop_and_destroy_conns(clt_path); 2318 + rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL); 2332 2319 } 2333 2320 2334 - static int init_conns(struct rtrs_clt_sess *sess) 2321 + static int init_conns(struct rtrs_clt_path *clt_path) 2335 2322 { 2336 2323 unsigned int cid; 2337 2324 int err; ··· 2341 2328 * to avoid clashes with previous sessions that are not yet 2342 2329 * closed on the server side. 
2343 2330 */ 2344 - sess->s.recon_cnt++; 2331 + clt_path->s.recon_cnt++; 2345 2332 2346 2333 /* Establish all RDMA connections */ 2347 - for (cid = 0; cid < sess->s.con_num; cid++) { 2348 - err = create_con(sess, cid); 2334 + for (cid = 0; cid < clt_path->s.con_num; cid++) { 2335 + err = create_con(clt_path, cid); 2349 2336 if (err) 2350 2337 goto destroy; 2351 2338 2352 - err = create_cm(to_clt_con(sess->s.con[cid])); 2339 + err = create_cm(to_clt_con(clt_path->s.con[cid])); 2353 2340 if (err) { 2354 - destroy_con(to_clt_con(sess->s.con[cid])); 2341 + destroy_con(to_clt_con(clt_path->s.con[cid])); 2355 2342 goto destroy; 2356 2343 } 2357 2344 } 2358 - err = alloc_sess_reqs(sess); 2345 + err = alloc_path_reqs(clt_path); 2359 2346 if (err) 2360 2347 goto destroy; 2361 2348 2362 - rtrs_start_hb(&sess->s); 2349 + rtrs_start_hb(&clt_path->s); 2363 2350 2364 2351 return 0; 2365 2352 2366 2353 destroy: 2367 2354 while (cid--) { 2368 - struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]); 2355 + struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]); 2369 2356 2370 2357 stop_cm(con); 2371 2358 ··· 2380 2367 * doing rdma_resolve_addr(), switch to CONNECTION_ERR state 2381 2368 * manually to keep reconnecting. 2382 2369 */ 2383 - rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); 2370 + rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL); 2384 2371 2385 2372 return err; 2386 2373 } ··· 2388 2375 static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc) 2389 2376 { 2390 2377 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); 2391 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 2378 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 2392 2379 struct rtrs_iu *iu; 2393 2380 2394 2381 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); 2395 - rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); 2382 + rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1); 2396 2383 2397 2384 if (wc->status != IB_WC_SUCCESS) { 2398 - rtrs_err(sess->clt, "Sess info request send failed: %s\n", 2385 + rtrs_err(clt_path->clt, "Path info request send failed: %s\n", 2399 2386 ib_wc_status_msg(wc->status)); 2400 - rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); 2387 + rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL); 2401 2388 return; 2402 2389 } 2403 2390 2404 2391 rtrs_clt_update_wc_stats(con); 2405 2392 } 2406 2393 2407 - static int process_info_rsp(struct rtrs_clt_sess *sess, 2394 + static int process_info_rsp(struct rtrs_clt_path *clt_path, 2408 2395 const struct rtrs_msg_info_rsp *msg) 2409 2396 { 2410 2397 unsigned int sg_cnt, total_len; 2411 2398 int i, sgi; 2412 2399 2413 2400 sg_cnt = le16_to_cpu(msg->sg_cnt); 2414 - if (!sg_cnt || (sess->queue_depth % sg_cnt)) { 2415 - rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n", 2401 + if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) { 2402 + rtrs_err(clt_path->clt, 2403 + "Incorrect sg_cnt %d, is not multiple\n", 2416 2404 sg_cnt); 2417 2405 return -EINVAL; 2418 2406 } ··· 2422 2408 * Check if IB immediate data size is enough to hold the mem_id and 2423 2409 * the offset inside the memory chunk. 
2424 2410 */ 2425 - if ((ilog2(sg_cnt - 1) + 1) + (ilog2(sess->chunk_size - 1) + 1) > 2411 + if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) > 2426 2412 MAX_IMM_PAYL_BITS) { 2427 - rtrs_err(sess->clt, 2413 + rtrs_err(clt_path->clt, 2428 2414 "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n", 2429 - MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size); 2415 + MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size); 2430 2416 return -EINVAL; 2431 2417 } 2432 2418 total_len = 0; 2433 - for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) { 2419 + for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) { 2434 2420 const struct rtrs_sg_desc *desc = &msg->desc[sgi]; 2435 2421 u32 len, rkey; 2436 2422 u64 addr; ··· 2441 2427 2442 2428 total_len += len; 2443 2429 2444 - if (!len || (len % sess->chunk_size)) { 2445 - rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi, 2430 + if (!len || (len % clt_path->chunk_size)) { 2431 + rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n", 2432 + sgi, 2446 2433 len); 2447 2434 return -EINVAL; 2448 2435 } 2449 - for ( ; len && i < sess->queue_depth; i++) { 2450 - sess->rbufs[i].addr = addr; 2451 - sess->rbufs[i].rkey = rkey; 2436 + for ( ; len && i < clt_path->queue_depth; i++) { 2437 + clt_path->rbufs[i].addr = addr; 2438 + clt_path->rbufs[i].rkey = rkey; 2452 2439 2453 - len -= sess->chunk_size; 2454 - addr += sess->chunk_size; 2440 + len -= clt_path->chunk_size; 2441 + addr += clt_path->chunk_size; 2455 2442 } 2456 2443 } 2457 2444 /* Sanity check */ 2458 - if (sgi != sg_cnt || i != sess->queue_depth) { 2459 - rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n"); 2445 + if (sgi != sg_cnt || i != clt_path->queue_depth) { 2446 + rtrs_err(clt_path->clt, 2447 + "Incorrect sg vector, not fully mapped\n"); 2460 2448 return -EINVAL; 2461 2449 } 2462 - if (total_len != sess->chunk_size * sess->queue_depth) { 2463 - rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len); 2450 + if (total_len != clt_path->chunk_size * clt_path->queue_depth) { 2451 + rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len); 2464 2452 return -EINVAL; 2465 2453 } 2466 2454 ··· 2472 2456 static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) 2473 2457 { 2474 2458 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); 2475 - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 2459 + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); 2476 2460 struct rtrs_msg_info_rsp *msg; 2477 2461 enum rtrs_clt_state state; 2478 2462 struct rtrs_iu *iu; ··· 2484 2468 WARN_ON(con->c.cid); 2485 2469 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); 2486 2470 if (wc->status != IB_WC_SUCCESS) { 2487 - rtrs_err(sess->clt, "Sess info response recv failed: %s\n", 2471 + rtrs_err(clt_path->clt, "Path info response recv failed: %s\n", 2488 2472 ib_wc_status_msg(wc->status)); 2489 2473 goto out; 2490 2474 } 2491 2475 WARN_ON(wc->opcode != IB_WC_RECV); 2492 2476 2493 2477 if (wc->byte_len < sizeof(*msg)) { 2494 - rtrs_err(sess->clt, "Sess info response is malformed: size %d\n", 2478 + rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", 2495 2479 wc->byte_len); 2496 2480 goto out; 2497 2481 } 2498 - ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, 2482 + ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr, 2499 2483 iu->size, DMA_FROM_DEVICE); 2500 2484 msg = iu->buf; 2501 2485 if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) { 2502 - rtrs_err(sess->clt, "Sess info 
response is malformed: type %d\n", 2486 + rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n", 2503 2487 le16_to_cpu(msg->type)); 2504 2488 goto out; 2505 2489 } 2506 2490 rx_sz = sizeof(*msg); 2507 2491 rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt); 2508 2492 if (wc->byte_len < rx_sz) { 2509 - rtrs_err(sess->clt, "Sess info response is malformed: size %d\n", 2493 + rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", 2510 2494 wc->byte_len); 2511 2495 goto out; 2512 2496 } 2513 - err = process_info_rsp(sess, msg); 2497 + err = process_info_rsp(clt_path, msg); 2514 2498 if (err) 2515 2499 goto out; 2516 2500 2517 - err = post_recv_sess(sess); 2501 + err = post_recv_path(clt_path); 2518 2502 if (err) 2519 2503 goto out; 2520 2504 ··· 2522 2506 2523 2507 out: 2524 2508 rtrs_clt_update_wc_stats(con); 2525 - rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); 2526 - rtrs_clt_change_state_get_old(sess, state, NULL); 2509 + rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1); 2510 + rtrs_clt_change_state_get_old(clt_path, state, NULL); 2527 2511 } 2528 2512 2529 - static int rtrs_send_sess_info(struct rtrs_clt_sess *sess) 2513 + static int rtrs_send_path_info(struct rtrs_clt_path *clt_path) 2530 2514 { 2531 - struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]); 2515 + struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]); 2532 2516 struct rtrs_msg_info_req *msg; 2533 2517 struct rtrs_iu *tx_iu, *rx_iu; 2534 2518 size_t rx_sz; 2535 2519 int err; 2536 2520 2537 2521 rx_sz = sizeof(struct rtrs_msg_info_rsp); 2538 - rx_sz += sizeof(struct rtrs_sg_desc) * sess->queue_depth; 2522 + rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth; 2539 2523 2540 2524 tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL, 2541 - sess->s.dev->ib_dev, DMA_TO_DEVICE, 2525 + clt_path->s.dev->ib_dev, DMA_TO_DEVICE, 2542 2526 rtrs_clt_info_req_done); 2543 - rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev, 2527 + rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev, 2544 2528 DMA_FROM_DEVICE, rtrs_clt_info_rsp_done); 2545 2529 if (!tx_iu || !rx_iu) { 2546 2530 err = -ENOMEM; ··· 2549 2533 /* Prepare for getting info response */ 2550 2534 err = rtrs_iu_post_recv(&usr_con->c, rx_iu); 2551 2535 if (err) { 2552 - rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err); 2536 + rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err); 2553 2537 goto out; 2554 2538 } 2555 2539 rx_iu = NULL; 2556 2540 2557 2541 msg = tx_iu->buf; 2558 2542 msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ); 2559 - memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname)); 2543 + memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname)); 2560 2544 2561 - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr, 2545 + ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, 2546 + tx_iu->dma_addr, 2562 2547 tx_iu->size, DMA_TO_DEVICE); 2563 2548 2564 2549 /* Send info request */ 2565 2550 err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL); 2566 2551 if (err) { 2567 - rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err); 2552 + rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err); 2568 2553 goto out; 2569 2554 } 2570 2555 tx_iu = NULL; 2571 2556 2572 2557 /* Wait for state change */ 2573 - wait_event_interruptible_timeout(sess->state_wq, 2574 - sess->state != RTRS_CLT_CONNECTING, 2558 + wait_event_interruptible_timeout(clt_path->state_wq, 2559 + clt_path->state != RTRS_CLT_CONNECTING, 2575 2560 
msecs_to_jiffies( 2576 2561 RTRS_CONNECT_TIMEOUT_MS)); 2577 - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) { 2578 - if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR) 2562 + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) { 2563 + if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR) 2579 2564 err = -ECONNRESET; 2580 2565 else 2581 2566 err = -ETIMEDOUT; ··· 2584 2567 2585 2568 out: 2586 2569 if (tx_iu) 2587 - rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1); 2570 + rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1); 2588 2571 if (rx_iu) 2589 - rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1); 2572 + rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1); 2590 2573 if (err) 2591 2574 /* If we've never taken async path because of malloc problems */ 2592 - rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); 2575 + rtrs_clt_change_state_get_old(clt_path, 2576 + RTRS_CLT_CONNECTING_ERR, NULL); 2593 2577 2594 2578 return err; 2595 2579 } 2596 2580 2597 2581 /** 2598 - * init_sess() - establishes all session connections and does handshake 2599 - * @sess: client session. 2582 + * init_path() - establishes all path connections and does handshake 2583 + * @clt_path: client path. 2600 2584 * In case of error full close or reconnect procedure should be taken, 2601 2585 * because reconnect or close async works can be started. 2602 2586 */ 2603 - static int init_sess(struct rtrs_clt_sess *sess) 2587 + static int init_path(struct rtrs_clt_path *clt_path) 2604 2588 { 2605 2589 int err; 2606 2590 char str[NAME_MAX]; 2607 2591 struct rtrs_addr path = { 2608 - .src = &sess->s.src_addr, 2609 - .dst = &sess->s.dst_addr, 2592 + .src = &clt_path->s.src_addr, 2593 + .dst = &clt_path->s.dst_addr, 2610 2594 }; 2611 2595 2612 2596 rtrs_addr_to_str(&path, str, sizeof(str)); 2613 2597 2614 - mutex_lock(&sess->init_mutex); 2615 - err = init_conns(sess); 2598 + mutex_lock(&clt_path->init_mutex); 2599 + err = init_conns(clt_path); 2616 2600 if (err) { 2617 - rtrs_err(sess->clt, 2601 + rtrs_err(clt_path->clt, 2618 2602 "init_conns() failed: err=%d path=%s [%s:%u]\n", err, 2619 - str, sess->hca_name, sess->hca_port); 2603 + str, clt_path->hca_name, clt_path->hca_port); 2620 2604 goto out; 2621 2605 } 2622 - err = rtrs_send_sess_info(sess); 2606 + err = rtrs_send_path_info(clt_path); 2623 2607 if (err) { 2624 - rtrs_err( 2625 - sess->clt, 2626 - "rtrs_send_sess_info() failed: err=%d path=%s [%s:%u]\n", 2627 - err, str, sess->hca_name, sess->hca_port); 2608 + rtrs_err(clt_path->clt, 2609 + "rtrs_send_path_info() failed: err=%d path=%s [%s:%u]\n", 2610 + err, str, clt_path->hca_name, clt_path->hca_port); 2628 2611 goto out; 2629 2612 } 2630 - rtrs_clt_sess_up(sess); 2613 + rtrs_clt_path_up(clt_path); 2631 2614 out: 2632 - mutex_unlock(&sess->init_mutex); 2615 + mutex_unlock(&clt_path->init_mutex); 2633 2616 2634 2617 return err; 2635 2618 } 2636 2619 2637 2620 static void rtrs_clt_reconnect_work(struct work_struct *work) 2638 2621 { 2639 - struct rtrs_clt_sess *sess; 2640 - struct rtrs_clt *clt; 2622 + struct rtrs_clt_path *clt_path; 2623 + struct rtrs_clt_sess *clt; 2641 2624 unsigned int delay_ms; 2642 2625 int err; 2643 2626 2644 - sess = container_of(to_delayed_work(work), struct rtrs_clt_sess, 2645 - reconnect_dwork); 2646 - clt = sess->clt; 2627 + clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path, 2628 + reconnect_dwork); 2629 + clt = clt_path->clt; 2647 2630 2648 - if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING) 2631 + if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING) 2649 
2632 return; 2650 2633 2651 - if (sess->reconnect_attempts >= clt->max_reconnect_attempts) { 2652 - /* Close a session completely if max attempts is reached */ 2653 - rtrs_clt_close_conns(sess, false); 2634 + if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) { 2635 + /* Close a path completely if max attempts is reached */ 2636 + rtrs_clt_close_conns(clt_path, false); 2654 2637 return; 2655 2638 } 2656 - sess->reconnect_attempts++; 2639 + clt_path->reconnect_attempts++; 2657 2640 2658 2641 /* Stop everything */ 2659 - rtrs_clt_stop_and_destroy_conns(sess); 2642 + rtrs_clt_stop_and_destroy_conns(clt_path); 2660 2643 msleep(RTRS_RECONNECT_BACKOFF); 2661 - if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING, NULL)) { 2662 - err = init_sess(sess); 2644 + if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) { 2645 + err = init_path(clt_path); 2663 2646 if (err) 2664 2647 goto reconnect_again; 2665 2648 } ··· 2667 2650 return; 2668 2651 2669 2652 reconnect_again: 2670 - if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, NULL)) { 2671 - sess->stats->reconnects.fail_cnt++; 2653 + if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) { 2654 + clt_path->stats->reconnects.fail_cnt++; 2672 2655 delay_ms = clt->reconnect_delay_sec * 1000; 2673 - queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 2656 + queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 2674 2657 msecs_to_jiffies(delay_ms + 2675 2658 prandom_u32() % 2676 2659 RTRS_RECONNECT_SEED)); ··· 2679 2662 2680 2663 static void rtrs_clt_dev_release(struct device *dev) 2681 2664 { 2682 - struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); 2665 + struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, 2666 + dev); 2683 2667 2684 2668 kfree(clt); 2685 2669 } 2686 2670 2687 - static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num, 2671 + static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num, 2688 2672 u16 port, size_t pdu_sz, void *priv, 2689 2673 void (*link_ev)(void *priv, 2690 2674 enum rtrs_clt_link_ev ev), 2691 2675 unsigned int reconnect_delay_sec, 2692 2676 unsigned int max_reconnect_attempts) 2693 2677 { 2694 - struct rtrs_clt *clt; 2678 + struct rtrs_clt_sess *clt; 2695 2679 int err; 2696 2680 2697 2681 if (!paths_num || paths_num > MAX_PATHS_NUM) ··· 2767 2749 return ERR_PTR(err); 2768 2750 } 2769 2751 2770 - static void free_clt(struct rtrs_clt *clt) 2752 + static void free_clt(struct rtrs_clt_sess *clt) 2771 2753 { 2772 2754 free_permits(clt); 2773 2755 free_percpu(clt->pcpu_path); ··· 2778 2760 } 2779 2761 2780 2762 /** 2781 - * rtrs_clt_open() - Open a session to an RTRS server 2763 + * rtrs_clt_open() - Open a path to an RTRS server 2782 2764 * @ops: holds the link event callback and the private pointer. 2783 2765 * @sessname: name of the session 2784 2766 * @paths: Paths to be established defined by their src and dst addresses ··· 2795 2777 * 2796 2778 * Return a valid pointer on success otherwise PTR_ERR. 
2797 2779 */ 2798 - struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops, 2799 - const char *sessname, 2780 + struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops, 2781 + const char *pathname, 2800 2782 const struct rtrs_addr *paths, 2801 2783 size_t paths_num, u16 port, 2802 2784 size_t pdu_sz, u8 reconnect_delay_sec, 2803 2785 s16 max_reconnect_attempts, u32 nr_poll_queues) 2804 2786 { 2805 - struct rtrs_clt_sess *sess, *tmp; 2806 - struct rtrs_clt *clt; 2787 + struct rtrs_clt_path *clt_path, *tmp; 2788 + struct rtrs_clt_sess *clt; 2807 2789 int err, i; 2808 2790 2809 - if (strchr(sessname, '/') || strchr(sessname, '.')) { 2810 - pr_err("sessname cannot contain / and .\n"); 2791 + if (strchr(pathname, '/') || strchr(pathname, '.')) { 2792 + pr_err("pathname cannot contain / and .\n"); 2811 2793 err = -EINVAL; 2812 2794 goto out; 2813 2795 } 2814 2796 2815 - clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv, 2797 + clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv, 2816 2798 ops->link_ev, 2817 2799 reconnect_delay_sec, 2818 2800 max_reconnect_attempts); ··· 2821 2803 goto out; 2822 2804 } 2823 2805 for (i = 0; i < paths_num; i++) { 2824 - struct rtrs_clt_sess *sess; 2806 + struct rtrs_clt_path *clt_path; 2825 2807 2826 - sess = alloc_sess(clt, &paths[i], nr_cpu_ids, 2808 + clt_path = alloc_path(clt, &paths[i], nr_cpu_ids, 2827 2809 nr_poll_queues); 2828 - if (IS_ERR(sess)) { 2829 - err = PTR_ERR(sess); 2830 - goto close_all_sess; 2810 + if (IS_ERR(clt_path)) { 2811 + err = PTR_ERR(clt_path); 2812 + goto close_all_path; 2831 2813 } 2832 2814 if (!i) 2833 - sess->for_new_clt = 1; 2834 - list_add_tail_rcu(&sess->s.entry, &clt->paths_list); 2815 + clt_path->for_new_clt = 1; 2816 + list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); 2835 2817 2836 - err = init_sess(sess); 2818 + err = init_path(clt_path); 2837 2819 if (err) { 2838 - list_del_rcu(&sess->s.entry); 2839 - rtrs_clt_close_conns(sess, true); 2840 - free_percpu(sess->stats->pcpu_stats); 2841 - kfree(sess->stats); 2842 - free_sess(sess); 2843 - goto close_all_sess; 2820 + list_del_rcu(&clt_path->s.entry); 2821 + rtrs_clt_close_conns(clt_path, true); 2822 + free_percpu(clt_path->stats->pcpu_stats); 2823 + kfree(clt_path->stats); 2824 + free_path(clt_path); 2825 + goto close_all_path; 2844 2826 } 2845 2827 2846 - err = rtrs_clt_create_sess_files(sess); 2828 + err = rtrs_clt_create_path_files(clt_path); 2847 2829 if (err) { 2848 - list_del_rcu(&sess->s.entry); 2849 - rtrs_clt_close_conns(sess, true); 2850 - free_percpu(sess->stats->pcpu_stats); 2851 - kfree(sess->stats); 2852 - free_sess(sess); 2853 - goto close_all_sess; 2830 + list_del_rcu(&clt_path->s.entry); 2831 + rtrs_clt_close_conns(clt_path, true); 2832 + free_percpu(clt_path->stats->pcpu_stats); 2833 + kfree(clt_path->stats); 2834 + free_path(clt_path); 2835 + goto close_all_path; 2854 2836 } 2855 2837 } 2856 2838 err = alloc_permits(clt); 2857 2839 if (err) 2858 - goto close_all_sess; 2840 + goto close_all_path; 2859 2841 2860 2842 return clt; 2861 2843 2862 - close_all_sess: 2863 - list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) { 2864 - rtrs_clt_destroy_sess_files(sess, NULL); 2865 - rtrs_clt_close_conns(sess, true); 2866 - kobject_put(&sess->kobj); 2844 + close_all_path: 2845 + list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { 2846 + rtrs_clt_destroy_path_files(clt_path, NULL); 2847 + rtrs_clt_close_conns(clt_path, true); 2848 + kobject_put(&clt_path->kobj); 2867 2849 } 2868 2850 
rtrs_clt_destroy_sysfs_root(clt); 2869 2851 free_clt(clt); ··· 2874 2856 EXPORT_SYMBOL(rtrs_clt_open); 2875 2857 2876 2858 /** 2877 - * rtrs_clt_close() - Close a session 2859 + * rtrs_clt_close() - Close a path 2878 2860 * @clt: Session handle. Session is freed upon return. 2879 2861 */ 2880 - void rtrs_clt_close(struct rtrs_clt *clt) 2862 + void rtrs_clt_close(struct rtrs_clt_sess *clt) 2881 2863 { 2882 - struct rtrs_clt_sess *sess, *tmp; 2864 + struct rtrs_clt_path *clt_path, *tmp; 2883 2865 2884 2866 /* Firstly forbid sysfs access */ 2885 2867 rtrs_clt_destroy_sysfs_root(clt); 2886 2868 2887 2869 /* Now it is safe to iterate over all paths without locks */ 2888 - list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) { 2889 - rtrs_clt_close_conns(sess, true); 2890 - rtrs_clt_destroy_sess_files(sess, NULL); 2891 - kobject_put(&sess->kobj); 2870 + list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { 2871 + rtrs_clt_close_conns(clt_path, true); 2872 + rtrs_clt_destroy_path_files(clt_path, NULL); 2873 + kobject_put(&clt_path->kobj); 2892 2874 } 2893 2875 free_clt(clt); 2894 2876 } 2895 2877 EXPORT_SYMBOL(rtrs_clt_close); 2896 2878 2897 - int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess) 2879 + int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path) 2898 2880 { 2899 2881 enum rtrs_clt_state old_state; 2900 2882 int err = -EBUSY; 2901 2883 bool changed; 2902 2884 2903 - changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, 2885 + changed = rtrs_clt_change_state_get_old(clt_path, 2886 + RTRS_CLT_RECONNECTING, 2904 2887 &old_state); 2905 2888 if (changed) { 2906 - sess->reconnect_attempts = 0; 2907 - queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0); 2889 + clt_path->reconnect_attempts = 0; 2890 + queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0); 2908 2891 } 2909 2892 if (changed || old_state == RTRS_CLT_RECONNECTING) { 2910 2893 /* ··· 2913 2894 * execution, so do the flush if we have queued something 2914 2895 * right now or work is pending. 2915 2896 */ 2916 - flush_delayed_work(&sess->reconnect_dwork); 2917 - err = (READ_ONCE(sess->state) == 2897 + flush_delayed_work(&clt_path->reconnect_dwork); 2898 + err = (READ_ONCE(clt_path->state) == 2918 2899 RTRS_CLT_CONNECTED ? 0 : -ENOTCONN); 2919 2900 } 2920 2901 2921 2902 return err; 2922 2903 } 2923 2904 2924 - int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess, 2905 + int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path, 2925 2906 const struct attribute *sysfs_self) 2926 2907 { 2927 2908 enum rtrs_clt_state old_state; ··· 2937 2918 * removing the path. 
2938 2919 */ 2939 2920 do { 2940 - rtrs_clt_close_conns(sess, true); 2941 - changed = rtrs_clt_change_state_get_old(sess, 2921 + rtrs_clt_close_conns(clt_path, true); 2922 + changed = rtrs_clt_change_state_get_old(clt_path, 2942 2923 RTRS_CLT_DEAD, 2943 2924 &old_state); 2944 2925 } while (!changed && old_state != RTRS_CLT_DEAD); 2945 2926 2946 2927 if (changed) { 2947 - rtrs_clt_remove_path_from_arr(sess); 2948 - rtrs_clt_destroy_sess_files(sess, sysfs_self); 2949 - kobject_put(&sess->kobj); 2928 + rtrs_clt_remove_path_from_arr(clt_path); 2929 + rtrs_clt_destroy_path_files(clt_path, sysfs_self); 2930 + kobject_put(&clt_path->kobj); 2950 2931 } 2951 2932 2952 2933 return 0; 2953 2934 } 2954 2935 2955 - void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value) 2936 + void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value) 2956 2937 { 2957 2938 clt->max_reconnect_attempts = (unsigned int)value; 2958 2939 } 2959 2940 2960 - int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt) 2941 + int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt) 2961 2942 { 2962 2943 return (int)clt->max_reconnect_attempts; 2963 2944 } ··· 2987 2968 * On dir=WRITE rtrs client will rdma write data in sg to server side. 2988 2969 */ 2989 2970 int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, 2990 - struct rtrs_clt *clt, struct rtrs_permit *permit, 2991 - const struct kvec *vec, size_t nr, size_t data_len, 2992 - struct scatterlist *sg, unsigned int sg_cnt) 2971 + struct rtrs_clt_sess *clt, struct rtrs_permit *permit, 2972 + const struct kvec *vec, size_t nr, size_t data_len, 2973 + struct scatterlist *sg, unsigned int sg_cnt) 2993 2974 { 2994 2975 struct rtrs_clt_io_req *req; 2995 - struct rtrs_clt_sess *sess; 2976 + struct rtrs_clt_path *clt_path; 2996 2977 2997 2978 enum dma_data_direction dma_dir; 2998 2979 int err = -ECONNABORTED, i; ··· 3014 2995 3015 2996 rcu_read_lock(); 3016 2997 for (path_it_init(&it, clt); 3017 - (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { 3018 - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) 2998 + (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { 2999 + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) 3019 3000 continue; 3020 3001 3021 - if (usr_len + hdr_len > sess->max_hdr_size) { 3022 - rtrs_wrn_rl(sess->clt, 3002 + if (usr_len + hdr_len > clt_path->max_hdr_size) { 3003 + rtrs_wrn_rl(clt_path->clt, 3023 3004 "%s request failed, user message size is %zu and header length %zu, but max size is %u\n", 3024 3005 dir == READ ? 
"Read" : "Write", 3025 - usr_len, hdr_len, sess->max_hdr_size); 3006 + usr_len, hdr_len, clt_path->max_hdr_size); 3026 3007 err = -EMSGSIZE; 3027 3008 break; 3028 3009 } 3029 - req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv, 3010 + req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv, 3030 3011 vec, usr_len, sg, sg_cnt, data_len, 3031 3012 dma_dir); 3032 3013 if (dir == READ) ··· 3047 3028 } 3048 3029 EXPORT_SYMBOL(rtrs_clt_request); 3049 3030 3050 - int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index) 3031 + int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index) 3051 3032 { 3052 3033 /* If no path, return -1 for block layer not to try again */ 3053 3034 int cnt = -1; 3054 3035 struct rtrs_con *con; 3055 - struct rtrs_clt_sess *sess; 3036 + struct rtrs_clt_path *clt_path; 3056 3037 struct path_it it; 3057 3038 3058 3039 rcu_read_lock(); 3059 3040 for (path_it_init(&it, clt); 3060 - (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { 3061 - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) 3041 + (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { 3042 + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) 3062 3043 continue; 3063 3044 3064 - con = sess->s.con[index + 1]; 3045 + con = clt_path->s.con[index + 1]; 3065 3046 cnt = ib_process_cq_direct(con->cq, -1); 3066 3047 if (cnt) 3067 3048 break; ··· 3081 3062 * 0 on success 3082 3063 * -ECOMM no connection to the server 3083 3064 */ 3084 - int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr) 3065 + int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr) 3085 3066 { 3086 3067 if (!rtrs_clt_is_connected(clt)) 3087 3068 return -ECOMM; ··· 3096 3077 } 3097 3078 EXPORT_SYMBOL(rtrs_clt_query); 3098 3079 3099 - int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt, 3080 + int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt, 3100 3081 struct rtrs_addr *addr) 3101 3082 { 3102 - struct rtrs_clt_sess *sess; 3083 + struct rtrs_clt_path *clt_path; 3103 3084 int err; 3104 3085 3105 - sess = alloc_sess(clt, addr, nr_cpu_ids, 0); 3106 - if (IS_ERR(sess)) 3107 - return PTR_ERR(sess); 3086 + clt_path = alloc_path(clt, addr, nr_cpu_ids, 0); 3087 + if (IS_ERR(clt_path)) 3088 + return PTR_ERR(clt_path); 3108 3089 3109 3090 mutex_lock(&clt->paths_mutex); 3110 3091 if (clt->paths_num == 0) { ··· 3113 3094 * the addition of the first path is like a new session for 3114 3095 * the storage server 3115 3096 */ 3116 - sess->for_new_clt = 1; 3097 + clt_path->for_new_clt = 1; 3117 3098 } 3118 3099 3119 3100 mutex_unlock(&clt->paths_mutex); ··· 3123 3104 * IO will never grab it. Also it is very important to add 3124 3105 * path before init, since init fires LINK_CONNECTED event. 
3125 3106 */ 3126 - rtrs_clt_add_path_to_arr(sess); 3107 + rtrs_clt_add_path_to_arr(clt_path); 3127 3108 3128 - err = init_sess(sess); 3109 + err = init_path(clt_path); 3129 3110 if (err) 3130 - goto close_sess; 3111 + goto close_path; 3131 3112 3132 - err = rtrs_clt_create_sess_files(sess); 3113 + err = rtrs_clt_create_path_files(clt_path); 3133 3114 if (err) 3134 - goto close_sess; 3115 + goto close_path; 3135 3116 3136 3117 return 0; 3137 3118 3138 - close_sess: 3139 - rtrs_clt_remove_path_from_arr(sess); 3140 - rtrs_clt_close_conns(sess, true); 3141 - free_percpu(sess->stats->pcpu_stats); 3142 - kfree(sess->stats); 3143 - free_sess(sess); 3119 + close_path: 3120 + rtrs_clt_remove_path_from_arr(clt_path); 3121 + rtrs_clt_close_conns(clt_path, true); 3122 + free_percpu(clt_path->stats->pcpu_stats); 3123 + kfree(clt_path->stats); 3124 + free_path(clt_path); 3144 3125 3145 3126 return err; 3146 3127 }
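The most delicate piece of the rename above is behaviourally unchanged: when rtrs_clt_remove_path_from_arr() retires a path, each per-CPU "last used path" pointer may be moved concurrently by the IO side, so xchg_paths() hands it off with cmpxchg() rather than a plain store. Below is a minimal userspace sketch of that handoff, using C11 atomics in place of the kernel's cmpxchg() and a single slot instead of per-CPU data; all names here are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct path { const char *name; };

/* One slot per CPU in the kernel; a single slot is enough to show the idea. */
static _Atomic(struct path *) pcpu_path;

/* Remover side: redirect the slot only if it still points at the dying path. */
static bool xchg_paths(struct path *dying, struct path *next)
{
	struct path *expected = dying;

	return atomic_compare_exchange_strong(&pcpu_path, &expected, next);
}

int main(void)
{
	struct path a = { "path-a" }, b = { "path-b" };

	atomic_store(&pcpu_path, &a);
	/* The IO side races in and round-robins to another path first ... */
	atomic_store(&pcpu_path, &b);
	/* ... so the removal of path-a must not overwrite that choice. */
	if (!xchg_paths(&a, NULL))
		printf("slot already moved on, kept: %s\n",
		       atomic_load(&pcpu_path)->name);
	return 0;
}

A plain exchange would silently clobber the IO side's update; the compare-and-swap makes the remover lose the race gracefully, which is exactly why the kernel code tolerates a failed xchg_paths() and moves on.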
+21 -20
drivers/infiniband/ulp/rtrs/rtrs-clt.h
··· 124 124 u32 rkey; 125 125 }; 126 126 127 - struct rtrs_clt_sess { 128 - struct rtrs_sess s; 129 - struct rtrs_clt *clt; 127 + struct rtrs_clt_path { 128 + struct rtrs_path s; 129 + struct rtrs_clt_sess *clt; 130 130 wait_queue_head_t state_wq; 131 131 enum rtrs_clt_state state; 132 132 atomic_t connected_cnt; ··· 153 153 *mp_skip_entry; 154 154 }; 155 155 156 - struct rtrs_clt { 156 + struct rtrs_clt_sess { 157 157 struct list_head paths_list; /* rcu protected list */ 158 158 size_t paths_num; 159 - struct rtrs_clt_sess 159 + struct rtrs_clt_path 160 160 __rcu * __percpu *pcpu_path; 161 161 uuid_t paths_uuid; 162 162 int paths_up; ··· 186 186 return container_of(c, struct rtrs_clt_con, c); 187 187 } 188 188 189 - static inline struct rtrs_clt_sess *to_clt_sess(struct rtrs_sess *s) 189 + static inline struct rtrs_clt_path *to_clt_path(struct rtrs_path *s) 190 190 { 191 - return container_of(s, struct rtrs_clt_sess, s); 191 + return container_of(s, struct rtrs_clt_path, s); 192 192 } 193 193 194 - static inline int permit_size(struct rtrs_clt *clt) 194 + static inline int permit_size(struct rtrs_clt_sess *clt) 195 195 { 196 196 return sizeof(struct rtrs_permit) + clt->pdu_sz; 197 197 } 198 198 199 - static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx) 199 + static inline struct rtrs_permit *get_permit(struct rtrs_clt_sess *clt, 200 + int idx) 200 201 { 201 202 return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx); 202 203 } 203 204 204 - int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess); 205 - void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait); 206 - int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt, 205 + int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *path); 206 + void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait); 207 + int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt, 207 208 struct rtrs_addr *addr); 208 - int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess, 209 + int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path, 209 210 const struct attribute *sysfs_self); 210 211 211 - void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value); 212 - int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt); 213 - void free_sess(struct rtrs_clt_sess *sess); 212 + void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value); 213 + int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt); 214 + void free_path(struct rtrs_clt_path *clt_path); 214 215 215 216 /* rtrs-clt-stats.c */ 216 217 ··· 240 239 241 240 /* rtrs-clt-sysfs.c */ 242 241 243 - int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt); 244 - void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt); 242 + int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt); 243 + void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt); 245 244 246 - int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess); 247 - void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess, 245 + int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path); 246 + void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path, 248 247 const struct attribute *sysfs_self); 249 248 250 249 #endif /* RTRS_CLT_H */
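permit_size() and get_permit() above index a single flat allocation in which each struct rtrs_permit header is immediately followed by pdu_sz bytes of per-IO payload, so record i sits at a fixed stride. A minimal userspace sketch of that layout (the buffer, the mem_id field and the helper names are illustrative; only the stride arithmetic mirrors the header above):

/* Userspace sketch of the permit layout -- names other than the
 * stride arithmetic are illustrative, not the kernel's. */
#include <stdio.h>
#include <stdlib.h>

struct permit {
	unsigned int mem_id;
	/* pdu_sz bytes of per-IO payload follow each header */
};

static size_t permit_size(size_t pdu_sz)
{
	return sizeof(struct permit) + pdu_sz;
}

static struct permit *get_permit(char *permits, size_t pdu_sz, int idx)
{
	return (struct permit *)(permits + permit_size(pdu_sz) * idx);
}

int main(void)
{
	size_t pdu_sz = 64;
	int depth = 4;
	char *permits = calloc(depth, permit_size(pdu_sz));

	if (!permits)
		return 1;
	for (int i = 0; i < depth; i++)
		get_permit(permits, pdu_sz, i)->mem_id = i;

	printf("permit 2 has mem_id %u\n",
	       get_permit(permits, pdu_sz, 2)->mem_id);
	free(permits);
	return 0;
}

One allocation for the whole queue keeps the per-IO lookup to pure pointer arithmetic, with no allocation on the hot path.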
+9 -9
drivers/infiniband/ulp/rtrs/rtrs-pri.h
··· 90 90 }; 91 91 92 92 struct rtrs_con { 93 - struct rtrs_sess *sess; 93 + struct rtrs_path *path; 94 94 struct ib_qp *qp; 95 95 struct ib_cq *cq; 96 96 struct rdma_cm_id *cm_id; ··· 100 100 atomic_t sq_wr_avail; 101 101 }; 102 102 103 - struct rtrs_sess { 103 + struct rtrs_path { 104 104 struct list_head entry; 105 105 struct sockaddr_storage dst_addr; 106 106 struct sockaddr_storage src_addr; ··· 229 229 /** 230 230 * struct rtrs_msg_info_req 231 231 * @type: @RTRS_MSG_INFO_REQ 232 - * @sessname: Session name chosen by client 232 + * @pathname: Path name chosen by client 233 233 */ 234 234 struct rtrs_msg_info_req { 235 235 __le16 type; 236 - u8 sessname[NAME_MAX]; 236 + u8 pathname[NAME_MAX]; 237 237 u8 reserved[15]; 238 238 }; 239 239 ··· 313 313 314 314 int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe); 315 315 316 - int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con, 316 + int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con, 317 317 u32 max_send_sge, int cq_vector, int nr_cqe, 318 318 u32 max_send_wr, u32 max_recv_wr, 319 319 enum ib_poll_context poll_ctx); 320 320 void rtrs_cq_qp_destroy(struct rtrs_con *con); 321 321 322 - void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe, 322 + void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe, 323 323 unsigned int interval_ms, unsigned int missed_max, 324 324 void (*err_handler)(struct rtrs_con *con), 325 325 struct workqueue_struct *wq); 326 - void rtrs_start_hb(struct rtrs_sess *sess); 327 - void rtrs_stop_hb(struct rtrs_sess *sess); 328 - void rtrs_send_hb_ack(struct rtrs_sess *sess); 326 + void rtrs_start_hb(struct rtrs_path *path); 327 + void rtrs_stop_hb(struct rtrs_path *path); 328 + void rtrs_send_hb_ack(struct rtrs_path *path); 329 329 330 330 void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags, 331 331 struct rtrs_rdma_dev_pd *pool);
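After the rename, the shared struct rtrs_path stays embedded in the client- and server-specific structs, and to_clt_path()/to_srv_path() recover the outer struct with container_of(). A self-contained sketch of that upcast, with the structs trimmed to one field each and container_of() defined inline so it builds standalone:

/* Self-contained sketch of the to_clt_path()/to_srv_path() upcast;
 * struct fields are trimmed, container_of() is redefined locally. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rtrs_path {
	int con_num;		/* shared, transport-level state */
};

struct clt_path {
	struct rtrs_path s;	/* embedded shared part */
	int max_io_mb;		/* client-only state */
};

/* Generic code passes around struct rtrs_path *; client code
 * recovers its own struct from the embedded member. */
static int clt_max_io(struct rtrs_path *s)
{
	return container_of(s, struct clt_path, s)->max_io_mb;
}

int main(void)
{
	struct clt_path cp = { .s = { .con_num = 2 }, .max_io_mb = 128 };

	printf("max_io_mb via shared pointer: %d\n", clt_max_io(&cp.s));
	return 0;
}

This is why the common code in rtrs-pri.h only ever needs the rtrs_path pointer: each side upcasts back to its own type at the boundary.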
+61 -60
drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
··· 15 15 16 16 static void rtrs_srv_release(struct kobject *kobj) 17 17 { 18 - struct rtrs_srv_sess *sess; 18 + struct rtrs_srv_path *srv_path; 19 19 20 - sess = container_of(kobj, struct rtrs_srv_sess, kobj); 21 - kfree(sess); 20 + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); 21 + kfree(srv_path); 22 22 } 23 23 24 24 static struct kobj_type ktype = { ··· 36 36 struct kobj_attribute *attr, 37 37 const char *buf, size_t count) 38 38 { 39 - struct rtrs_srv_sess *sess; 40 - struct rtrs_sess *s; 39 + struct rtrs_srv_path *srv_path; 40 + struct rtrs_path *s; 41 41 char str[MAXHOSTNAMELEN]; 42 42 43 - sess = container_of(kobj, struct rtrs_srv_sess, kobj); 44 - s = &sess->s; 43 + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); 44 + s = &srv_path->s; 45 45 if (!sysfs_streq(buf, "1")) { 46 46 rtrs_err(s, "%s: invalid value: '%s'\n", 47 47 attr->attr.name, buf); 48 48 return -EINVAL; 49 49 } 50 50 51 - sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str)); 51 + sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, str, 52 + sizeof(str)); 52 53 53 54 rtrs_info(s, "disconnect for path %s requested\n", str); 54 55 /* first remove sysfs itself to avoid deadlock */ 55 - sysfs_remove_file_self(&sess->kobj, &attr->attr); 56 - close_sess(sess); 56 + sysfs_remove_file_self(&srv_path->kobj, &attr->attr); 57 + close_path(srv_path); 57 58 58 59 return count; 59 60 } ··· 67 66 struct kobj_attribute *attr, 68 67 char *page) 69 68 { 70 - struct rtrs_srv_sess *sess; 69 + struct rtrs_srv_path *srv_path; 71 70 struct rtrs_con *usr_con; 72 71 73 - sess = container_of(kobj, typeof(*sess), kobj); 74 - usr_con = sess->s.con[0]; 72 + srv_path = container_of(kobj, typeof(*srv_path), kobj); 73 + usr_con = srv_path->s.con[0]; 75 74 76 75 return sysfs_emit(page, "%u\n", usr_con->cm_id->port_num); 77 76 } ··· 83 82 struct kobj_attribute *attr, 84 83 char *page) 85 84 { 86 - struct rtrs_srv_sess *sess; 85 + struct rtrs_srv_path *srv_path; 87 86 88 - sess = container_of(kobj, struct rtrs_srv_sess, kobj); 87 + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); 89 88 90 - return sysfs_emit(page, "%s\n", sess->s.dev->ib_dev->name); 89 + return sysfs_emit(page, "%s\n", srv_path->s.dev->ib_dev->name); 91 90 } 92 91 93 92 static struct kobj_attribute rtrs_srv_hca_name_attr = ··· 97 96 struct kobj_attribute *attr, 98 97 char *page) 99 98 { 100 - struct rtrs_srv_sess *sess; 99 + struct rtrs_srv_path *srv_path; 101 100 int cnt; 102 101 103 - sess = container_of(kobj, struct rtrs_srv_sess, kobj); 104 - cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, 102 + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); 103 + cnt = sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, 105 104 page, PAGE_SIZE); 106 105 return cnt + sysfs_emit_at(page, cnt, "\n"); 107 106 } ··· 113 112 struct kobj_attribute *attr, 114 113 char *page) 115 114 { 116 - struct rtrs_srv_sess *sess; 115 + struct rtrs_srv_path *srv_path; 117 116 int len; 118 117 119 - sess = container_of(kobj, struct rtrs_srv_sess, kobj); 120 - len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page, 118 + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); 119 + len = sockaddr_to_str((struct sockaddr *)&srv_path->s.src_addr, page, 121 120 PAGE_SIZE); 122 121 len += sysfs_emit_at(page, len, "\n"); 123 122 return len; ··· 126 125 static struct kobj_attribute rtrs_srv_dst_addr_attr = 127 126 __ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL); 128 127 129 - static struct attribute 
*rtrs_srv_sess_attrs[] = { 128 + static struct attribute *rtrs_srv_path_attrs[] = { 130 129 &rtrs_srv_hca_name_attr.attr, 131 130 &rtrs_srv_hca_port_attr.attr, 132 131 &rtrs_srv_src_addr_attr.attr, ··· 135 134 NULL, 136 135 }; 137 136 138 - static const struct attribute_group rtrs_srv_sess_attr_group = { 139 - .attrs = rtrs_srv_sess_attrs, 137 + static const struct attribute_group rtrs_srv_path_attr_group = { 138 + .attrs = rtrs_srv_path_attrs, 140 139 }; 141 140 142 141 STAT_ATTR(struct rtrs_srv_stats, rdma, ··· 152 151 .attrs = rtrs_srv_stats_attrs, 153 152 }; 154 153 155 - static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess) 154 + static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_path *srv_path) 156 155 { 157 - struct rtrs_srv *srv = sess->srv; 156 + struct rtrs_srv_sess *srv = srv_path->srv; 158 157 int err = 0; 159 158 160 159 mutex_lock(&srv->paths_mutex); ··· 165 164 goto unlock; 166 165 } 167 166 srv->dev.class = rtrs_dev_class; 168 - err = dev_set_name(&srv->dev, "%s", sess->s.sessname); 167 + err = dev_set_name(&srv->dev, "%s", srv_path->s.sessname); 169 168 if (err) 170 169 goto unlock; 171 170 ··· 197 196 } 198 197 199 198 static void 200 - rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess) 199 + rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_path *srv_path) 201 200 { 202 - struct rtrs_srv *srv = sess->srv; 201 + struct rtrs_srv_sess *srv = srv_path->srv; 203 202 204 203 mutex_lock(&srv->paths_mutex); 205 204 if (!--srv->dev_ref) { ··· 214 213 } 215 214 } 216 215 217 - static void rtrs_srv_sess_stats_release(struct kobject *kobj) 216 + static void rtrs_srv_path_stats_release(struct kobject *kobj) 218 217 { 219 218 struct rtrs_srv_stats *stats; 220 219 ··· 225 224 226 225 static struct kobj_type ktype_stats = { 227 226 .sysfs_ops = &kobj_sysfs_ops, 228 - .release = rtrs_srv_sess_stats_release, 227 + .release = rtrs_srv_path_stats_release, 229 228 }; 230 229 231 - static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess) 230 + static int rtrs_srv_create_stats_files(struct rtrs_srv_path *srv_path) 232 231 { 233 232 int err; 234 - struct rtrs_sess *s = &sess->s; 233 + struct rtrs_path *s = &srv_path->s; 235 234 236 - err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats, 237 - &sess->kobj, "stats"); 235 + err = kobject_init_and_add(&srv_path->stats->kobj_stats, &ktype_stats, 236 + &srv_path->kobj, "stats"); 238 237 if (err) { 239 238 rtrs_err(s, "kobject_init_and_add(): %d\n", err); 240 - kobject_put(&sess->stats->kobj_stats); 239 + kobject_put(&srv_path->stats->kobj_stats); 241 240 return err; 242 241 } 243 - err = sysfs_create_group(&sess->stats->kobj_stats, 242 + err = sysfs_create_group(&srv_path->stats->kobj_stats, 244 243 &rtrs_srv_stats_attr_group); 245 244 if (err) { 246 245 rtrs_err(s, "sysfs_create_group(): %d\n", err); ··· 250 249 return 0; 251 250 252 251 err: 253 - kobject_del(&sess->stats->kobj_stats); 254 - kobject_put(&sess->stats->kobj_stats); 252 + kobject_del(&srv_path->stats->kobj_stats); 253 + kobject_put(&srv_path->stats->kobj_stats); 255 254 256 255 return err; 257 256 } 258 257 259 - int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess) 258 + int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path) 260 259 { 261 - struct rtrs_srv *srv = sess->srv; 262 - struct rtrs_sess *s = &sess->s; 260 + struct rtrs_srv_sess *srv = srv_path->srv; 261 + struct rtrs_path *s = &srv_path->s; 263 262 char str[NAME_MAX]; 264 263 int err; 265 264 struct rtrs_addr path = { 266 - 
.src = &sess->s.dst_addr, 267 - .dst = &sess->s.src_addr, 265 + .src = &srv_path->s.dst_addr, 266 + .dst = &srv_path->s.src_addr, 268 267 }; 269 268 270 269 rtrs_addr_to_str(&path, str, sizeof(str)); 271 - err = rtrs_srv_create_once_sysfs_root_folders(sess); 270 + err = rtrs_srv_create_once_sysfs_root_folders(srv_path); 272 271 if (err) 273 272 return err; 274 273 275 - err = kobject_init_and_add(&sess->kobj, &ktype, srv->kobj_paths, 274 + err = kobject_init_and_add(&srv_path->kobj, &ktype, srv->kobj_paths, 276 275 "%s", str); 277 276 if (err) { 278 277 rtrs_err(s, "kobject_init_and_add(): %d\n", err); 279 278 goto destroy_root; 280 279 } 281 - err = sysfs_create_group(&sess->kobj, &rtrs_srv_sess_attr_group); 280 + err = sysfs_create_group(&srv_path->kobj, &rtrs_srv_path_attr_group); 282 281 if (err) { 283 282 rtrs_err(s, "sysfs_create_group(): %d\n", err); 284 283 goto put_kobj; 285 284 } 286 - err = rtrs_srv_create_stats_files(sess); 285 + err = rtrs_srv_create_stats_files(srv_path); 287 286 if (err) 288 287 goto remove_group; 289 288 290 289 return 0; 291 290 292 291 remove_group: 293 - sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group); 292 + sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group); 294 293 put_kobj: 295 - kobject_del(&sess->kobj); 294 + kobject_del(&srv_path->kobj); 296 295 destroy_root: 297 - kobject_put(&sess->kobj); 298 - rtrs_srv_destroy_once_sysfs_root_folders(sess); 296 + kobject_put(&srv_path->kobj); 297 + rtrs_srv_destroy_once_sysfs_root_folders(srv_path); 299 298 300 299 return err; 301 300 } 302 301 303 - void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess) 302 + void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path) 304 303 { 305 - if (sess->kobj.state_in_sysfs) { 306 - kobject_del(&sess->stats->kobj_stats); 307 - kobject_put(&sess->stats->kobj_stats); 308 - sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group); 309 - kobject_put(&sess->kobj); 304 + if (srv_path->kobj.state_in_sysfs) { 305 + kobject_del(&srv_path->stats->kobj_stats); 306 + kobject_put(&srv_path->stats->kobj_stats); 307 + sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group); 308 + kobject_put(&srv_path->kobj); 310 309 311 - rtrs_srv_destroy_once_sysfs_root_folders(sess); 310 + rtrs_srv_destroy_once_sysfs_root_folders(srv_path); 312 311 } 313 312 }
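rtrs_srv_create_path_files() above uses the usual goto-based unwind: each setup step that succeeds gains a matching teardown label, and on a later failure the labels run in reverse order (sysfs_remove_group, then kobject_del, then kobject_put plus the root-folder teardown). A runnable sketch of just that shape, where step_*() and teardown_*() are stand-ins rather than real APIs:

/* Runnable sketch of the goto-unwind shape; step_*() and
 * teardown_*() are stand-ins, not real APIs. */
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* force a failure */

static void teardown_b(void) { puts("teardown b"); }
static void teardown_a(void) { puts("teardown a"); }

static int create(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		goto undo_a;
	err = step_c();
	if (err)
		goto undo_b;
	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
	return err;
}

int main(void)
{
	printf("create() = %d\n", create());
	return 0;
}

Falling through the labels guarantees every earlier step is undone exactly once, which is the same property the remove_group/put_kobj/destroy_root chain above relies on.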
+345 -335
drivers/infiniband/ulp/rtrs/rtrs-srv.c
··· 62 62 return container_of(c, struct rtrs_srv_con, c); 63 63 } 64 64 65 - static inline struct rtrs_srv_sess *to_srv_sess(struct rtrs_sess *s) 65 + static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s) 66 66 { 67 - return container_of(s, struct rtrs_srv_sess, s); 67 + return container_of(s, struct rtrs_srv_path, s); 68 68 } 69 69 70 - static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess, 70 + static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path, 71 71 enum rtrs_srv_state new_state) 72 72 { 73 73 enum rtrs_srv_state old_state; 74 74 bool changed = false; 75 75 76 - spin_lock_irq(&sess->state_lock); 77 - old_state = sess->state; 76 + spin_lock_irq(&srv_path->state_lock); 77 + old_state = srv_path->state; 78 78 switch (new_state) { 79 79 case RTRS_SRV_CONNECTED: 80 80 if (old_state == RTRS_SRV_CONNECTING) ··· 93 93 break; 94 94 } 95 95 if (changed) 96 - sess->state = new_state; 97 - spin_unlock_irq(&sess->state_lock); 96 + srv_path->state = new_state; 97 + spin_unlock_irq(&srv_path->state_lock); 98 98 99 99 return changed; 100 100 } ··· 106 106 kfree(id); 107 107 } 108 108 109 - static void rtrs_srv_free_ops_ids(struct rtrs_srv_sess *sess) 109 + static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path) 110 110 { 111 - struct rtrs_srv *srv = sess->srv; 111 + struct rtrs_srv_sess *srv = srv_path->srv; 112 112 int i; 113 113 114 - if (sess->ops_ids) { 114 + if (srv_path->ops_ids) { 115 115 for (i = 0; i < srv->queue_depth; i++) 116 - free_id(sess->ops_ids[i]); 117 - kfree(sess->ops_ids); 118 - sess->ops_ids = NULL; 116 + free_id(srv_path->ops_ids[i]); 117 + kfree(srv_path->ops_ids); 118 + srv_path->ops_ids = NULL; 119 119 } 120 120 } 121 121 ··· 127 127 128 128 static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref) 129 129 { 130 - struct rtrs_srv_sess *sess = container_of(ref, struct rtrs_srv_sess, ids_inflight_ref); 130 + struct rtrs_srv_path *srv_path = container_of(ref, 131 + struct rtrs_srv_path, 132 + ids_inflight_ref); 131 133 132 - percpu_ref_exit(&sess->ids_inflight_ref); 133 - complete(&sess->complete_done); 134 + percpu_ref_exit(&srv_path->ids_inflight_ref); 135 + complete(&srv_path->complete_done); 134 136 } 135 137 136 - static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess) 138 + static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path) 137 139 { 138 - struct rtrs_srv *srv = sess->srv; 140 + struct rtrs_srv_sess *srv = srv_path->srv; 139 141 struct rtrs_srv_op *id; 140 142 int i, ret; 141 143 142 - sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids), 143 - GFP_KERNEL); 144 - if (!sess->ops_ids) 144 + srv_path->ops_ids = kcalloc(srv->queue_depth, 145 + sizeof(*srv_path->ops_ids), 146 + GFP_KERNEL); 147 + if (!srv_path->ops_ids) 145 148 goto err; 146 149 147 150 for (i = 0; i < srv->queue_depth; ++i) { ··· 152 149 if (!id) 153 150 goto err; 154 151 155 - sess->ops_ids[i] = id; 152 + srv_path->ops_ids[i] = id; 156 153 } 157 154 158 - ret = percpu_ref_init(&sess->ids_inflight_ref, 155 + ret = percpu_ref_init(&srv_path->ids_inflight_ref, 159 156 rtrs_srv_inflight_ref_release, 0, GFP_KERNEL); 160 157 if (ret) { 161 158 pr_err("Percpu reference init failed\n"); 162 159 goto err; 163 160 } 164 - init_completion(&sess->complete_done); 161 + init_completion(&srv_path->complete_done); 165 162 166 163 return 0; 167 164 168 165 err: 169 - rtrs_srv_free_ops_ids(sess); 166 + rtrs_srv_free_ops_ids(srv_path); 170 167 return -ENOMEM; 171 168 } 172 169 173 - static inline void rtrs_srv_get_ops_ids(struct 
rtrs_srv_sess *sess) 170 + static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path) 174 171 { 175 - percpu_ref_get(&sess->ids_inflight_ref); 172 + percpu_ref_get(&srv_path->ids_inflight_ref); 176 173 } 177 174 178 - static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess) 175 + static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path) 179 176 { 180 - percpu_ref_put(&sess->ids_inflight_ref); 177 + percpu_ref_put(&srv_path->ids_inflight_ref); 181 178 } 182 179 183 180 static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc) 184 181 { 185 182 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); 186 - struct rtrs_sess *s = con->c.sess; 187 - struct rtrs_srv_sess *sess = to_srv_sess(s); 183 + struct rtrs_path *s = con->c.path; 184 + struct rtrs_srv_path *srv_path = to_srv_path(s); 188 185 189 186 if (wc->status != IB_WC_SUCCESS) { 190 187 rtrs_err(s, "REG MR failed: %s\n", 191 188 ib_wc_status_msg(wc->status)); 192 - close_sess(sess); 189 + close_path(srv_path); 193 190 return; 194 191 } 195 192 } ··· 200 197 201 198 static int rdma_write_sg(struct rtrs_srv_op *id) 202 199 { 203 - struct rtrs_sess *s = id->con->c.sess; 204 - struct rtrs_srv_sess *sess = to_srv_sess(s); 205 - dma_addr_t dma_addr = sess->dma_addr[id->msg_id]; 200 + struct rtrs_path *s = id->con->c.path; 201 + struct rtrs_srv_path *srv_path = to_srv_path(s); 202 + dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id]; 206 203 struct rtrs_srv_mr *srv_mr; 207 204 struct ib_send_wr inv_wr; 208 205 struct ib_rdma_wr imm_wr; ··· 236 233 return -EINVAL; 237 234 } 238 235 239 - plist->lkey = sess->s.dev->ib_pd->local_dma_lkey; 236 + plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey; 240 237 offset += plist->length; 241 238 242 239 wr->wr.sg_list = plist; ··· 287 284 if (always_invalidate) { 288 285 struct rtrs_msg_rkey_rsp *msg; 289 286 290 - srv_mr = &sess->mrs[id->msg_id]; 287 + srv_mr = &srv_path->mrs[id->msg_id]; 291 288 rwr.wr.opcode = IB_WR_REG_MR; 292 289 rwr.wr.wr_cqe = &local_reg_cqe; 293 290 rwr.wr.num_sge = 0; ··· 303 300 304 301 list.addr = srv_mr->iu->dma_addr; 305 302 list.length = sizeof(*msg); 306 - list.lkey = sess->s.dev->ib_pd->local_dma_lkey; 303 + list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey; 307 304 imm_wr.wr.sg_list = &list; 308 305 imm_wr.wr.num_sge = 1; 309 306 imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM; 310 - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, 307 + ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, 311 308 srv_mr->iu->dma_addr, 312 309 srv_mr->iu->size, DMA_TO_DEVICE); 313 310 } else { ··· 320 317 0, need_inval)); 321 318 322 319 imm_wr.wr.wr_cqe = &io_comp_cqe; 323 - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr, 320 + ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr, 324 321 offset, DMA_BIDIRECTIONAL); 325 322 326 323 err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL); ··· 344 341 static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, 345 342 int errno) 346 343 { 347 - struct rtrs_sess *s = con->c.sess; 348 - struct rtrs_srv_sess *sess = to_srv_sess(s); 344 + struct rtrs_path *s = con->c.path; 345 + struct rtrs_srv_path *srv_path = to_srv_path(s); 349 346 struct ib_send_wr inv_wr, *wr = NULL; 350 347 struct ib_rdma_wr imm_wr; 351 348 struct ib_reg_wr rwr; ··· 405 402 struct ib_sge list; 406 403 struct rtrs_msg_rkey_rsp *msg; 407 404 408 - srv_mr = &sess->mrs[id->msg_id]; 405 + srv_mr = &srv_path->mrs[id->msg_id]; 409 406 rwr.wr.next = &imm_wr.wr; 410 407 rwr.wr.opcode = 
IB_WR_REG_MR; 411 408 rwr.wr.wr_cqe = &local_reg_cqe; ··· 422 419 423 420 list.addr = srv_mr->iu->dma_addr; 424 421 list.length = sizeof(*msg); 425 - list.lkey = sess->s.dev->ib_pd->local_dma_lkey; 422 + list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey; 426 423 imm_wr.wr.sg_list = &list; 427 424 imm_wr.wr.num_sge = 1; 428 425 imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM; 429 - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, 426 + ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, 430 427 srv_mr->iu->dma_addr, 431 428 srv_mr->iu->size, DMA_TO_DEVICE); 432 429 } else { ··· 447 444 return err; 448 445 } 449 446 450 - void close_sess(struct rtrs_srv_sess *sess) 447 + void close_path(struct rtrs_srv_path *srv_path) 451 448 { 452 - if (rtrs_srv_change_state(sess, RTRS_SRV_CLOSING)) 453 - queue_work(rtrs_wq, &sess->close_work); 454 - WARN_ON(sess->state != RTRS_SRV_CLOSING); 449 + if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING)) 450 + queue_work(rtrs_wq, &srv_path->close_work); 451 + WARN_ON(srv_path->state != RTRS_SRV_CLOSING); 455 452 } 456 453 457 454 static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state) ··· 483 480 */ 484 481 bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status) 485 482 { 486 - struct rtrs_srv_sess *sess; 483 + struct rtrs_srv_path *srv_path; 487 484 struct rtrs_srv_con *con; 488 - struct rtrs_sess *s; 485 + struct rtrs_path *s; 489 486 int err; 490 487 491 488 if (WARN_ON(!id)) 492 489 return true; 493 490 494 491 con = id->con; 495 - s = con->c.sess; 496 - sess = to_srv_sess(s); 492 + s = con->c.path; 493 + srv_path = to_srv_path(s); 497 494 498 495 id->status = status; 499 496 500 - if (sess->state != RTRS_SRV_CONNECTED) { 497 + if (srv_path->state != RTRS_SRV_CONNECTED) { 501 498 rtrs_err_rl(s, 502 - "Sending I/O response failed, session %s is disconnected, sess state %s\n", 503 - kobject_name(&sess->kobj), 504 - rtrs_srv_state_str(sess->state)); 499 + "Sending I/O response failed, server path %s is disconnected, path state %s\n", 500 + kobject_name(&srv_path->kobj), 501 + rtrs_srv_state_str(srv_path->state)); 505 502 goto out; 506 503 } 507 504 if (always_invalidate) { 508 - struct rtrs_srv_mr *mr = &sess->mrs[id->msg_id]; 505 + struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id]; 509 506 510 507 ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey)); 511 508 } 512 509 if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) { 513 - rtrs_err(s, "IB send queue full: sess=%s cid=%d\n", 514 - kobject_name(&sess->kobj), 510 + rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n", 511 + kobject_name(&srv_path->kobj), 515 512 con->c.cid); 516 513 atomic_add(1, &con->c.sq_wr_avail); 517 514 spin_lock(&con->rsp_wr_wait_lock); ··· 526 523 err = rdma_write_sg(id); 527 524 528 525 if (err) { 529 - rtrs_err_rl(s, "IO response failed: %d: sess=%s\n", err, 530 - kobject_name(&sess->kobj)); 531 - close_sess(sess); 526 + rtrs_err_rl(s, "IO response failed: %d: srv_path=%s\n", err, 527 + kobject_name(&srv_path->kobj)); 528 + close_path(srv_path); 532 529 } 533 530 out: 534 - rtrs_srv_put_ops_ids(sess); 531 + rtrs_srv_put_ops_ids(srv_path); 535 532 return true; 536 533 } 537 534 EXPORT_SYMBOL(rtrs_srv_resp_rdma); ··· 541 538 * @srv: Session pointer 542 539 * @priv: The private pointer that is associated with the session. 
543 540 */ 544 - void rtrs_srv_set_sess_priv(struct rtrs_srv *srv, void *priv) 541 + void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv) 545 542 { 546 543 srv->priv = priv; 547 544 } 548 545 EXPORT_SYMBOL(rtrs_srv_set_sess_priv); 549 546 550 - static void unmap_cont_bufs(struct rtrs_srv_sess *sess) 547 + static void unmap_cont_bufs(struct rtrs_srv_path *srv_path) 551 548 { 552 549 int i; 553 550 554 - for (i = 0; i < sess->mrs_num; i++) { 551 + for (i = 0; i < srv_path->mrs_num; i++) { 555 552 struct rtrs_srv_mr *srv_mr; 556 553 557 - srv_mr = &sess->mrs[i]; 558 - rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1); 554 + srv_mr = &srv_path->mrs[i]; 555 + rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1); 559 556 ib_dereg_mr(srv_mr->mr); 560 - ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl, 557 + ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl, 561 558 srv_mr->sgt.nents, DMA_BIDIRECTIONAL); 562 559 sg_free_table(&srv_mr->sgt); 563 560 } 564 - kfree(sess->mrs); 561 + kfree(srv_path->mrs); 565 562 } 566 563 567 - static int map_cont_bufs(struct rtrs_srv_sess *sess) 564 + static int map_cont_bufs(struct rtrs_srv_path *srv_path) 568 565 { 569 - struct rtrs_srv *srv = sess->srv; 570 - struct rtrs_sess *ss = &sess->s; 566 + struct rtrs_srv_sess *srv = srv_path->srv; 567 + struct rtrs_path *ss = &srv_path->s; 571 568 int i, mri, err, mrs_num; 572 569 unsigned int chunk_bits; 573 570 int chunks_per_mr = 1; ··· 584 581 mrs_num = srv->queue_depth; 585 582 } else { 586 583 chunks_per_mr = 587 - sess->s.dev->ib_dev->attrs.max_fast_reg_page_list_len; 584 + srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len; 588 585 mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr); 589 586 chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num); 590 587 } 591 588 592 - sess->mrs = kcalloc(mrs_num, sizeof(*sess->mrs), GFP_KERNEL); 593 - if (!sess->mrs) 589 + srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL); 590 + if (!srv_path->mrs) 594 591 return -ENOMEM; 595 592 596 - sess->mrs_num = mrs_num; 593 + srv_path->mrs_num = mrs_num; 597 594 598 595 for (mri = 0; mri < mrs_num; mri++) { 599 - struct rtrs_srv_mr *srv_mr = &sess->mrs[mri]; 596 + struct rtrs_srv_mr *srv_mr = &srv_path->mrs[mri]; 600 597 struct sg_table *sgt = &srv_mr->sgt; 601 598 struct scatterlist *s; 602 599 struct ib_mr *mr; ··· 615 612 sg_set_page(s, srv->chunks[chunks + i], 616 613 max_chunk_size, 0); 617 614 618 - nr = ib_dma_map_sg(sess->s.dev->ib_dev, sgt->sgl, 615 + nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl, 619 616 sgt->nents, DMA_BIDIRECTIONAL); 620 617 if (nr < sgt->nents) { 621 618 err = nr < 0 ? 
nr : -EINVAL; 622 619 goto free_sg; 623 620 } 624 - mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, 621 + mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, 625 622 sgt->nents); 626 623 if (IS_ERR(mr)) { 627 624 err = PTR_ERR(mr); ··· 637 634 if (always_invalidate) { 638 635 srv_mr->iu = rtrs_iu_alloc(1, 639 636 sizeof(struct rtrs_msg_rkey_rsp), 640 - GFP_KERNEL, sess->s.dev->ib_dev, 637 + GFP_KERNEL, srv_path->s.dev->ib_dev, 641 638 DMA_TO_DEVICE, rtrs_srv_rdma_done); 642 639 if (!srv_mr->iu) { 643 640 err = -ENOMEM; ··· 647 644 } 648 645 /* Eventually dma addr for each chunk can be cached */ 649 646 for_each_sg(sgt->sgl, s, sgt->orig_nents, i) 650 - sess->dma_addr[chunks + i] = sg_dma_address(s); 647 + srv_path->dma_addr[chunks + i] = sg_dma_address(s); 651 648 652 649 ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); 653 650 srv_mr->mr = mr; ··· 655 652 continue; 656 653 err: 657 654 while (mri--) { 658 - srv_mr = &sess->mrs[mri]; 655 + srv_mr = &srv_path->mrs[mri]; 659 656 sgt = &srv_mr->sgt; 660 657 mr = srv_mr->mr; 661 - rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1); 658 + rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1); 662 659 dereg_mr: 663 660 ib_dereg_mr(mr); 664 661 unmap_sg: 665 - ib_dma_unmap_sg(sess->s.dev->ib_dev, sgt->sgl, 662 + ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl, 666 663 sgt->nents, DMA_BIDIRECTIONAL); 667 664 free_sg: 668 665 sg_free_table(sgt); 669 666 } 670 - kfree(sess->mrs); 667 + kfree(srv_path->mrs); 671 668 672 669 return err; 673 670 } 674 671 675 672 chunk_bits = ilog2(srv->queue_depth - 1) + 1; 676 - sess->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits); 673 + srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits); 677 674 678 675 return 0; 679 676 } 680 677 681 678 static void rtrs_srv_hb_err_handler(struct rtrs_con *c) 682 679 { 683 - close_sess(to_srv_sess(c->sess)); 680 + close_path(to_srv_path(c->path)); 684 681 } 685 682 686 - static void rtrs_srv_init_hb(struct rtrs_srv_sess *sess) 683 + static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path) 687 684 { 688 - rtrs_init_hb(&sess->s, &io_comp_cqe, 685 + rtrs_init_hb(&srv_path->s, &io_comp_cqe, 689 686 RTRS_HB_INTERVAL_MS, 690 687 RTRS_HB_MISSED_MAX, 691 688 rtrs_srv_hb_err_handler, 692 689 rtrs_wq); 693 690 } 694 691 695 - static void rtrs_srv_start_hb(struct rtrs_srv_sess *sess) 692 + static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path) 696 693 { 697 - rtrs_start_hb(&sess->s); 694 + rtrs_start_hb(&srv_path->s); 698 695 } 699 696 700 - static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess) 697 + static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path) 701 698 { 702 - rtrs_stop_hb(&sess->s); 699 + rtrs_stop_hb(&srv_path->s); 703 700 } 704 701 705 702 static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) 706 703 { 707 704 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); 708 - struct rtrs_sess *s = con->c.sess; 709 - struct rtrs_srv_sess *sess = to_srv_sess(s); 705 + struct rtrs_path *s = con->c.path; 706 + struct rtrs_srv_path *srv_path = to_srv_path(s); 710 707 struct rtrs_iu *iu; 711 708 712 709 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); 713 - rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); 710 + rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); 714 711 715 712 if (wc->status != IB_WC_SUCCESS) { 716 713 rtrs_err(s, "Sess info response send failed: %s\n", 717 714 ib_wc_status_msg(wc->status)); 718 - close_sess(sess); 715 + close_path(srv_path); 719 716 return; 720 717 } 721 718 WARN_ON(wc->opcode != IB_WC_SEND); 722 719 } 723 
720 724 - static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess) 721 + static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path) 725 722 { 726 - struct rtrs_srv *srv = sess->srv; 723 + struct rtrs_srv_sess *srv = srv_path->srv; 727 724 struct rtrs_srv_ctx *ctx = srv->ctx; 728 725 int up; 729 726 ··· 734 731 mutex_unlock(&srv->paths_ev_mutex); 735 732 736 733 /* Mark session as established */ 737 - sess->established = true; 734 + srv_path->established = true; 738 735 } 739 736 740 - static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess) 737 + static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path) 741 738 { 742 - struct rtrs_srv *srv = sess->srv; 739 + struct rtrs_srv_sess *srv = srv_path->srv; 743 740 struct rtrs_srv_ctx *ctx = srv->ctx; 744 741 745 - if (!sess->established) 742 + if (!srv_path->established) 746 743 return; 747 744 748 - sess->established = false; 745 + srv_path->established = false; 749 746 mutex_lock(&srv->paths_ev_mutex); 750 747 WARN_ON(!srv->paths_up); 751 748 if (--srv->paths_up == 0) ··· 753 750 mutex_unlock(&srv->paths_ev_mutex); 754 751 } 755 752 756 - static bool exist_sessname(struct rtrs_srv_ctx *ctx, 757 - const char *sessname, const uuid_t *path_uuid) 753 + static bool exist_pathname(struct rtrs_srv_ctx *ctx, 754 + const char *pathname, const uuid_t *path_uuid) 758 755 { 759 - struct rtrs_srv *srv; 760 - struct rtrs_srv_sess *sess; 756 + struct rtrs_srv_sess *srv; 757 + struct rtrs_srv_path *srv_path; 761 758 bool found = false; 762 759 763 760 mutex_lock(&ctx->srv_mutex); ··· 770 767 continue; 771 768 } 772 769 773 - list_for_each_entry(sess, &srv->paths_list, s.entry) { 774 - if (strlen(sess->s.sessname) == strlen(sessname) && 775 - !strcmp(sess->s.sessname, sessname)) { 770 + list_for_each_entry(srv_path, &srv->paths_list, s.entry) { 771 + if (strlen(srv_path->s.sessname) == strlen(pathname) && 772 + !strcmp(srv_path->s.sessname, pathname)) { 776 773 found = true; 777 774 break; 778 775 } ··· 785 782 return found; 786 783 } 787 784 788 - static int post_recv_sess(struct rtrs_srv_sess *sess); 785 + static int post_recv_path(struct rtrs_srv_path *srv_path); 789 786 static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno); 790 787 791 788 static int process_info_req(struct rtrs_srv_con *con, 792 789 struct rtrs_msg_info_req *msg) 793 790 { 794 - struct rtrs_sess *s = con->c.sess; 795 - struct rtrs_srv_sess *sess = to_srv_sess(s); 791 + struct rtrs_path *s = con->c.path; 792 + struct rtrs_srv_path *srv_path = to_srv_path(s); 796 793 struct ib_send_wr *reg_wr = NULL; 797 794 struct rtrs_msg_info_rsp *rsp; 798 795 struct rtrs_iu *tx_iu; ··· 800 797 int mri, err; 801 798 size_t tx_sz; 802 799 803 - err = post_recv_sess(sess); 800 + err = post_recv_path(srv_path); 804 801 if (err) { 805 - rtrs_err(s, "post_recv_sess(), err: %d\n", err); 802 + rtrs_err(s, "post_recv_path(), err: %d\n", err); 806 803 return err; 807 804 } 808 805 809 - if (strchr(msg->sessname, '/') || strchr(msg->sessname, '.')) { 810 - rtrs_err(s, "sessname cannot contain / and .\n"); 806 + if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) { 807 + rtrs_err(s, "pathname cannot contain / and .\n"); 811 808 return -EINVAL; 812 809 } 813 810 814 - if (exist_sessname(sess->srv->ctx, 815 - msg->sessname, &sess->srv->paths_uuid)) { 816 - rtrs_err(s, "sessname is duplicated: %s\n", msg->sessname); 811 + if (exist_pathname(srv_path->srv->ctx, 812 + msg->pathname, &srv_path->srv->paths_uuid)) { 813 + rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname); 817 814 
return -EPERM; 818 815 } 819 - strscpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname)); 816 + strscpy(srv_path->s.sessname, msg->pathname, 817 + sizeof(srv_path->s.sessname)); 820 818 821 - rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL); 819 + rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL); 822 820 if (!rwr) 823 821 return -ENOMEM; 824 822 825 823 tx_sz = sizeof(*rsp); 826 - tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num; 827 - tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev, 824 + tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num; 825 + tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev, 828 826 DMA_TO_DEVICE, rtrs_srv_info_rsp_done); 829 827 if (!tx_iu) { 830 828 err = -ENOMEM; ··· 834 830 835 831 rsp = tx_iu->buf; 836 832 rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP); 837 - rsp->sg_cnt = cpu_to_le16(sess->mrs_num); 833 + rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num); 838 834 839 - for (mri = 0; mri < sess->mrs_num; mri++) { 840 - struct ib_mr *mr = sess->mrs[mri].mr; 835 + for (mri = 0; mri < srv_path->mrs_num; mri++) { 836 + struct ib_mr *mr = srv_path->mrs[mri].mr; 841 837 842 838 rsp->desc[mri].addr = cpu_to_le64(mr->iova); 843 839 rsp->desc[mri].key = cpu_to_le32(mr->rkey); ··· 858 854 reg_wr = &rwr[mri].wr; 859 855 } 860 856 861 - err = rtrs_srv_create_sess_files(sess); 857 + err = rtrs_srv_create_path_files(srv_path); 862 858 if (err) 863 859 goto iu_free; 864 - kobject_get(&sess->kobj); 865 - get_device(&sess->srv->dev); 866 - rtrs_srv_change_state(sess, RTRS_SRV_CONNECTED); 867 - rtrs_srv_start_hb(sess); 860 + kobject_get(&srv_path->kobj); 861 + get_device(&srv_path->srv->dev); 862 + rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED); 863 + rtrs_srv_start_hb(srv_path); 868 864 869 865 /* 870 866 * We do not account number of established connections at the current ··· 872 868 * all connections are successfully established. Thus, simply notify 873 869 * listener with a proper event if we are the first path. 
874 870 */ 875 - rtrs_srv_sess_up(sess); 871 + rtrs_srv_path_up(srv_path); 876 872 877 - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr, 873 + ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, 874 + tx_iu->dma_addr, 878 875 tx_iu->size, DMA_TO_DEVICE); 879 876 880 877 /* Send info response */ ··· 883 878 if (err) { 884 879 rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err); 885 880 iu_free: 886 - rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1); 881 + rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1); 887 882 } 888 883 rwr_free: 889 884 kfree(rwr); ··· 894 889 static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc) 895 890 { 896 891 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); 897 - struct rtrs_sess *s = con->c.sess; 898 - struct rtrs_srv_sess *sess = to_srv_sess(s); 892 + struct rtrs_path *s = con->c.path; 893 + struct rtrs_srv_path *srv_path = to_srv_path(s); 899 894 struct rtrs_msg_info_req *msg; 900 895 struct rtrs_iu *iu; 901 896 int err; ··· 915 910 wc->byte_len); 916 911 goto close; 917 912 } 918 - ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, 913 + ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr, 919 914 iu->size, DMA_FROM_DEVICE); 920 915 msg = iu->buf; 921 916 if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) { ··· 928 923 goto close; 929 924 930 925 out: 931 - rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); 926 + rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); 932 927 return; 933 928 close: 934 - close_sess(sess); 929 + close_path(srv_path); 935 930 goto out; 936 931 } 937 932 938 933 static int post_recv_info_req(struct rtrs_srv_con *con) 939 934 { 940 - struct rtrs_sess *s = con->c.sess; 941 - struct rtrs_srv_sess *sess = to_srv_sess(s); 935 + struct rtrs_path *s = con->c.path; 936 + struct rtrs_srv_path *srv_path = to_srv_path(s); 942 937 struct rtrs_iu *rx_iu; 943 938 int err; 944 939 945 940 rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), 946 - GFP_KERNEL, sess->s.dev->ib_dev, 941 + GFP_KERNEL, srv_path->s.dev->ib_dev, 947 942 DMA_FROM_DEVICE, rtrs_srv_info_req_done); 948 943 if (!rx_iu) 949 944 return -ENOMEM; ··· 951 946 err = rtrs_iu_post_recv(&con->c, rx_iu); 952 947 if (err) { 953 948 rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err); 954 - rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1); 949 + rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1); 955 950 return err; 956 951 } 957 952 ··· 971 966 return 0; 972 967 } 973 968 974 - static int post_recv_sess(struct rtrs_srv_sess *sess) 969 + static int post_recv_path(struct rtrs_srv_path *srv_path) 975 970 { 976 - struct rtrs_srv *srv = sess->srv; 977 - struct rtrs_sess *s = &sess->s; 971 + struct rtrs_srv_sess *srv = srv_path->srv; 972 + struct rtrs_path *s = &srv_path->s; 978 973 size_t q_size; 979 974 int err, cid; 980 975 981 - for (cid = 0; cid < sess->s.con_num; cid++) { 976 + for (cid = 0; cid < srv_path->s.con_num; cid++) { 982 977 if (cid == 0) 983 978 q_size = SERVICE_CON_QUEUE_DEPTH; 984 979 else 985 980 q_size = srv->queue_depth; 986 981 987 - err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size); 982 + err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size); 988 983 if (err) { 989 984 rtrs_err(s, "post_recv_io(), err: %d\n", err); 990 985 return err; ··· 998 993 struct rtrs_msg_rdma_read *msg, 999 994 u32 buf_id, u32 off) 1000 995 { 1001 - struct rtrs_sess *s = con->c.sess; 1002 - struct rtrs_srv_sess *sess = to_srv_sess(s); 1003 - struct rtrs_srv *srv = sess->srv; 996 + struct rtrs_path *s = con->c.path; 997 + struct 
rtrs_srv_path *srv_path = to_srv_path(s); 998 + struct rtrs_srv_sess *srv = srv_path->srv; 1004 999 struct rtrs_srv_ctx *ctx = srv->ctx; 1005 1000 struct rtrs_srv_op *id; 1006 1001 ··· 1008 1003 void *data; 1009 1004 int ret; 1010 1005 1011 - if (sess->state != RTRS_SRV_CONNECTED) { 1006 + if (srv_path->state != RTRS_SRV_CONNECTED) { 1012 1007 rtrs_err_rl(s, 1013 1008 "Processing read request failed, session is disconnected, sess state %s\n", 1014 - rtrs_srv_state_str(sess->state)); 1009 + rtrs_srv_state_str(srv_path->state)); 1015 1010 return; 1016 1011 } 1017 1012 if (msg->sg_cnt != 1 && msg->sg_cnt != 0) { ··· 1019 1014 "Processing read request failed, invalid message\n"); 1020 1015 return; 1021 1016 } 1022 - rtrs_srv_get_ops_ids(sess); 1023 - rtrs_srv_update_rdma_stats(sess->stats, off, READ); 1024 - id = sess->ops_ids[buf_id]; 1017 + rtrs_srv_get_ops_ids(srv_path); 1018 + rtrs_srv_update_rdma_stats(srv_path->stats, off, READ); 1019 + id = srv_path->ops_ids[buf_id]; 1025 1020 id->con = con; 1026 1021 id->dir = READ; 1027 1022 id->msg_id = buf_id; ··· 1047 1042 rtrs_err_rl(s, 1048 1043 "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n", 1049 1044 buf_id, ret); 1050 - close_sess(sess); 1045 + close_path(srv_path); 1051 1046 } 1052 - rtrs_srv_put_ops_ids(sess); 1047 + rtrs_srv_put_ops_ids(srv_path); 1053 1048 } 1054 1049 1055 1050 static void process_write(struct rtrs_srv_con *con, 1056 1051 struct rtrs_msg_rdma_write *req, 1057 1052 u32 buf_id, u32 off) 1058 1053 { 1059 - struct rtrs_sess *s = con->c.sess; 1060 - struct rtrs_srv_sess *sess = to_srv_sess(s); 1061 - struct rtrs_srv *srv = sess->srv; 1054 + struct rtrs_path *s = con->c.path; 1055 + struct rtrs_srv_path *srv_path = to_srv_path(s); 1056 + struct rtrs_srv_sess *srv = srv_path->srv; 1062 1057 struct rtrs_srv_ctx *ctx = srv->ctx; 1063 1058 struct rtrs_srv_op *id; 1064 1059 ··· 1066 1061 void *data; 1067 1062 int ret; 1068 1063 1069 - if (sess->state != RTRS_SRV_CONNECTED) { 1064 + if (srv_path->state != RTRS_SRV_CONNECTED) { 1070 1065 rtrs_err_rl(s, 1071 1066 "Processing write request failed, session is disconnected, sess state %s\n", 1072 - rtrs_srv_state_str(sess->state)); 1067 + rtrs_srv_state_str(srv_path->state)); 1073 1068 return; 1074 1069 } 1075 - rtrs_srv_get_ops_ids(sess); 1076 - rtrs_srv_update_rdma_stats(sess->stats, off, WRITE); 1077 - id = sess->ops_ids[buf_id]; 1070 + rtrs_srv_get_ops_ids(srv_path); 1071 + rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE); 1072 + id = srv_path->ops_ids[buf_id]; 1078 1073 id->con = con; 1079 1074 id->dir = WRITE; 1080 1075 id->msg_id = buf_id; ··· 1099 1094 rtrs_err_rl(s, 1100 1095 "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n", 1101 1096 buf_id, ret); 1102 - close_sess(sess); 1097 + close_path(srv_path); 1103 1098 } 1104 - rtrs_srv_put_ops_ids(sess); 1099 + rtrs_srv_put_ops_ids(srv_path); 1105 1100 } 1106 1101 1107 1102 static void process_io_req(struct rtrs_srv_con *con, void *msg, 1108 1103 u32 id, u32 off) 1109 1104 { 1110 - struct rtrs_sess *s = con->c.sess; 1111 - struct rtrs_srv_sess *sess = to_srv_sess(s); 1105 + struct rtrs_path *s = con->c.path; 1106 + struct rtrs_srv_path *srv_path = to_srv_path(s); 1112 1107 struct rtrs_msg_rdma_hdr *hdr; 1113 1108 unsigned int type; 1114 1109 1115 - ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, sess->dma_addr[id], 1110 + ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, 1111 + srv_path->dma_addr[id], 1116 1112 max_chunk_size, DMA_BIDIRECTIONAL); 1117 1113 hdr = msg; 
1118 1114 type = le16_to_cpu(hdr->type); ··· 1135 1129 return; 1136 1130 1137 1131 err: 1138 - close_sess(sess); 1132 + close_path(srv_path); 1139 1133 } 1140 1134 1141 1135 static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) ··· 1143 1137 struct rtrs_srv_mr *mr = 1144 1138 container_of(wc->wr_cqe, typeof(*mr), inv_cqe); 1145 1139 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); 1146 - struct rtrs_sess *s = con->c.sess; 1147 - struct rtrs_srv_sess *sess = to_srv_sess(s); 1148 - struct rtrs_srv *srv = sess->srv; 1140 + struct rtrs_path *s = con->c.path; 1141 + struct rtrs_srv_path *srv_path = to_srv_path(s); 1142 + struct rtrs_srv_sess *srv = srv_path->srv; 1149 1143 u32 msg_id, off; 1150 1144 void *data; 1151 1145 1152 1146 if (wc->status != IB_WC_SUCCESS) { 1153 1147 rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n", 1154 1148 ib_wc_status_msg(wc->status)); 1155 - close_sess(sess); 1149 + close_path(srv_path); 1156 1150 } 1157 1151 msg_id = mr->msg_id; 1158 1152 off = mr->msg_off; ··· 1200 1194 static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) 1201 1195 { 1202 1196 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); 1203 - struct rtrs_sess *s = con->c.sess; 1204 - struct rtrs_srv_sess *sess = to_srv_sess(s); 1205 - struct rtrs_srv *srv = sess->srv; 1197 + struct rtrs_path *s = con->c.path; 1198 + struct rtrs_srv_path *srv_path = to_srv_path(s); 1199 + struct rtrs_srv_sess *srv = srv_path->srv; 1206 1200 u32 imm_type, imm_payload; 1207 1201 int err; 1208 1202 ··· 1212 1206 "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n", 1213 1207 ib_wc_status_msg(wc->status), wc->wr_cqe, 1214 1208 wc->opcode, wc->vendor_err, wc->byte_len); 1215 - close_sess(sess); 1209 + close_path(srv_path); 1216 1210 } 1217 1211 return; 1218 1212 } ··· 1228 1222 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); 1229 1223 if (err) { 1230 1224 rtrs_err(s, "rtrs_post_recv(), err: %d\n", err); 1231 - close_sess(sess); 1225 + close_path(srv_path); 1232 1226 break; 1233 1227 } 1234 1228 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), ··· 1237 1231 u32 msg_id, off; 1238 1232 void *data; 1239 1233 1240 - msg_id = imm_payload >> sess->mem_bits; 1241 - off = imm_payload & ((1 << sess->mem_bits) - 1); 1234 + msg_id = imm_payload >> srv_path->mem_bits; 1235 + off = imm_payload & ((1 << srv_path->mem_bits) - 1); 1242 1236 if (msg_id >= srv->queue_depth || off >= max_chunk_size) { 1243 1237 rtrs_err(s, "Wrong msg_id %u, off %u\n", 1244 1238 msg_id, off); 1245 - close_sess(sess); 1239 + close_path(srv_path); 1246 1240 return; 1247 1241 } 1248 1242 if (always_invalidate) { 1249 - struct rtrs_srv_mr *mr = &sess->mrs[msg_id]; 1243 + struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id]; 1250 1244 1251 1245 mr->msg_off = off; 1252 1246 mr->msg_id = msg_id; ··· 1254 1248 if (err) { 1255 1249 rtrs_err(s, "rtrs_post_recv(), err: %d\n", 1256 1250 err); 1257 - close_sess(sess); 1251 + close_path(srv_path); 1258 1252 break; 1259 1253 } 1260 1254 } else { ··· 1263 1257 } 1264 1258 } else if (imm_type == RTRS_HB_MSG_IMM) { 1265 1259 WARN_ON(con->c.cid); 1266 - rtrs_send_hb_ack(&sess->s); 1260 + rtrs_send_hb_ack(&srv_path->s); 1267 1261 } else if (imm_type == RTRS_HB_ACK_IMM) { 1268 1262 WARN_ON(con->c.cid); 1269 - sess->s.hb_missed_cnt = 0; 1263 + srv_path->s.hb_missed_cnt = 0; 1270 1264 } else { 1271 1265 rtrs_wrn(s, "Unknown IMM type %u\n", imm_type); 1272 1266 } ··· 1290 1284 } 1291 1285 1292 1286 /** 1293 - * rtrs_srv_get_sess_name() - Get rtrs_srv peer hostname. 
1287 + * rtrs_srv_get_path_name() - Get rtrs_srv peer hostname. 1294 1288 * @srv: Session 1295 - * @sessname: Sessname buffer 1289 + * @pathname: Pathname buffer 1296 1290 * @len: Length of sessname buffer 1297 1291 */ 1298 - int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len) 1292 + int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname, 1293 + size_t len) 1299 1294 { 1300 - struct rtrs_srv_sess *sess; 1295 + struct rtrs_srv_path *srv_path; 1301 1296 int err = -ENOTCONN; 1302 1297 1303 1298 mutex_lock(&srv->paths_mutex); 1304 - list_for_each_entry(sess, &srv->paths_list, s.entry) { 1305 - if (sess->state != RTRS_SRV_CONNECTED) 1299 + list_for_each_entry(srv_path, &srv->paths_list, s.entry) { 1300 + if (srv_path->state != RTRS_SRV_CONNECTED) 1306 1301 continue; 1307 - strscpy(sessname, sess->s.sessname, 1308 - min_t(size_t, sizeof(sess->s.sessname), len)); 1302 + strscpy(pathname, srv_path->s.sessname, 1303 + min_t(size_t, sizeof(srv_path->s.sessname), len)); 1309 1304 err = 0; 1310 1305 break; 1311 1306 } ··· 1314 1307 1315 1308 return err; 1316 1309 } 1317 - EXPORT_SYMBOL(rtrs_srv_get_sess_name); 1310 + EXPORT_SYMBOL(rtrs_srv_get_path_name); 1318 1311 1319 1312 /** 1320 1313 * rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth. 1321 1314 * @srv: Session 1322 1315 */ 1323 - int rtrs_srv_get_queue_depth(struct rtrs_srv *srv) 1316 + int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv) 1324 1317 { 1325 1318 return srv->queue_depth; 1326 1319 } 1327 1320 EXPORT_SYMBOL(rtrs_srv_get_queue_depth); 1328 1321 1329 - static int find_next_bit_ring(struct rtrs_srv_sess *sess) 1322 + static int find_next_bit_ring(struct rtrs_srv_path *srv_path) 1330 1323 { 1331 - struct ib_device *ib_dev = sess->s.dev->ib_dev; 1324 + struct ib_device *ib_dev = srv_path->s.dev->ib_dev; 1332 1325 int v; 1333 1326 1334 - v = cpumask_next(sess->cur_cq_vector, &cq_affinity_mask); 1327 + v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask); 1335 1328 if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors) 1336 1329 v = cpumask_first(&cq_affinity_mask); 1337 1330 return v; 1338 1331 } 1339 1332 1340 - static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess) 1333 + static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path) 1341 1334 { 1342 - sess->cur_cq_vector = find_next_bit_ring(sess); 1335 + srv_path->cur_cq_vector = find_next_bit_ring(srv_path); 1343 1336 1344 - return sess->cur_cq_vector; 1337 + return srv_path->cur_cq_vector; 1345 1338 } 1346 1339 1347 1340 static void rtrs_srv_dev_release(struct device *dev) 1348 1341 { 1349 - struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev); 1342 + struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess, 1343 + dev); 1350 1344 1351 1345 kfree(srv); 1352 1346 } 1353 1347 1354 - static void free_srv(struct rtrs_srv *srv) 1348 + static void free_srv(struct rtrs_srv_sess *srv) 1355 1349 { 1356 1350 int i; 1357 1351 ··· 1366 1358 put_device(&srv->dev); 1367 1359 } 1368 1360 1369 - static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx, 1361 + static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx, 1370 1362 const uuid_t *paths_uuid, 1371 1363 bool first_conn) 1372 1364 { 1373 - struct rtrs_srv *srv; 1365 + struct rtrs_srv_sess *srv; 1374 1366 int i; 1375 1367 1376 1368 mutex_lock(&ctx->srv_mutex); ··· 1432 1424 return ERR_PTR(-ENOMEM); 1433 1425 } 1434 1426 1435 - static void put_srv(struct rtrs_srv *srv) 1427 + static void put_srv(struct rtrs_srv_sess *srv) 1436 1428 
{ 1437 1429 if (refcount_dec_and_test(&srv->refcount)) { 1438 1430 struct rtrs_srv_ctx *ctx = srv->ctx; ··· 1446 1438 } 1447 1439 } 1448 1440 1449 - static void __add_path_to_srv(struct rtrs_srv *srv, 1450 - struct rtrs_srv_sess *sess) 1441 + static void __add_path_to_srv(struct rtrs_srv_sess *srv, 1442 + struct rtrs_srv_path *srv_path) 1451 1443 { 1452 - list_add_tail(&sess->s.entry, &srv->paths_list); 1444 + list_add_tail(&srv_path->s.entry, &srv->paths_list); 1453 1445 srv->paths_num++; 1454 1446 WARN_ON(srv->paths_num >= MAX_PATHS_NUM); 1455 1447 } 1456 1448 1457 - static void del_path_from_srv(struct rtrs_srv_sess *sess) 1449 + static void del_path_from_srv(struct rtrs_srv_path *srv_path) 1458 1450 { 1459 - struct rtrs_srv *srv = sess->srv; 1451 + struct rtrs_srv_sess *srv = srv_path->srv; 1460 1452 1461 1453 if (WARN_ON(!srv)) 1462 1454 return; 1463 1455 1464 1456 mutex_lock(&srv->paths_mutex); 1465 - list_del(&sess->s.entry); 1457 + list_del(&srv_path->s.entry); 1466 1458 WARN_ON(!srv->paths_num); 1467 1459 srv->paths_num--; 1468 1460 mutex_unlock(&srv->paths_mutex); ··· 1492 1484 } 1493 1485 } 1494 1486 1495 - static bool __is_path_w_addr_exists(struct rtrs_srv *srv, 1487 + static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv, 1496 1488 struct rdma_addr *addr) 1497 1489 { 1498 - struct rtrs_srv_sess *sess; 1490 + struct rtrs_srv_path *srv_path; 1499 1491 1500 - list_for_each_entry(sess, &srv->paths_list, s.entry) 1501 - if (!sockaddr_cmp((struct sockaddr *)&sess->s.dst_addr, 1492 + list_for_each_entry(srv_path, &srv->paths_list, s.entry) 1493 + if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr, 1502 1494 (struct sockaddr *)&addr->dst_addr) && 1503 - !sockaddr_cmp((struct sockaddr *)&sess->s.src_addr, 1495 + !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr, 1504 1496 (struct sockaddr *)&addr->src_addr)) 1505 1497 return true; 1506 1498 1507 1499 return false; 1508 1500 } 1509 1501 1510 - static void free_sess(struct rtrs_srv_sess *sess) 1502 + static void free_path(struct rtrs_srv_path *srv_path) 1511 1503 { 1512 - if (sess->kobj.state_in_sysfs) { 1513 - kobject_del(&sess->kobj); 1514 - kobject_put(&sess->kobj); 1504 + if (srv_path->kobj.state_in_sysfs) { 1505 + kobject_del(&srv_path->kobj); 1506 + kobject_put(&srv_path->kobj); 1515 1507 } else { 1516 - kfree(sess->stats); 1517 - kfree(sess); 1508 + kfree(srv_path->stats); 1509 + kfree(srv_path); 1518 1510 } 1519 1511 } 1520 1512 1521 1513 static void rtrs_srv_close_work(struct work_struct *work) 1522 1514 { 1523 - struct rtrs_srv_sess *sess; 1515 + struct rtrs_srv_path *srv_path; 1524 1516 struct rtrs_srv_con *con; 1525 1517 int i; 1526 1518 1527 - sess = container_of(work, typeof(*sess), close_work); 1519 + srv_path = container_of(work, typeof(*srv_path), close_work); 1528 1520 1529 - rtrs_srv_destroy_sess_files(sess); 1530 - rtrs_srv_stop_hb(sess); 1521 + rtrs_srv_destroy_path_files(srv_path); 1522 + rtrs_srv_stop_hb(srv_path); 1531 1523 1532 - for (i = 0; i < sess->s.con_num; i++) { 1533 - if (!sess->s.con[i]) 1524 + for (i = 0; i < srv_path->s.con_num; i++) { 1525 + if (!srv_path->s.con[i]) 1534 1526 continue; 1535 - con = to_srv_con(sess->s.con[i]); 1527 + con = to_srv_con(srv_path->s.con[i]); 1536 1528 rdma_disconnect(con->c.cm_id); 1537 1529 ib_drain_qp(con->c.qp); 1538 1530 } ··· 1541 1533 * Degrade ref count to the usual model with a single shared 1542 1534 * atomic_t counter 1543 1535 */ 1544 - percpu_ref_kill(&sess->ids_inflight_ref); 1536 + percpu_ref_kill(&srv_path->ids_inflight_ref); 1545 1537 
1546 1538 /* Wait for all completion */ 1547 - wait_for_completion(&sess->complete_done); 1539 + wait_for_completion(&srv_path->complete_done); 1548 1540 1549 1541 /* Notify upper layer if we are the last path */ 1550 - rtrs_srv_sess_down(sess); 1542 + rtrs_srv_path_down(srv_path); 1551 1543 1552 - unmap_cont_bufs(sess); 1553 - rtrs_srv_free_ops_ids(sess); 1544 + unmap_cont_bufs(srv_path); 1545 + rtrs_srv_free_ops_ids(srv_path); 1554 1546 1555 - for (i = 0; i < sess->s.con_num; i++) { 1556 - if (!sess->s.con[i]) 1547 + for (i = 0; i < srv_path->s.con_num; i++) { 1548 + if (!srv_path->s.con[i]) 1557 1549 continue; 1558 - con = to_srv_con(sess->s.con[i]); 1550 + con = to_srv_con(srv_path->s.con[i]); 1559 1551 rtrs_cq_qp_destroy(&con->c); 1560 1552 rdma_destroy_id(con->c.cm_id); 1561 1553 kfree(con); 1562 1554 } 1563 - rtrs_ib_dev_put(sess->s.dev); 1555 + rtrs_ib_dev_put(srv_path->s.dev); 1564 1556 1565 - del_path_from_srv(sess); 1566 - put_srv(sess->srv); 1567 - sess->srv = NULL; 1568 - rtrs_srv_change_state(sess, RTRS_SRV_CLOSED); 1557 + del_path_from_srv(srv_path); 1558 + put_srv(srv_path->srv); 1559 + srv_path->srv = NULL; 1560 + rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED); 1569 1561 1570 - kfree(sess->dma_addr); 1571 - kfree(sess->s.con); 1572 - free_sess(sess); 1562 + kfree(srv_path->dma_addr); 1563 + kfree(srv_path->s.con); 1564 + free_path(srv_path); 1573 1565 } 1574 1566 1575 - static int rtrs_rdma_do_accept(struct rtrs_srv_sess *sess, 1567 + static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path, 1576 1568 struct rdma_cm_id *cm_id) 1577 1569 { 1578 - struct rtrs_srv *srv = sess->srv; 1570 + struct rtrs_srv_sess *srv = srv_path->srv; 1579 1571 struct rtrs_msg_conn_rsp msg; 1580 1572 struct rdma_conn_param param; 1581 1573 int err; ··· 1623 1615 return errno; 1624 1616 } 1625 1617 1626 - static struct rtrs_srv_sess * 1627 - __find_sess(struct rtrs_srv *srv, const uuid_t *sess_uuid) 1618 + static struct rtrs_srv_path * 1619 + __find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid) 1628 1620 { 1629 - struct rtrs_srv_sess *sess; 1621 + struct rtrs_srv_path *srv_path; 1630 1622 1631 - list_for_each_entry(sess, &srv->paths_list, s.entry) { 1632 - if (uuid_equal(&sess->s.uuid, sess_uuid)) 1633 - return sess; 1623 + list_for_each_entry(srv_path, &srv->paths_list, s.entry) { 1624 + if (uuid_equal(&srv_path->s.uuid, sess_uuid)) 1625 + return srv_path; 1634 1626 } 1635 1627 1636 1628 return NULL; 1637 1629 } 1638 1630 1639 - static int create_con(struct rtrs_srv_sess *sess, 1631 + static int create_con(struct rtrs_srv_path *srv_path, 1640 1632 struct rdma_cm_id *cm_id, 1641 1633 unsigned int cid) 1642 1634 { 1643 - struct rtrs_srv *srv = sess->srv; 1644 - struct rtrs_sess *s = &sess->s; 1635 + struct rtrs_srv_sess *srv = srv_path->srv; 1636 + struct rtrs_path *s = &srv_path->s; 1645 1637 struct rtrs_srv_con *con; 1646 1638 1647 1639 u32 cq_num, max_send_wr, max_recv_wr, wr_limit; ··· 1656 1648 spin_lock_init(&con->rsp_wr_wait_lock); 1657 1649 INIT_LIST_HEAD(&con->rsp_wr_wait_list); 1658 1650 con->c.cm_id = cm_id; 1659 - con->c.sess = &sess->s; 1651 + con->c.path = &srv_path->s; 1660 1652 con->c.cid = cid; 1661 1653 atomic_set(&con->c.wr_cnt, 1); 1662 - wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr; 1654 + wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr; 1663 1655 1664 1656 if (con->c.cid == 0) { 1665 1657 /* ··· 1692 1684 } 1693 1685 cq_num = max_send_wr + max_recv_wr; 1694 1686 atomic_set(&con->c.sq_wr_avail, max_send_wr); 1695 - cq_vector = 
rtrs_srv_get_next_cq_vector(sess); 1687 + cq_vector = rtrs_srv_get_next_cq_vector(srv_path); 1696 1688 1697 1689 /* TODO: SOFTIRQ can be faster, but be careful with softirq context */ 1698 - err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_num, 1690 + err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num, 1699 1691 max_send_wr, max_recv_wr, 1700 1692 IB_POLL_WORKQUEUE); 1701 1693 if (err) { ··· 1707 1699 if (err) 1708 1700 goto free_cqqp; 1709 1701 } 1710 - WARN_ON(sess->s.con[cid]); 1711 - sess->s.con[cid] = &con->c; 1702 + WARN_ON(srv_path->s.con[cid]); 1703 + srv_path->s.con[cid] = &con->c; 1712 1704 1713 1705 /* 1714 1706 * Change context from server to current connection. The other ··· 1727 1719 return err; 1728 1720 } 1729 1721 1730 - static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv, 1722 + static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv, 1731 1723 struct rdma_cm_id *cm_id, 1732 1724 unsigned int con_num, 1733 1725 unsigned int recon_cnt, 1734 1726 const uuid_t *uuid) 1735 1727 { 1736 - struct rtrs_srv_sess *sess; 1728 + struct rtrs_srv_path *srv_path; 1737 1729 int err = -ENOMEM; 1738 1730 char str[NAME_MAX]; 1739 1731 struct rtrs_addr path; ··· 1747 1739 pr_err("Path with same addr exists\n"); 1748 1740 goto err; 1749 1741 } 1750 - sess = kzalloc(sizeof(*sess), GFP_KERNEL); 1751 - if (!sess) 1742 + srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL); 1743 + if (!srv_path) 1752 1744 goto err; 1753 1745 1754 - sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL); 1755 - if (!sess->stats) 1746 + srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL); 1747 + if (!srv_path->stats) 1756 1748 goto err_free_sess; 1757 1749 1758 - sess->stats->sess = sess; 1750 + srv_path->stats->srv_path = srv_path; 1759 1751 1760 - sess->dma_addr = kcalloc(srv->queue_depth, sizeof(*sess->dma_addr), 1761 - GFP_KERNEL); 1762 - if (!sess->dma_addr) 1752 + srv_path->dma_addr = kcalloc(srv->queue_depth, 1753 + sizeof(*srv_path->dma_addr), 1754 + GFP_KERNEL); 1755 + if (!srv_path->dma_addr) 1763 1756 goto err_free_stats; 1764 1757 1765 - sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL); 1766 - if (!sess->s.con) 1758 + srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con), 1759 + GFP_KERNEL); 1760 + if (!srv_path->s.con) 1767 1761 goto err_free_dma_addr; 1768 1762 1769 - sess->state = RTRS_SRV_CONNECTING; 1770 - sess->srv = srv; 1771 - sess->cur_cq_vector = -1; 1772 - sess->s.dst_addr = cm_id->route.addr.dst_addr; 1773 - sess->s.src_addr = cm_id->route.addr.src_addr; 1763 + srv_path->state = RTRS_SRV_CONNECTING; 1764 + srv_path->srv = srv; 1765 + srv_path->cur_cq_vector = -1; 1766 + srv_path->s.dst_addr = cm_id->route.addr.dst_addr; 1767 + srv_path->s.src_addr = cm_id->route.addr.src_addr; 1774 1768 1775 1769 /* temporary until receiving session-name from client */ 1776 - path.src = &sess->s.src_addr; 1777 - path.dst = &sess->s.dst_addr; 1770 + path.src = &srv_path->s.src_addr; 1771 + path.dst = &srv_path->s.dst_addr; 1778 1772 rtrs_addr_to_str(&path, str, sizeof(str)); 1779 - strscpy(sess->s.sessname, str, sizeof(sess->s.sessname)); 1773 + strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname)); 1780 1774 1781 - sess->s.con_num = con_num; 1782 - sess->s.irq_con_num = con_num; 1783 - sess->s.recon_cnt = recon_cnt; 1784 - uuid_copy(&sess->s.uuid, uuid); 1785 - spin_lock_init(&sess->state_lock); 1786 - INIT_WORK(&sess->close_work, rtrs_srv_close_work); 1787 - rtrs_srv_init_hb(sess); 1775 + srv_path->s.con_num = 
con_num; 1776 + srv_path->s.irq_con_num = con_num; 1777 + srv_path->s.recon_cnt = recon_cnt; 1778 + uuid_copy(&srv_path->s.uuid, uuid); 1779 + spin_lock_init(&srv_path->state_lock); 1780 + INIT_WORK(&srv_path->close_work, rtrs_srv_close_work); 1781 + rtrs_srv_init_hb(srv_path); 1788 1782 1789 - sess->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd); 1790 - if (!sess->s.dev) { 1783 + srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd); 1784 + if (!srv_path->s.dev) { 1791 1785 err = -ENOMEM; 1792 1786 goto err_free_con; 1793 1787 } 1794 - err = map_cont_bufs(sess); 1788 + err = map_cont_bufs(srv_path); 1795 1789 if (err) 1796 1790 goto err_put_dev; 1797 1791 1798 - err = rtrs_srv_alloc_ops_ids(sess); 1792 + err = rtrs_srv_alloc_ops_ids(srv_path); 1799 1793 if (err) 1800 1794 goto err_unmap_bufs; 1801 1795 1802 - __add_path_to_srv(srv, sess); 1796 + __add_path_to_srv(srv, srv_path); 1803 1797 1804 - return sess; 1798 + return srv_path; 1805 1799 1806 1800 err_unmap_bufs: 1807 - unmap_cont_bufs(sess); 1801 + unmap_cont_bufs(srv_path); 1808 1802 err_put_dev: 1809 - rtrs_ib_dev_put(sess->s.dev); 1803 + rtrs_ib_dev_put(srv_path->s.dev); 1810 1804 err_free_con: 1811 - kfree(sess->s.con); 1805 + kfree(srv_path->s.con); 1812 1806 err_free_dma_addr: 1813 - kfree(sess->dma_addr); 1807 + kfree(srv_path->dma_addr); 1814 1808 err_free_stats: 1815 - kfree(sess->stats); 1809 + kfree(srv_path->stats); 1816 1810 err_free_sess: 1817 - kfree(sess); 1811 + kfree(srv_path); 1818 1812 err: 1819 1813 return ERR_PTR(err); 1820 1814 } ··· 1826 1816 size_t len) 1827 1817 { 1828 1818 struct rtrs_srv_ctx *ctx = cm_id->context; 1829 - struct rtrs_srv_sess *sess; 1830 - struct rtrs_srv *srv; 1819 + struct rtrs_srv_path *srv_path; 1820 + struct rtrs_srv_sess *srv; 1831 1821 1832 1822 u16 version, con_num, cid; 1833 1823 u16 recon_cnt; ··· 1867 1857 goto reject_w_err; 1868 1858 } 1869 1859 mutex_lock(&srv->paths_mutex); 1870 - sess = __find_sess(srv, &msg->sess_uuid); 1871 - if (sess) { 1872 - struct rtrs_sess *s = &sess->s; 1860 + srv_path = __find_path(srv, &msg->sess_uuid); 1861 + if (srv_path) { 1862 + struct rtrs_path *s = &srv_path->s; 1873 1863 1874 1864 /* Session already holds a reference */ 1875 1865 put_srv(srv); 1876 1866 1877 - if (sess->state != RTRS_SRV_CONNECTING) { 1867 + if (srv_path->state != RTRS_SRV_CONNECTING) { 1878 1868 rtrs_err(s, "Session in wrong state: %s\n", 1879 - rtrs_srv_state_str(sess->state)); 1869 + rtrs_srv_state_str(srv_path->state)); 1880 1870 mutex_unlock(&srv->paths_mutex); 1881 1871 goto reject_w_err; 1882 1872 } ··· 1896 1886 goto reject_w_err; 1897 1887 } 1898 1888 } else { 1899 - sess = __alloc_sess(srv, cm_id, con_num, recon_cnt, 1889 + srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt, 1900 1890 &msg->sess_uuid); 1901 - if (IS_ERR(sess)) { 1891 + if (IS_ERR(srv_path)) { 1902 1892 mutex_unlock(&srv->paths_mutex); 1903 1893 put_srv(srv); 1904 - err = PTR_ERR(sess); 1894 + err = PTR_ERR(srv_path); 1905 1895 pr_err("RTRS server session allocation failed: %d\n", err); 1906 1896 goto reject_w_err; 1907 1897 } 1908 1898 } 1909 - err = create_con(sess, cm_id, cid); 1899 + err = create_con(srv_path, cm_id, cid); 1910 1900 if (err) { 1911 - rtrs_err((&sess->s), "create_con(), error %d\n", err); 1901 + rtrs_err((&srv_path->s), "create_con(), error %d\n", err); 1912 1902 rtrs_rdma_do_reject(cm_id, err); 1913 1903 /* 1914 1904 * Since session has other connections we follow normal way ··· 1917 1907 */ 1918 1908 goto close_and_return_err; 1919 1909 } 1920 - err = 
rtrs_rdma_do_accept(sess, cm_id); 1910 + err = rtrs_rdma_do_accept(srv_path, cm_id); 1921 1911 if (err) { 1922 - rtrs_err((&sess->s), "rtrs_rdma_do_accept(), error %d\n", err); 1912 + rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err); 1923 1913 rtrs_rdma_do_reject(cm_id, err); 1924 1914 /* 1925 1915 * Since current connection was successfully added to the ··· 1939 1929 1940 1930 close_and_return_err: 1941 1931 mutex_unlock(&srv->paths_mutex); 1942 - close_sess(sess); 1932 + close_path(srv_path); 1943 1933 1944 1934 return err; 1945 1935 } ··· 1947 1937 static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id, 1948 1938 struct rdma_cm_event *ev) 1949 1939 { 1950 - struct rtrs_srv_sess *sess = NULL; 1951 - struct rtrs_sess *s = NULL; 1940 + struct rtrs_srv_path *srv_path = NULL; 1941 + struct rtrs_path *s = NULL; 1952 1942 1953 1943 if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) { 1954 1944 struct rtrs_con *c = cm_id->context; 1955 1945 1956 - s = c->sess; 1957 - sess = to_srv_sess(s); 1946 + s = c->path; 1947 + srv_path = to_srv_path(s); 1958 1948 } 1959 1949 1960 1950 switch (ev->event) { ··· 1978 1968 case RDMA_CM_EVENT_ADDR_CHANGE: 1979 1969 case RDMA_CM_EVENT_TIMEWAIT_EXIT: 1980 1970 case RDMA_CM_EVENT_DEVICE_REMOVAL: 1981 - close_sess(sess); 1971 + close_path(srv_path); 1982 1972 break; 1983 1973 default: 1984 1974 pr_err("Ignoring unexpected CM event %s, err %d\n", ··· 2186 2176 } 2187 2177 EXPORT_SYMBOL(rtrs_srv_open); 2188 2178 2189 - static void close_sessions(struct rtrs_srv *srv) 2179 + static void close_paths(struct rtrs_srv_sess *srv) 2190 2180 { 2191 - struct rtrs_srv_sess *sess; 2181 + struct rtrs_srv_path *srv_path; 2192 2182 2193 2183 mutex_lock(&srv->paths_mutex); 2194 - list_for_each_entry(sess, &srv->paths_list, s.entry) 2195 - close_sess(sess); 2184 + list_for_each_entry(srv_path, &srv->paths_list, s.entry) 2185 + close_path(srv_path); 2196 2186 mutex_unlock(&srv->paths_mutex); 2197 2187 } 2198 2188 2199 2189 static void close_ctx(struct rtrs_srv_ctx *ctx) 2200 2190 { 2201 - struct rtrs_srv *srv; 2191 + struct rtrs_srv_sess *srv; 2202 2192 2203 2193 mutex_lock(&ctx->srv_mutex); 2204 2194 list_for_each_entry(srv, &ctx->srv_list, ctx_list) 2205 - close_sessions(srv); 2195 + close_paths(srv); 2206 2196 mutex_unlock(&ctx->srv_mutex); 2207 2197 flush_workqueue(rtrs_wq); 2208 2198 }
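A note on the teardown above: rtrs_srv_close_work() relies on the kernel's percpu_ref machinery to drain in-flight IDs, killing the reference (which, per the in-code comment, degrades it to a single shared atomic counter) and then sleeping until the release callback fires the completion. A minimal sketch of that pattern, with illustrative struct and function names rather than the driver's own wiring:

#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/container_of.h>

struct path_ctx {
	struct percpu_ref inflight_ref;		/* one get/put per in-flight ID */
	struct completion complete_done;	/* fired by the release callback */
};

/* runs once the killed ref drops to zero, i.e. all IDs completed */
static void inflight_release(struct percpu_ref *ref)
{
	struct path_ctx *p = container_of(ref, struct path_ctx, inflight_ref);

	complete(&p->complete_done);
}

static int path_ctx_init(struct path_ctx *p)
{
	init_completion(&p->complete_done);
	return percpu_ref_init(&p->inflight_ref, inflight_release, 0,
			       GFP_KERNEL);
}

static void path_ctx_close(struct path_ctx *p)
{
	percpu_ref_kill(&p->inflight_ref);	/* atomic mode; drops the init ref */
	wait_for_completion(&p->complete_done);	/* block until the last ID is put */
	percpu_ref_exit(&p->inflight_ref);
}

The appeal of this pattern for a hot path is that gets and puts stay per-CPU until shutdown actually begins, so counting in-flight work costs no shared cacheline traffic in steady state.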
+8 -8
drivers/infiniband/ulp/rtrs/rtrs-srv.h
··· 37 37 struct rtrs_srv_stats { 38 38 struct kobject kobj_stats; 39 39 struct rtrs_srv_stats_rdma_stats rdma_stats; 40 - struct rtrs_srv_sess *sess; 40 + struct rtrs_srv_path *srv_path; 41 41 }; 42 42 43 43 struct rtrs_srv_con { ··· 71 71 struct rtrs_iu *iu; /* send buffer for new rkey msg */ 72 72 }; 73 73 74 - struct rtrs_srv_sess { 75 - struct rtrs_sess s; 76 - struct rtrs_srv *srv; 74 + struct rtrs_srv_path { 75 + struct rtrs_path s; 76 + struct rtrs_srv_sess *srv; 77 77 struct work_struct close_work; 78 78 enum rtrs_srv_state state; 79 79 spinlock_t state_lock; ··· 90 90 struct rtrs_srv_stats *stats; 91 91 }; 92 92 93 - struct rtrs_srv { 93 + struct rtrs_srv_sess { 94 94 struct list_head paths_list; 95 95 int paths_up; 96 96 struct mutex paths_ev_mutex; ··· 125 125 126 126 extern struct class *rtrs_dev_class; 127 127 128 - void close_sess(struct rtrs_srv_sess *sess); 128 + void close_path(struct rtrs_srv_path *srv_path); 129 129 130 130 static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s, 131 131 size_t size, int d) ··· 142 142 char *page, size_t len); 143 143 144 144 /* functions which are implemented in rtrs-srv-sysfs.c */ 145 - int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess); 146 - void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess); 145 + int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path); 146 + void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path); 147 147 148 148 #endif /* RTRS_SRV_H */
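The rename flips the naming here: the old struct rtrs_srv is now rtrs_srv_sess (the session grouping all paths on paths_list), while the old rtrs_srv_sess is now rtrs_srv_path, which embeds the generic struct rtrs_path as its member s. Code that only has the embedded generic struct recovers the server-side type with the standard container_of() idiom; the to_srv_path() helper used by the CM handler in rtrs-srv.c above presumably reduces to this sketch:

#include <linux/container_of.h>

/* sketch: map the embedded generic path back to its server wrapper */
static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s)
{
	return container_of(s, struct rtrs_srv_path, s);
}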
+49 -49
drivers/infiniband/ulp/rtrs/rtrs.c
··· 69 69 70 70 int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu) 71 71 { 72 - struct rtrs_sess *sess = con->sess; 72 + struct rtrs_path *path = con->path; 73 73 struct ib_recv_wr wr; 74 74 struct ib_sge list; 75 75 76 76 list.addr = iu->dma_addr; 77 77 list.length = iu->size; 78 - list.lkey = sess->dev->ib_pd->local_dma_lkey; 78 + list.lkey = path->dev->ib_pd->local_dma_lkey; 79 79 80 80 if (list.length == 0) { 81 - rtrs_wrn(con->sess, 81 + rtrs_wrn(con->path, 82 82 "Posting receive work request failed, sg list is empty\n"); 83 83 return -EINVAL; 84 84 } ··· 126 126 int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size, 127 127 struct ib_send_wr *head) 128 128 { 129 - struct rtrs_sess *sess = con->sess; 129 + struct rtrs_path *path = con->path; 130 130 struct ib_send_wr wr; 131 131 struct ib_sge list; 132 132 ··· 135 135 136 136 list.addr = iu->dma_addr; 137 137 list.length = size; 138 - list.lkey = sess->dev->ib_pd->local_dma_lkey; 138 + list.lkey = path->dev->ib_pd->local_dma_lkey; 139 139 140 140 wr = (struct ib_send_wr) { 141 141 .wr_cqe = &iu->cqe, ··· 188 188 struct ib_send_wr *head) 189 189 { 190 190 struct ib_rdma_wr wr; 191 - struct rtrs_sess *sess = con->sess; 191 + struct rtrs_path *path = con->path; 192 192 enum ib_send_flags sflags; 193 193 194 194 atomic_dec_if_positive(&con->sq_wr_avail); 195 - sflags = (atomic_inc_return(&con->wr_cnt) % sess->signal_interval) ? 195 + sflags = (atomic_inc_return(&con->wr_cnt) % path->signal_interval) ? 196 196 0 : IB_SEND_SIGNALED; 197 197 198 198 wr = (struct ib_rdma_wr) { ··· 211 211 212 212 switch (ev->event) { 213 213 case IB_EVENT_COMM_EST: 214 - rtrs_info(con->sess, "QP event %s (%d) received\n", 214 + rtrs_info(con->path, "QP event %s (%d) received\n", 215 215 ib_event_msg(ev->event), ev->event); 216 216 rdma_notify(con->cm_id, IB_EVENT_COMM_EST); 217 217 break; 218 218 default: 219 - rtrs_info(con->sess, "Unhandled QP event %s (%d) received\n", 219 + rtrs_info(con->path, "Unhandled QP event %s (%d) received\n", 220 220 ib_event_msg(ev->event), ev->event); 221 221 break; 222 222 } ··· 224 224 225 225 static bool is_pollqueue(struct rtrs_con *con) 226 226 { 227 - return con->cid >= con->sess->irq_con_num; 227 + return con->cid >= con->path->irq_con_num; 228 228 } 229 229 230 230 static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe, ··· 240 240 cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx); 241 241 242 242 if (IS_ERR(cq)) { 243 - rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n", 243 + rtrs_err(con->path, "Creating completion queue failed, errno: %ld\n", 244 244 PTR_ERR(cq)); 245 245 return PTR_ERR(cq); 246 246 } ··· 271 271 272 272 ret = rdma_create_qp(cm_id, pd, &init_attr); 273 273 if (ret) { 274 - rtrs_err(con->sess, "Creating QP failed, err: %d\n", ret); 274 + rtrs_err(con->path, "Creating QP failed, err: %d\n", ret); 275 275 return ret; 276 276 } 277 277 con->qp = cm_id->qp; ··· 290 290 con->cq = NULL; 291 291 } 292 292 293 - int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con, 293 + int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con, 294 294 u32 max_send_sge, int cq_vector, int nr_cqe, 295 295 u32 max_send_wr, u32 max_recv_wr, 296 296 enum ib_poll_context poll_ctx) ··· 301 301 if (err) 302 302 return err; 303 303 304 - err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr, 304 + err = create_qp(con, path->dev->ib_pd, max_send_wr, max_recv_wr, 305 305 max_send_sge); 306 306 if (err) { 307 307 
destroy_cq(con); 308 308 return err; 309 309 } 310 - con->sess = sess; 310 + con->path = path; 311 311 312 312 return 0; 313 313 } ··· 323 323 } 324 324 EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy); 325 325 326 - static void schedule_hb(struct rtrs_sess *sess) 326 + static void schedule_hb(struct rtrs_path *path) 327 327 { 328 - queue_delayed_work(sess->hb_wq, &sess->hb_dwork, 329 - msecs_to_jiffies(sess->hb_interval_ms)); 328 + queue_delayed_work(path->hb_wq, &path->hb_dwork, 329 + msecs_to_jiffies(path->hb_interval_ms)); 330 330 } 331 331 332 - void rtrs_send_hb_ack(struct rtrs_sess *sess) 332 + void rtrs_send_hb_ack(struct rtrs_path *path) 333 333 { 334 - struct rtrs_con *usr_con = sess->con[0]; 334 + struct rtrs_con *usr_con = path->con[0]; 335 335 u32 imm; 336 336 int err; 337 337 338 338 imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0); 339 - err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm, 339 + err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm, 340 340 NULL); 341 341 if (err) { 342 - rtrs_err(sess, "send HB ACK failed, errno: %d\n", err); 343 - sess->hb_err_handler(usr_con); 342 + rtrs_err(path, "send HB ACK failed, errno: %d\n", err); 343 + path->hb_err_handler(usr_con); 344 344 return; 345 345 } 346 346 } ··· 349 349 static void hb_work(struct work_struct *work) 350 350 { 351 351 struct rtrs_con *usr_con; 352 - struct rtrs_sess *sess; 352 + struct rtrs_path *path; 353 353 u32 imm; 354 354 int err; 355 355 356 - sess = container_of(to_delayed_work(work), typeof(*sess), hb_dwork); 357 - usr_con = sess->con[0]; 356 + path = container_of(to_delayed_work(work), typeof(*path), hb_dwork); 357 + usr_con = path->con[0]; 358 358 359 - if (sess->hb_missed_cnt > sess->hb_missed_max) { 360 - rtrs_err(sess, "HB missed max reached.\n"); 361 - sess->hb_err_handler(usr_con); 359 + if (path->hb_missed_cnt > path->hb_missed_max) { 360 + rtrs_err(path, "HB missed max reached.\n"); 361 + path->hb_err_handler(usr_con); 362 362 return; 363 363 } 364 - if (sess->hb_missed_cnt++) { 364 + if (path->hb_missed_cnt++) { 365 365 /* Reschedule work without sending hb */ 366 - schedule_hb(sess); 366 + schedule_hb(path); 367 367 return; 368 368 } 369 369 370 - sess->hb_last_sent = ktime_get(); 370 + path->hb_last_sent = ktime_get(); 371 371 372 372 imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0); 373 - err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm, 373 + err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm, 374 374 NULL); 375 375 if (err) { 376 - rtrs_err(sess, "HB send failed, errno: %d\n", err); 377 - sess->hb_err_handler(usr_con); 376 + rtrs_err(path, "HB send failed, errno: %d\n", err); 377 + path->hb_err_handler(usr_con); 378 378 return; 379 379 } 380 380 381 - schedule_hb(sess); 381 + schedule_hb(path); 382 382 } 383 383 384 - void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe, 384 + void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe, 385 385 unsigned int interval_ms, unsigned int missed_max, 386 386 void (*err_handler)(struct rtrs_con *con), 387 387 struct workqueue_struct *wq) 388 388 { 389 - sess->hb_cqe = cqe; 390 - sess->hb_interval_ms = interval_ms; 391 - sess->hb_err_handler = err_handler; 392 - sess->hb_wq = wq; 393 - sess->hb_missed_max = missed_max; 394 - sess->hb_missed_cnt = 0; 395 - INIT_DELAYED_WORK(&sess->hb_dwork, hb_work); 389 + path->hb_cqe = cqe; 390 + path->hb_interval_ms = interval_ms; 391 + path->hb_err_handler = err_handler; 392 + path->hb_wq = wq; 393 + path->hb_missed_max = missed_max; 394 + path->hb_missed_cnt = 0; 395 + 
INIT_DELAYED_WORK(&path->hb_dwork, hb_work); 396 396 } 397 397 EXPORT_SYMBOL_GPL(rtrs_init_hb); 398 398 399 - void rtrs_start_hb(struct rtrs_sess *sess) 399 + void rtrs_start_hb(struct rtrs_path *path) 400 400 { 401 - schedule_hb(sess); 401 + schedule_hb(path); 402 402 } 403 403 EXPORT_SYMBOL_GPL(rtrs_start_hb); 404 404 405 - void rtrs_stop_hb(struct rtrs_sess *sess) 405 + void rtrs_stop_hb(struct rtrs_path *path) 406 406 { 407 - cancel_delayed_work_sync(&sess->hb_dwork); 408 - sess->hb_missed_cnt = 0; 407 + cancel_delayed_work_sync(&path->hb_dwork); 408 + path->hb_missed_cnt = 0; 409 409 } 410 410 EXPORT_SYMBOL_GPL(rtrs_stop_hb); 411 411
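The heartbeat helpers above give a ULP a path-level keepalive: rtrs_init_hb() records the interval, the miss budget, and an error handler; hb_work() then reschedules itself each interval and escalates to the error handler once hb_missed_cnt exceeds hb_missed_max. A usage sketch under assumed consumer names (the cqe done handler, interval, budget, and workqueue are placeholders, not taken from a real caller):

static void my_hb_err(struct rtrs_con *con)
{
	/* heartbeat missed too often or a post failed: close the path */
}

static void my_hb_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* completion of the zero-length RDMA write carrying the HB imm */
}

static void setup_hb(struct rtrs_path *path, struct workqueue_struct *wq)
{
	static struct ib_cqe hb_cqe = { .done = my_hb_done };

	rtrs_init_hb(path, &hb_cqe, 5000 /* interval, ms */,
		     5 /* missed max */, my_hb_err, wq);
	rtrs_start_hb(path);	/* first hb_work fires after one interval */
}

Since rtrs_stop_hb() cancels the delayed work synchronously and resets the miss counter, it is safe to call from a close path before the connection array is freed.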
+18 -16
drivers/infiniband/ulp/rtrs/rtrs.h
··· 13 13 #include <linux/scatterlist.h> 14 14 15 15 struct rtrs_permit; 16 - struct rtrs_clt; 16 + struct rtrs_clt_sess; 17 17 struct rtrs_srv_ctx; 18 - struct rtrs_srv; 18 + struct rtrs_srv_sess; 19 19 struct rtrs_srv_op; 20 20 21 21 /* ··· 52 52 void (*link_ev)(void *priv, enum rtrs_clt_link_ev ev); 53 53 }; 54 54 55 - struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops, 56 - const char *sessname, 55 + struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops, 56 + const char *pathname, 57 57 const struct rtrs_addr *paths, 58 58 size_t path_cnt, u16 port, 59 59 size_t pdu_sz, u8 reconnect_delay_sec, 60 60 s16 max_reconnect_attempts, u32 nr_poll_queues); 61 61 62 - void rtrs_clt_close(struct rtrs_clt *sess); 62 + void rtrs_clt_close(struct rtrs_clt_sess *clt); 63 63 64 64 enum wait_type { 65 65 RTRS_PERMIT_NOWAIT = 0, ··· 77 77 RTRS_IO_CON 78 78 }; 79 79 80 - struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *sess, 81 - enum rtrs_clt_con_type con_type, 82 - enum wait_type wait); 80 + struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *sess, 81 + enum rtrs_clt_con_type con_type, 82 + enum wait_type wait); 83 83 84 - void rtrs_clt_put_permit(struct rtrs_clt *sess, struct rtrs_permit *permit); 84 + void rtrs_clt_put_permit(struct rtrs_clt_sess *sess, 85 + struct rtrs_permit *permit); 85 86 86 87 /** 87 88 * rtrs_clt_req_ops - it holds the request confirmation callback ··· 99 98 }; 100 99 101 100 int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, 102 - struct rtrs_clt *sess, struct rtrs_permit *permit, 101 + struct rtrs_clt_sess *sess, struct rtrs_permit *permit, 103 102 const struct kvec *vec, size_t nr, size_t len, 104 103 struct scatterlist *sg, unsigned int sg_cnt); 105 - int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index); 104 + int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index); 106 105 107 106 /** 108 107 * rtrs_attrs - RTRS session attributes ··· 113 112 u32 max_segments; 114 113 }; 115 114 116 - int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr); 115 + int rtrs_clt_query(struct rtrs_clt_sess *sess, struct rtrs_attrs *attr); 117 116 118 117 /* 119 118 * Here goes RTRS server API ··· 164 163 * @priv: Private data from user if previously set with 165 164 * rtrs_srv_set_sess_priv() 166 165 */ 167 - int (*link_ev)(struct rtrs_srv *sess, enum rtrs_srv_link_ev ev, 166 + int (*link_ev)(struct rtrs_srv_sess *sess, enum rtrs_srv_link_ev ev, 168 167 void *priv); 169 168 }; 170 169 ··· 174 173 175 174 bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int errno); 176 175 177 - void rtrs_srv_set_sess_priv(struct rtrs_srv *sess, void *priv); 176 + void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *sess, void *priv); 178 177 179 - int rtrs_srv_get_sess_name(struct rtrs_srv *sess, char *sessname, size_t len); 178 + int rtrs_srv_get_path_name(struct rtrs_srv_sess *sess, char *pathname, 179 + size_t len); 180 180 181 - int rtrs_srv_get_queue_depth(struct rtrs_srv *sess); 181 + int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *sess); 182 182 183 183 int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port, 184 184 struct rtrs_addr *addr);
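Taken together, the renamed client entry points keep the same flow as before: open a session over one or more paths, take a permit, issue the request, and release the permit on failure (on success it is released from the confirmation callback). A sketch against the prototypes above; the conf_fn field name and the put-on-error convention follow the rnbd-clt usage and are assumptions here:

static void io_done(void *priv, int errno)
{
	/* request confirmation callback; releases the permit on success */
}

static int do_one_write(struct rtrs_clt_sess *sess, struct kvec *vec,
			size_t len, struct scatterlist *sg,
			unsigned int sg_cnt)
{
	struct rtrs_clt_req_ops ops = { .conf_fn = io_done };
	struct rtrs_permit *permit;
	int err;

	permit = rtrs_clt_get_permit(sess, RTRS_IO_CON, RTRS_PERMIT_WAIT);
	if (!permit)
		return -EBUSY;

	err = rtrs_clt_request(WRITE, &ops, sess, permit, vec, 1, len,
			       sg, sg_cnt);
	if (err)
		rtrs_clt_put_permit(sess, permit);
	return err;
}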
+1 -1
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
··· 497 497 __be32 opt2; 498 498 __be64 opt0; 499 499 __be32 iss; 500 - __be32 rsvd; 500 + __be32 rsvd[3]; 501 501 }; 502 502 503 503 struct cpl_act_open_req {
+1
include/rdma/ib_mad.h
··· 276 276 IB_PORT_SWITCH_PORT_STATE_TABLE_SUP = 1 << 3, 277 277 IB_PORT_LINK_WIDTH_2X_SUP = 1 << 4, 278 278 IB_PORT_LINK_SPEED_HDR_SUP = 1 << 5, 279 + IB_PORT_LINK_SPEED_NDR_SUP = 1 << 10, 279 280 }; 280 281 281 282 #define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
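The new bit backs the "NDR speed support in mlx5" item from the merge summary: a device advertising NDR sets it in the PortInfo capability bits so MAD consumers can discover the speed. A trivial hedged check, where pulling the mask out of the PortInfo response is assumed to have happened already:

/* sketch: 'capmask2' is assumed to hold the PortInfo capability
 * bits that the enum above describes */
static bool link_speed_ndr_supported(u32 capmask2)
{
	return capmask2 & IB_PORT_LINK_SPEED_NDR_SUP;
}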
+11 -1
include/rdma/ib_smi.h
··· 144 144 #define IB_NOTICE_TRAP_DR_NOTICE 0x80 145 145 #define IB_NOTICE_TRAP_DR_TRUNC 0x40 146 146 147 - 147 + /** 148 + * ib_init_query_mad - Initialize query MAD. 149 + * @mad: MAD to initialize. 150 + */ 151 + static inline void ib_init_query_mad(struct ib_smp *mad) 152 + { 153 + mad->base_version = IB_MGMT_BASE_VERSION; 154 + mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 155 + mad->class_version = 1; 156 + mad->method = IB_MGMT_METHOD_GET; 157 + } 148 158 #endif /* IB_SMI_H */
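ib_init_query_mad() folds the per-driver init_query_mad() copies into one shared helper (part of the "common helpers" cleanup from the merge summary). Converted callers then only fill in the attribute they are querying, e.g. a PortInfo SubnGet; a sketch, with the allocation details assumed:

#include <linux/slab.h>
#include <rdma/ib_smi.h>

static struct ib_smp *alloc_port_info_query(u32 port_num)
{
	struct ib_smp *in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);

	if (!in_mad)
		return NULL;

	ib_init_query_mad(in_mad);			/* base/class/method boilerplate */
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;	/* already __be16 */
	in_mad->attr_mod = cpu_to_be32(port_num);	/* port being queried */
	return in_mad;
}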
+17
include/rdma/ib_verbs.h
··· 4749 4749 return (u32)(v & IB_GRH_FLOWLABEL_MASK); 4750 4750 } 4751 4751 4752 + /** 4753 + * rdma_get_udp_sport - Calculate and set UDP source port based on the flow 4754 + * label. If flow label is not defined in GRH then 4755 + * calculate it based on lqpn/rqpn. 4756 + * 4757 + * @fl: flow label from GRH 4758 + * @lqpn: local qp number 4759 + * @rqpn: remote qp number 4760 + */ 4761 + static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn) 4762 + { 4763 + if (!fl) 4764 + fl = rdma_calc_flow_label(lqpn, rqpn); 4765 + 4766 + return rdma_flow_label_to_udp_sport(fl); 4767 + } 4768 + 4752 4769 const struct ib_port_immutable* 4753 4770 ib_port_immutable_read(struct ib_device *dev, unsigned int port); 4754 4771 #endif /* IB_VERBS_H */
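rdma_get_udp_sport() is the consolidation point for the "roce source port selection" commits in this series: rxe, hns, and irdma stop hand-rolling the flow-label fold and call this helper instead. A sketch of the driver-side call, where the field layout of struct rdma_ah_attr (grh.flow_label) is the only assumption:

/* sketch: prefer the consumer-supplied flow label from the GRH;
 * otherwise the helper derives one from the QP pair */
static u16 pick_roce_udp_sport(const struct rdma_ah_attr *ah,
			       u32 lqpn, u32 rqpn)
{
	return rdma_get_udp_sport(ah->grh.flow_label, lqpn, rqpn);
}

As the inline body above shows, the helper folds the (possibly computed) flow label into the RoCE v2 UDP source-port range via rdma_flow_label_to_udp_sport(), so two QPs with the same flow label hash to the same source port and thus the same ECMP path.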
+2
include/uapi/rdma/hns-abi.h
··· 77 77 HNS_ROCE_QP_CAP_RQ_RECORD_DB = 1 << 0, 78 78 HNS_ROCE_QP_CAP_SQ_RECORD_DB = 1 << 1, 79 79 HNS_ROCE_QP_CAP_OWNER_DB = 1 << 2, 80 + HNS_ROCE_QP_CAP_DIRECT_WQE = 1 << 5, 80 81 }; 81 82 82 83 struct hns_roce_ib_create_qp_resp { 83 84 __aligned_u64 cap_flags; 85 + __aligned_u64 dwqe_mmap_key; 84 86 }; 85 87 86 88 struct hns_roce_ib_alloc_ucontext_resp {
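The two hns-abi.h additions pair up: the kernel reports HNS_ROCE_QP_CAP_DIRECT_WQE in cap_flags and hands userspace an mmap key for the direct-WQE region in dwqe_mmap_key. A userspace-provider sketch (not kernel code; the command fd, page size, and protection flags are assumptions for illustration):

#include <stddef.h>
#include <sys/mman.h>
#include <rdma/hns-abi.h>

/* 'resp' is the hns_roce_ib_create_qp_resp returned by the
 * create-QP command issued on 'cmd_fd' */
static void *map_dwqe(int cmd_fd,
		      const struct hns_roce_ib_create_qp_resp *resp,
		      size_t page_size)
{
	void *p;

	if (!(resp->cap_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
		return NULL;	/* kernel did not grant direct WQE */

	p = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, cmd_fd,
		 (off_t)resp->dwqe_mmap_key);
	return p == MAP_FAILED ? NULL : p;
}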