Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull more rdma updates from Doug Ledford:
"Items of note:

- two patches fix a regression in the 4.15 kernel. The 4.14 kernel
worked fine with NVMe over Fabrics and mlx5 adapters. That broke in
4.15. The fix is here.

- one of the patches (the endian notation patch from Lijun) looks
like a lot of lines of change, but it's mostly mechanical in
nature. It amounts to the biggest chunk of change in this pull
request (about 2/3rds of the overall diff).

Summary:

- Clean up some function signatures in rxe for clarity

- Tidy the RDMA netlink header to remove unimplemented constants

- bnxt_re driver fixes, one is a regression this window.

- Minor hns driver fixes

- Various fixes from Dan Carpenter and his tool

- Fix IRQ cleanup race in HFI1

- HFI1 performance optimizations and a fix to report counters in the right units

- Fix for an IPoIB startup sequence race with the external manager

- Oops fix for the new kabi path

- Endian cleanups for hns

- Fix for mlx5 related to the new automatic affinity support"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (38 commits)
net/mlx5: increase async EQ to avoid EQ overrun
mlx5: fix mlx5_get_vector_affinity to start from completion vector 0
RDMA/hns: Fix the endian problem for hns
IB/uverbs: Use the standard kConfig format for experimental
IB: Update references to libibverbs
IB/hfi1: Add 16B rcvhdr trace support
IB/hfi1: Convert kzalloc_node and kcalloc to use kcalloc_node
IB/core: Avoid a potential OOPs for an unused optional parameter
IB/core: Map iWarp AH type to undefined in rdma_ah_find_type
IB/ipoib: Fix for potential no-carrier state
IB/hfi1: Show fault stats in both TX and RX directions
IB/hfi1: Remove blind constants from 16B update
IB/hfi1: Convert PortXmitWait/PortVLXmitWait counters to flit times
IB/hfi1: Do not override given pcie_pset value
IB/hfi1: Optimize process_receive_ib()
IB/hfi1: Remove unnecessary fecn and becn fields
IB/hfi1: Look up ibport using a pointer in receive path
IB/hfi1: Optimize packet type comparison using 9B and bypass code paths
IB/hfi1: Compute BTH only for RDMA_WRITE_LAST/SEND_LAST packet
IB/hfi1: Remove dependence on qp->s_hdrwords
...

+917 -642
+1 -1
Documentation/infiniband/user_verbs.txt
··· 5 5 described in chapter 11 of the InfiniBand Architecture Specification. 6 6 7 7 To use the verbs, the libibverbs library, available from 8 - http://www.openfabrics.org/, is required. libibverbs contains a 8 + https://github.com/linux-rdma/rdma-core, is required. libibverbs contains a 9 9 device-independent API for using the ib_uverbs interface. 10 10 libibverbs also requires appropriate device-dependent kernel and 11 11 userspace driver for your InfiniBand hardware. For example, to use
+1 -1
MAINTAINERS
··· 6946 6946 M: Doug Ledford <dledford@redhat.com> 6947 6947 M: Jason Gunthorpe <jgg@mellanox.com> 6948 6948 L: linux-rdma@vger.kernel.org 6949 - W: http://www.openfabrics.org/ 6949 + W: https://github.com/linux-rdma/rdma-core 6950 6950 Q: http://patchwork.kernel.org/project/linux-rdma/list/ 6951 6951 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git 6952 6952 S: Supported
+4 -3
drivers/infiniband/Kconfig
··· 20 20 Userspace InfiniBand Management Datagram (MAD) support. This 21 21 is the kernel side of the userspace MAD support, which allows 22 22 userspace processes to send and receive MADs. You will also 23 - need libibumad from <http://www.openfabrics.org/downloads/management/>. 23 + need libibumad from rdma-core 24 + <https://github.com/linux-rdma/rdma-core>. 24 25 25 26 config INFINIBAND_USER_ACCESS 26 27 tristate "InfiniBand userspace access (verbs and CM)" ··· 33 32 to set up connections and directly access InfiniBand 34 33 hardware for fast-path operations. You will also need 35 34 libibverbs, libibcm and a hardware driver library from 36 - <http://www.openfabrics.org/git/>. 35 + rdma-core <https://github.com/linux-rdma/rdma-core>. 37 36 38 37 config INFINIBAND_EXP_USER_ACCESS 39 - bool "Allow experimental support for Infiniband ABI" 38 + bool "Enable the full uverbs ioctl interface (EXPERIMENTAL)" 40 39 depends on INFINIBAND_USER_ACCESS 41 40 ---help--- 42 41 IOCTL based ABI support for Infiniband. This allows userspace
+3 -1
drivers/infiniband/core/nldev.c
··· 499 499 return -EINVAL; 500 500 501 501 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 502 - if (!msg) 502 + if (!msg) { 503 + ret = -ENOMEM; 503 504 goto err; 505 + } 504 506 505 507 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 506 508 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+1 -1
drivers/infiniband/core/uverbs_std_types.c
··· 316 316 cq->uobject = &obj->uobject; 317 317 cq->comp_handler = ib_uverbs_comp_handler; 318 318 cq->event_handler = ib_uverbs_cq_event_handler; 319 - cq->cq_context = &ev_file->ev_queue; 319 + cq->cq_context = ev_file ? &ev_file->ev_queue : NULL; 320 320 obj->uobject.object = cq; 321 321 obj->uobject.user_handle = user_handle; 322 322 atomic_set(&cq->usecnt, 0);
+2 -5
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 1314 1314 return rc; 1315 1315 } 1316 1316 1317 - if (srq->umem && !IS_ERR(srq->umem)) 1317 + if (srq->umem) 1318 1318 ib_umem_release(srq->umem); 1319 1319 kfree(srq); 1320 1320 atomic_dec(&rdev->srq_count); ··· 1430 1430 return &srq->ib_srq; 1431 1431 1432 1432 fail: 1433 - if (udata && srq->umem && !IS_ERR(srq->umem)) { 1433 + if (srq->umem) 1434 1434 ib_umem_release(srq->umem); 1435 - srq->umem = NULL; 1436 - } 1437 - 1438 1435 kfree(srq); 1439 1436 exit: 1440 1437 return ERR_PTR(rc);
+3 -1
drivers/infiniband/hw/bnxt_re/qplib_fp.c
··· 557 557 558 558 srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq), 559 559 GFP_KERNEL); 560 - if (!srq->swq) 560 + if (!srq->swq) { 561 + rc = -ENOMEM; 561 562 goto fail; 563 + } 562 564 563 565 RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags); 564 566
+7 -11
drivers/infiniband/hw/bnxt_re/qplib_res.c
··· 705 705 dpit->max = dbr_len / PAGE_SIZE; 706 706 707 707 dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL); 708 - if (!dpit->app_tbl) { 709 - pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem); 710 - dev_err(&res->pdev->dev, 711 - "QPLIB: DPI app tbl allocation failed"); 712 - return -ENOMEM; 713 - } 708 + if (!dpit->app_tbl) 709 + goto unmap_io; 714 710 715 711 bytes = dpit->max >> 3; 716 712 if (!bytes) ··· 714 718 715 719 dpit->tbl = kmalloc(bytes, GFP_KERNEL); 716 720 if (!dpit->tbl) { 717 - pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem); 718 721 kfree(dpit->app_tbl); 719 722 dpit->app_tbl = NULL; 720 - dev_err(&res->pdev->dev, 721 - "QPLIB: DPI tbl allocation failed for size = %d", 722 - bytes); 723 - return -ENOMEM; 723 + goto unmap_io; 724 724 } 725 725 726 726 memset((u8 *)dpit->tbl, 0xFF, bytes); 727 727 728 728 return 0; 729 + 730 + unmap_io: 731 + pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem); 732 + return -ENOMEM; 729 733 } 730 734 731 735 /* PKEYs */
+70 -12
drivers/infiniband/hw/hfi1/chip.c
··· 1083 1083 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd); 1084 1084 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms); 1085 1085 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index); 1086 + static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width); 1086 1087 1087 1088 /* 1088 1089 * Error interrupt table entry. This is used as input to the interrupt ··· 6906 6905 /* no longer frozen */ 6907 6906 } 6908 6907 6908 + /** 6909 + * update_xmit_counters - update PortXmitWait/PortVlXmitWait 6910 + * counters. 6911 + * @ppd: info of physical Hfi port 6912 + * @link_width: new link width after link up or downgrade 6913 + * 6914 + * Update the PortXmitWait and PortVlXmitWait counters after 6915 + * a link up or downgrade event to reflect a link width change. 6916 + */ 6917 + static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width) 6918 + { 6919 + int i; 6920 + u16 tx_width; 6921 + u16 link_speed; 6922 + 6923 + tx_width = tx_link_width(link_width); 6924 + link_speed = get_link_speed(ppd->link_speed_active); 6925 + 6926 + /* 6927 + * There are C_VL_COUNT number of PortVLXmitWait counters. 6928 + * Adding 1 to C_VL_COUNT to include the PortXmitWait counter. 6929 + */ 6930 + for (i = 0; i < C_VL_COUNT + 1; i++) 6931 + get_xmit_wait_counters(ppd, tx_width, link_speed, i); 6932 + } 6933 + 6909 6934 /* 6910 6935 * Handle a link up interrupt from the 8051. 6911 6936 * ··· 7553 7526 set_link_state(ppd, HLS_GOING_UP); 7554 7527 } 7555 7528 7556 - /* 7557 - * Apply the link width downgrade enabled policy against the current active 7558 - * link widths. 7529 + /** 7530 + * apply_link_downgrade_policy - Apply the link width downgrade enabled 7531 + * policy against the current active link widths. 7532 + * @ppd: info of physical Hfi port 7533 + * @refresh_widths: True indicates link downgrade event 7534 + * @return: True indicates a successful link downgrade. 
False indicates 7535 + * link downgrade event failed and the link will bounce back to 7536 + * default link width. 7559 7537 * 7560 - * Called when the enabled policy changes or the active link widths change. 7538 + * Called when the enabled policy changes or the active link widths 7539 + * change. 7540 + * Refresh_widths indicates that a link downgrade occurred. The 7541 + * link_downgraded variable is set by refresh_widths and 7542 + * determines the success/failure of the policy application. 7561 7543 */ 7562 - void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths) 7544 + bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd, 7545 + bool refresh_widths) 7563 7546 { 7564 7547 int do_bounce = 0; 7565 7548 int tries; 7566 7549 u16 lwde; 7567 7550 u16 tx, rx; 7551 + bool link_downgraded = refresh_widths; 7568 7552 7569 7553 /* use the hls lock to avoid a race with actual link up */ 7570 7554 tries = 0; ··· 7609 7571 ppd->link_width_downgrade_rx_active == 0) { 7610 7572 /* the 8051 reported a dead link as a downgrade */ 7611 7573 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n"); 7574 + link_downgraded = false; 7612 7575 } else if (lwde == 0) { 7613 7576 /* downgrade is disabled */ 7614 7577 ··· 7626 7587 ppd->link_width_downgrade_tx_active, 7627 7588 ppd->link_width_downgrade_rx_active); 7628 7589 do_bounce = 1; 7590 + link_downgraded = false; 7629 7591 } 7630 7592 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 || 7631 7593 (lwde & ppd->link_width_downgrade_rx_active) == 0) { ··· 7638 7598 lwde, ppd->link_width_downgrade_tx_active, 7639 7599 ppd->link_width_downgrade_rx_active); 7640 7600 do_bounce = 1; 7601 + link_downgraded = false; 7641 7602 } 7642 7603 7643 7604 done: ··· 7650 7609 set_link_state(ppd, HLS_DN_OFFLINE); 7651 7610 start_link(ppd); 7652 7611 } 7612 + 7613 + return link_downgraded; 7653 7614 } 7654 7615 7655 7616 /* ··· 7665 7622 link_downgrade_work); 7666 7623 7667 7624 
dd_dev_info(ppd->dd, "8051: Link width downgrade\n"); 7668 - apply_link_downgrade_policy(ppd, 1); 7625 + if (apply_link_downgrade_policy(ppd, true)) 7626 + update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active); 7669 7627 } 7670 7628 7671 7629 static char *dcc_err_string(char *buf, int buf_len, u64 flags) ··· 8308 8264 /* handle the interrupt(s) */ 8309 8265 sdma_engine_interrupt(sde, status); 8310 8266 } else { 8311 - dd_dev_err_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n", 8312 - sde->this_idx); 8267 + dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n", 8268 + sde->this_idx); 8313 8269 } 8314 8270 return IRQ_HANDLED; 8315 8271 } ··· 10641 10597 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 10642 10598 10643 10599 handle_linkup_change(dd, 1); 10600 + 10601 + /* 10602 + * After link up, a new link width will have been set. 10603 + * Update the xmit counters with regards to the new 10604 + * link width. 10605 + */ 10606 + update_xmit_counters(ppd, ppd->link_width_active); 10607 + 10644 10608 ppd->host_link_state = HLS_UP_INIT; 10645 10609 update_statusp(ppd, IB_PORT_INIT); 10646 10610 break; ··· 13012 12960 pci_intx(pdev, 0); 13013 12961 } 13014 12962 13015 - static void clean_up_interrupts(struct hfi1_devdata *dd) 12963 + /** 12964 + * hfi1_clean_up_interrupts() - Free all IRQ resources 12965 + * @dd: valid device data data structure 12966 + * 12967 + * Free the MSI or INTx IRQs and assoicated PCI resources, 12968 + * if they have been allocated. 
12969 + */ 12970 + void hfi1_clean_up_interrupts(struct hfi1_devdata *dd) 13016 12971 { 13017 12972 int i; 13018 12973 ··· 13380 13321 return 0; 13381 13322 13382 13323 fail: 13383 - clean_up_interrupts(dd); 13324 + hfi1_clean_up_interrupts(dd); 13384 13325 return ret; 13385 13326 } 13386 13327 ··· 14807 14748 aspm_exit(dd); 14808 14749 free_cntrs(dd); 14809 14750 free_rcverr(dd); 14810 - clean_up_interrupts(dd); 14811 14751 finish_chip_resources(dd); 14812 14752 } 14813 14753 ··· 15262 15204 bail_free_cntrs: 15263 15205 free_cntrs(dd); 15264 15206 bail_clear_intr: 15265 - clean_up_interrupts(dd); 15207 + hfi1_clean_up_interrupts(dd); 15266 15208 bail_cleanup: 15267 15209 hfi1_pcie_ddcleanup(dd); 15268 15210 bail_free:
+2 -2
drivers/infiniband/hw/hfi1/chip.h
··· 736 736 int start_link(struct hfi1_pportdata *ppd); 737 737 int bringup_serdes(struct hfi1_pportdata *ppd); 738 738 void set_intr_state(struct hfi1_devdata *dd, u32 enable); 739 - void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, 740 - int refresh_widths); 739 + bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd, 740 + bool refresh_widths); 741 741 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd, 742 742 u32 intr_adjust, u32 npkts); 743 743 int stop_drain_data_vls(struct hfi1_devdata *dd);
+8 -1
drivers/infiniband/hw/hfi1/debugfs.c
··· 1 1 /* 2 - * Copyright(c) 2015-2017 Intel Corporation. 2 + * Copyright(c) 2015-2018 Intel Corporation. 3 3 * 4 4 * This file is provided under a dual BSD/GPLv2 license. When using or 5 5 * redistributing this file, you may do so under either license. ··· 1200 1200 n_bytes += rcd->opstats->stats[i].n_bytes; 1201 1201 } 1202 1202 hfi1_rcd_put(rcd); 1203 + } 1204 + for_each_possible_cpu(j) { 1205 + struct hfi1_opcode_stats_perctx *sp = 1206 + per_cpu_ptr(dd->tx_opstats, j); 1207 + 1208 + n_packets += sp->stats[i].n_packets; 1209 + n_bytes += sp->stats[i].n_bytes; 1203 1210 } 1204 1211 if (!n_packets && !n_bytes) 1205 1212 return SEQ_SKIP;
+25 -26
drivers/infiniband/hw/hfi1/driver.c
··· 256 256 u32 mlid_base; 257 257 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 258 258 struct hfi1_devdata *dd = ppd->dd; 259 - struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; 259 + struct hfi1_ibdev *verbs_dev = &dd->verbs_dev; 260 + struct rvt_dev_info *rdi = &verbs_dev->rdi; 261 + 262 + if ((packet->rhf & RHF_DC_ERR) && 263 + hfi1_dbg_fault_suppress_err(verbs_dev)) 264 + return; 260 265 261 266 if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR)) 262 267 return; ··· 639 634 } 640 635 } 641 636 642 - static void process_rcv_qp_work(struct hfi1_ctxtdata *rcd) 637 + static void process_rcv_qp_work(struct hfi1_packet *packet) 643 638 { 644 639 struct rvt_qp *qp, *nqp; 640 + struct hfi1_ctxtdata *rcd = packet->rcd; 645 641 646 642 /* 647 643 * Iterate over all QPs waiting to respond. ··· 652 646 list_del_init(&qp->rspwait); 653 647 if (qp->r_flags & RVT_R_RSP_NAK) { 654 648 qp->r_flags &= ~RVT_R_RSP_NAK; 655 - hfi1_send_rc_ack(rcd, qp, 0); 649 + packet->qp = qp; 650 + hfi1_send_rc_ack(packet, 0); 656 651 } 657 652 if (qp->r_flags & RVT_R_RSP_SEND) { 658 653 unsigned long flags; ··· 674 667 if (thread) { 675 668 if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0) 676 669 /* allow defered processing */ 677 - process_rcv_qp_work(packet->rcd); 670 + process_rcv_qp_work(packet); 678 671 cond_resched(); 679 672 return RCV_PKT_OK; 680 673 } else { ··· 816 809 last = RCV_PKT_DONE; 817 810 process_rcv_update(last, &packet); 818 811 } 819 - process_rcv_qp_work(rcd); 812 + process_rcv_qp_work(&packet); 820 813 rcd->head = packet.rhqoff; 821 814 bail: 822 815 finish_packet(&packet); ··· 845 838 last = RCV_PKT_DONE; 846 839 process_rcv_update(last, &packet); 847 840 } 848 - process_rcv_qp_work(rcd); 841 + process_rcv_qp_work(&packet); 849 842 rcd->head = packet.rhqoff; 850 843 bail: 851 844 finish_packet(&packet); ··· 1075 1068 process_rcv_update(last, &packet); 1076 1069 } 1077 1070 1078 - process_rcv_qp_work(rcd); 1071 + process_rcv_qp_work(&packet); 1079 1072 rcd->head = 
packet.rhqoff; 1080 1073 1081 1074 bail: ··· 1445 1438 packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf); 1446 1439 packet->pad = ib_bth_get_pad(packet->ohdr); 1447 1440 packet->extra_byte = 0; 1448 - packet->fecn = ib_bth_get_fecn(packet->ohdr); 1449 - packet->becn = ib_bth_get_becn(packet->ohdr); 1441 + packet->pkey = ib_bth_get_pkey(packet->ohdr); 1442 + packet->migrated = ib_bth_is_migration(packet->ohdr); 1450 1443 1451 1444 return 0; 1452 1445 drop: ··· 1499 1492 1500 1493 /* Query commonly used fields from packet header */ 1501 1494 packet->opcode = ib_bth_get_opcode(packet->ohdr); 1502 - packet->hlen = hdr_len_by_opcode[packet->opcode] + 8 + grh_len; 1503 - packet->payload = packet->ebuf + packet->hlen - (4 * sizeof(u32)); 1495 + /* hdr_len_by_opcode already has an IB LRH factored in */ 1496 + packet->hlen = hdr_len_by_opcode[packet->opcode] + 1497 + (LRH_16B_BYTES - LRH_9B_BYTES) + grh_len; 1498 + packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES; 1504 1499 packet->slid = hfi1_16B_get_slid(packet->hdr); 1505 1500 packet->dlid = hfi1_16B_get_dlid(packet->hdr); 1506 1501 if (unlikely(hfi1_is_16B_mcast(packet->dlid))) ··· 1513 1504 packet->sl = ibp->sc_to_sl[packet->sc]; 1514 1505 packet->pad = hfi1_16B_bth_get_pad(packet->ohdr); 1515 1506 packet->extra_byte = SIZE_OF_LT; 1516 - packet->fecn = hfi1_16B_get_fecn(packet->hdr); 1517 - packet->becn = hfi1_16B_get_becn(packet->hdr); 1507 + packet->pkey = hfi1_16B_get_pkey(packet->hdr); 1508 + packet->migrated = opa_bth_is_migration(packet->ohdr); 1518 1509 1519 1510 if (hfi1_bypass_ingress_pkt_check(packet)) 1520 1511 goto drop; ··· 1559 1550 if (hfi1_setup_9B_packet(packet)) 1560 1551 return RHF_RCV_CONTINUE; 1561 1552 1562 - trace_hfi1_rcvhdr(packet->rcd->ppd->dd, 1563 - packet->rcd->ctxt, 1564 - rhf_err_flags(packet->rhf), 1565 - RHF_RCV_TYPE_IB, 1566 - packet->hlen, 1567 - packet->tlen, 1568 - packet->updegr, 1569 - rhf_egr_index(packet->rhf)); 1570 - 1571 - if (unlikely( 1572 - 
(hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) && 1573 - (packet->rhf & RHF_DC_ERR)))) 1574 - return RHF_RCV_CONTINUE; 1553 + trace_hfi1_rcvhdr(packet); 1575 1554 1576 1555 if (unlikely(rhf_err_flags(packet->rhf))) { 1577 1556 handle_eflags(packet); ··· 1594 1597 1595 1598 if (hfi1_setup_bypass_packet(packet)) 1596 1599 return RHF_RCV_CONTINUE; 1600 + 1601 + trace_hfi1_rcvhdr(packet); 1597 1602 1598 1603 if (unlikely(rhf_err_flags(packet->rhf))) { 1599 1604 handle_eflags(packet);
+1 -3
drivers/infiniband/hw/hfi1/file_ops.c
··· 196 196 if (!atomic_inc_not_zero(&dd->user_refcount)) 197 197 return -ENXIO; 198 198 199 - /* Just take a ref now. Not all opens result in a context assign */ 200 - kobject_get(&dd->kobj); 201 - 202 199 /* The real work is performed later in assign_ctxt() */ 203 200 204 201 fd = kzalloc(sizeof(*fd), GFP_KERNEL); ··· 205 208 fd->mm = current->mm; 206 209 mmgrab(fd->mm); 207 210 fd->dd = dd; 211 + kobject_get(&fd->dd->kobj); 208 212 fp->private_data = fd; 209 213 } else { 210 214 fp->private_data = NULL;
+15 -11
drivers/infiniband/hw/hfi1/hfi.h
··· 341 341 u32 slid; 342 342 u16 tlen; 343 343 s16 etail; 344 + u16 pkey; 344 345 u8 hlen; 345 346 u8 numpkt; 346 347 u8 rsize; ··· 352 351 u8 sc; 353 352 u8 sl; 354 353 u8 opcode; 355 - bool becn; 356 - bool fecn; 354 + bool migrated; 357 355 }; 358 356 359 357 /* Packet types */ ··· 858 858 struct work_struct linkstate_active_work; 859 859 /* Does this port need to prescan for FECNs */ 860 860 bool cc_prescan; 861 + /* 862 + * Sample sendWaitCnt & sendWaitVlCnt during link transition 863 + * and counter request. 864 + */ 865 + u64 port_vl_xmit_wait_last[C_VL_COUNT + 1]; 866 + u16 prev_link_width; 867 + u64 vl_xmit_flit_cnt[C_VL_COUNT + 1]; 861 868 }; 862 869 863 870 typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet); ··· 1786 1779 static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt, 1787 1780 bool do_cnp) 1788 1781 { 1789 - struct ib_other_headers *ohdr = pkt->ohdr; 1790 - 1791 - u32 bth1; 1792 - bool becn = false; 1793 - bool fecn = false; 1782 + bool becn; 1783 + bool fecn; 1794 1784 1795 1785 if (pkt->etype == RHF_RCV_TYPE_BYPASS) { 1796 1786 fecn = hfi1_16B_get_fecn(pkt->hdr); 1797 1787 becn = hfi1_16B_get_becn(pkt->hdr); 1798 1788 } else { 1799 - bth1 = be32_to_cpu(ohdr->bth[1]); 1800 - fecn = bth1 & IB_FECN_SMASK; 1801 - becn = bth1 & IB_BECN_SMASK; 1789 + fecn = ib_bth_get_fecn(pkt->ohdr); 1790 + becn = ib_bth_get_becn(pkt->ohdr); 1802 1791 } 1803 1792 if (unlikely(fecn || becn)) { 1804 1793 hfi1_process_ecn_slowpath(qp, pkt, do_cnp); ··· 1960 1957 int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len); 1961 1958 1962 1959 int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent); 1960 + void hfi1_clean_up_interrupts(struct hfi1_devdata *dd); 1963 1961 void hfi1_pcie_cleanup(struct pci_dev *pdev); 1964 1962 int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev); 1965 1963 void hfi1_pcie_ddcleanup(struct hfi1_devdata *); ··· 2420 2416 static inline void hfi1_make_16b_hdr(struct 
hfi1_16b_header *hdr, 2421 2417 u32 slid, u32 dlid, 2422 2418 u16 len, u16 pkey, 2423 - u8 becn, u8 fecn, u8 l4, 2419 + bool becn, bool fecn, u8 l4, 2424 2420 u8 sc) 2425 2421 { 2426 2422 u32 lrh0 = 0;
+22 -9
drivers/infiniband/hw/hfi1/init.c
··· 172 172 u16 i; 173 173 int ret; 174 174 175 - dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd), 175 + dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd), 176 176 GFP_KERNEL, dd->node); 177 177 if (!dd->rcd) 178 178 return -ENOMEM; ··· 439 439 * The resulting value will be rounded down to the closest 440 440 * multiple of dd->rcv_entries.group_size. 441 441 */ 442 - rcd->egrbufs.buffers = kzalloc_node( 443 - rcd->egrbufs.count * sizeof(*rcd->egrbufs.buffers), 444 - GFP_KERNEL, numa); 442 + rcd->egrbufs.buffers = 443 + kcalloc_node(rcd->egrbufs.count, 444 + sizeof(*rcd->egrbufs.buffers), 445 + GFP_KERNEL, numa); 445 446 if (!rcd->egrbufs.buffers) 446 447 goto bail; 447 - rcd->egrbufs.rcvtids = kzalloc_node( 448 - rcd->egrbufs.count * 449 - sizeof(*rcd->egrbufs.rcvtids), 450 - GFP_KERNEL, numa); 448 + rcd->egrbufs.rcvtids = 449 + kcalloc_node(rcd->egrbufs.count, 450 + sizeof(*rcd->egrbufs.rcvtids), 451 + GFP_KERNEL, numa); 451 452 if (!rcd->egrbufs.rcvtids) 452 453 goto bail; 453 454 rcd->egrbufs.size = eager_buffer_size; ··· 638 637 ppd->dd = dd; 639 638 ppd->hw_pidx = hw_pidx; 640 639 ppd->port = port; /* IB port number, not index */ 640 + ppd->prev_link_width = LINK_WIDTH_DEFAULT; 641 + /* 642 + * There are C_VL_COUNT number of PortVLXmitWait counters. 643 + * Adding 1 to C_VL_COUNT to include the PortXmitWait counter. 
644 + */ 645 + for (i = 0; i < C_VL_COUNT + 1; i++) { 646 + ppd->port_vl_xmit_wait_last[i] = 0; 647 + ppd->vl_xmit_flit_cnt[i] = 0; 648 + } 641 649 642 650 default_pkey_idx = 1; 643 651 ··· 1068 1058 } 1069 1059 dd->flags &= ~HFI1_INITTED; 1070 1060 1071 - /* mask interrupts, but not errors */ 1061 + /* mask and clean up interrupts, but not errors */ 1072 1062 set_intr_state(dd, 0); 1063 + hfi1_clean_up_interrupts(dd); 1073 1064 1074 1065 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1075 1066 ppd = dd->pport + pidx; ··· 1229 1218 free_percpu(dd->rcv_limit); 1230 1219 free_percpu(dd->send_schedule); 1231 1220 free_percpu(dd->tx_opstats); 1221 + sdma_clean(dd, dd->num_sdma); 1232 1222 rvt_dealloc_device(&dd->verbs_dev.rdi); 1233 1223 } 1234 1224 ··· 1716 1704 dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j); 1717 1705 1718 1706 if (initfail || ret) { 1707 + hfi1_clean_up_interrupts(dd); 1719 1708 stop_timers(dd); 1720 1709 flush_workqueue(ib_wq); 1721 1710 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+9
drivers/infiniband/hw/hfi1/iowait.h
··· 371 371 } 372 372 } 373 373 374 + /** 375 + * iowait_packet_queued() - determine if a packet is already built 376 + * @wait: the wait structure 377 + */ 378 + static inline bool iowait_packet_queued(struct iowait *wait) 379 + { 380 + return !list_empty(&wait->tx_head); 381 + } 382 + 374 383 #endif
+117 -10
drivers/infiniband/hw/hfi1/mad.c
··· 2649 2649 } 2650 2650 } 2651 2651 2652 + /** 2653 + * tx_link_width - convert link width bitmask to integer 2654 + * value representing actual link width. 2655 + * @link_width: width of active link 2656 + * @return: return index of the bit set in link_width var 2657 + * 2658 + * The function convert and return the index of bit set 2659 + * that indicate the current link width. 2660 + */ 2661 + u16 tx_link_width(u16 link_width) 2662 + { 2663 + int n = LINK_WIDTH_DEFAULT; 2664 + u16 tx_width = n; 2665 + 2666 + while (link_width && n) { 2667 + if (link_width & (1 << (n - 1))) { 2668 + tx_width = n; 2669 + break; 2670 + } 2671 + n--; 2672 + } 2673 + 2674 + return tx_width; 2675 + } 2676 + 2677 + /** 2678 + * get_xmit_wait_counters - Convert HFI 's SendWaitCnt/SendWaitVlCnt 2679 + * counter in unit of TXE cycle times to flit times. 2680 + * @ppd: info of physical Hfi port 2681 + * @link_width: width of active link 2682 + * @link_speed: speed of active link 2683 + * @vl: represent VL0-VL7, VL15 for PortVLXmitWait counters request 2684 + * and if vl value is C_VL_COUNT, it represent SendWaitCnt 2685 + * counter request 2686 + * @return: return SendWaitCnt/SendWaitVlCnt counter value per vl. 2687 + * 2688 + * Convert SendWaitCnt/SendWaitVlCnt counter from TXE cycle times to 2689 + * flit times. Call this function to samples these counters. This 2690 + * function will calculate for previous state transition and update 2691 + * current state at end of function using ppd->prev_link_width and 2692 + * ppd->port_vl_xmit_wait_last to port_vl_xmit_wait_curr and link_width. 
2693 + */ 2694 + u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd, 2695 + u16 link_width, u16 link_speed, int vl) 2696 + { 2697 + u64 port_vl_xmit_wait_curr; 2698 + u64 delta_vl_xmit_wait; 2699 + u64 xmit_wait_val; 2700 + 2701 + if (vl > C_VL_COUNT) 2702 + return 0; 2703 + if (vl < C_VL_COUNT) 2704 + port_vl_xmit_wait_curr = 2705 + read_port_cntr(ppd, C_TX_WAIT_VL, vl); 2706 + else 2707 + port_vl_xmit_wait_curr = 2708 + read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL); 2709 + 2710 + xmit_wait_val = 2711 + port_vl_xmit_wait_curr - 2712 + ppd->port_vl_xmit_wait_last[vl]; 2713 + delta_vl_xmit_wait = 2714 + convert_xmit_counter(xmit_wait_val, 2715 + ppd->prev_link_width, 2716 + link_speed); 2717 + 2718 + ppd->vl_xmit_flit_cnt[vl] += delta_vl_xmit_wait; 2719 + ppd->port_vl_xmit_wait_last[vl] = port_vl_xmit_wait_curr; 2720 + ppd->prev_link_width = link_width; 2721 + 2722 + return ppd->vl_xmit_flit_cnt[vl]; 2723 + } 2724 + 2652 2725 static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, 2653 2726 struct ib_device *ibdev, 2654 2727 u8 port, u32 *resp_len) ··· 2741 2668 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 2742 2669 int vfi; 2743 2670 u64 tmp, tmp2; 2671 + u16 link_width; 2672 + u16 link_speed; 2744 2673 2745 2674 response_data_size = sizeof(struct opa_port_status_rsp) + 2746 2675 num_vls * sizeof(struct _vls_pctrs); ··· 2786 2711 rsp->port_multicast_rcv_pkts = 2787 2712 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS, 2788 2713 CNTR_INVALID_VL)); 2714 + /* 2715 + * Convert PortXmitWait counter from TXE cycle times 2716 + * to flit times. 
2717 + */ 2718 + link_width = 2719 + tx_link_width(ppd->link_width_downgrade_tx_active); 2720 + link_speed = get_link_speed(ppd->link_speed_active); 2789 2721 rsp->port_xmit_wait = 2790 - cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL)); 2722 + cpu_to_be64(get_xmit_wait_counters(ppd, link_width, 2723 + link_speed, C_VL_COUNT)); 2791 2724 rsp->port_rcv_fecn = 2792 2725 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL)); 2793 2726 rsp->port_rcv_becn = ··· 2860 2777 rsp->vls[vfi].port_vl_xmit_pkts = 2861 2778 cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL, 2862 2779 idx_from_vl(vl))); 2863 - 2780 + /* 2781 + * Convert PortVlXmitWait counter from TXE cycle 2782 + * times to flit times. 2783 + */ 2864 2784 rsp->vls[vfi].port_vl_xmit_wait = 2865 - cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL, 2866 - idx_from_vl(vl))); 2785 + cpu_to_be64(get_xmit_wait_counters(ppd, link_width, 2786 + link_speed, 2787 + idx_from_vl(vl))); 2867 2788 2868 2789 rsp->vls[vfi].port_vl_rcv_fecn = 2869 2790 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL, ··· 2997 2910 unsigned long vl; 2998 2911 u32 vl_select_mask; 2999 2912 int vfi; 2913 + u16 link_width; 2914 + u16 link_speed; 3000 2915 3001 2916 num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; 3002 2917 num_vls = hweight32(be32_to_cpu(req->vl_select_mask)); ··· 3048 2959 rsp->link_quality_indicator = cpu_to_be32((u32)lq); 3049 2960 pma_get_opa_port_dctrs(ibdev, rsp); 3050 2961 2962 + /* 2963 + * Convert PortXmitWait counter from TXE 2964 + * cycle times to flit times. 
2965 + */ 2966 + link_width = 2967 + tx_link_width(ppd->link_width_downgrade_tx_active); 2968 + link_speed = get_link_speed(ppd->link_speed_active); 3051 2969 rsp->port_xmit_wait = 3052 - cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL)); 2970 + cpu_to_be64(get_xmit_wait_counters(ppd, link_width, 2971 + link_speed, C_VL_COUNT)); 3053 2972 rsp->port_rcv_fecn = 3054 2973 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL)); 3055 2974 rsp->port_rcv_becn = ··· 3093 2996 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL, 3094 2997 idx_from_vl(vl))); 3095 2998 2999 + /* 3000 + * Convert PortVlXmitWait counter from TXE 3001 + * cycle times to flit times. 3002 + */ 3096 3003 rsp->vls[vfi].port_vl_xmit_wait = 3097 - cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL, 3098 - idx_from_vl(vl))); 3004 + cpu_to_be64(get_xmit_wait_counters(ppd, link_width, 3005 + link_speed, 3006 + idx_from_vl(vl))); 3099 3007 3100 3008 rsp->vls[vfi].port_vl_rcv_fecn = 3101 3009 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL, ··· 3518 3416 if (counter_select & CS_PORT_MCAST_RCV_PKTS) 3519 3417 write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0); 3520 3418 3521 - if (counter_select & CS_PORT_XMIT_WAIT) 3419 + if (counter_select & CS_PORT_XMIT_WAIT) { 3522 3420 write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0); 3523 - 3421 + ppd->port_vl_xmit_wait_last[C_VL_COUNT] = 0; 3422 + ppd->vl_xmit_flit_cnt[C_VL_COUNT] = 0; 3423 + } 3524 3424 /* ignore cs_sw_portCongestion for HFIs */ 3525 3425 3526 3426 if (counter_select & CS_PORT_RCV_FECN) ··· 3595 3491 if (counter_select & CS_PORT_RCV_PKTS) 3596 3492 write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0); 3597 3493 3598 - if (counter_select & CS_PORT_XMIT_WAIT) 3494 + if (counter_select & CS_PORT_XMIT_WAIT) { 3599 3495 write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0); 3496 + ppd->port_vl_xmit_wait_last[idx_from_vl(vl)] = 0; 3497 + ppd->vl_xmit_flit_cnt[idx_from_vl(vl)] = 0; 3498 + } 3600 3499 3601 3500 /* sw_port_vl_congestion is 0 
for HFIs */ 3602 3501 if (counter_select & CS_PORT_RCV_FECN)
+45
drivers/infiniband/hw/hfi1/mad.h
··· 180 180 #define OPA_VLARB_PREEMPT_MATRIX 3 181 181 182 182 #define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00) 183 + #define LINK_SPEED_25G 1 184 + #define LINK_SPEED_12_5G 2 185 + #define LINK_WIDTH_DEFAULT 4 186 + #define DECIMAL_FACTORING 1000 187 + /* 188 + * The default link width is multiplied by 1000 189 + * to get accurate value after division. 190 + */ 191 + #define FACTOR_LINK_WIDTH (LINK_WIDTH_DEFAULT * DECIMAL_FACTORING) 183 192 184 193 struct ib_pma_portcounters_cong { 185 194 u8 reserved; ··· 438 429 439 430 void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port); 440 431 void hfi1_handle_trap_timer(struct timer_list *t); 432 + u16 tx_link_width(u16 link_width); 433 + u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd, u16 link_width, 434 + u16 link_speed, int vl); 435 + /** 436 + * get_link_speed - determine whether 12.5G or 25G speed 437 + * @link_speed: the speed of active link 438 + * @return: Return 2 if link speed identified as 12.5G 439 + * or return 1 if link speed is 25G. 440 + * 441 + * The function indirectly calculate required link speed 442 + * value for convert_xmit_counter function. If the link 443 + * speed is 25G, the function return as 1 as it is required 444 + * by xmit counter conversion formula :-( 25G / link_speed). 445 + * This conversion will provide value 1 if current 446 + * link speed is 25G or 2 if 12.5G.This is done to avoid 447 + * 12.5 float number conversion. 448 + */ 449 + static inline u16 get_link_speed(u16 link_speed) 450 + { 451 + return (link_speed == 1) ? 452 + LINK_SPEED_12_5G : LINK_SPEED_25G; 453 + } 441 454 455 + /** 456 + * convert_xmit_counter - calculate flit times for given xmit counter 457 + * value 458 + * @xmit_wait_val: current xmit counter value 459 + * @link_width: width of active link 460 + * @link_speed: speed of active link 461 + * @return: return xmit counter value in flit times. 
462 + */ 463 + static inline u64 convert_xmit_counter(u64 xmit_wait_val, u16 link_width, 464 + u16 link_speed) 465 + { 466 + return (xmit_wait_val * 2 * (FACTOR_LINK_WIDTH / link_width) 467 + * link_speed) / DECIMAL_FACTORING; 468 + } 442 469 #endif /* _HFI1_MAD_H */
+12 -11
drivers/infiniband/hw/hfi1/pcie.c
··· 1034 1034 int do_retry, retry_count = 0; 1035 1035 int intnum = 0; 1036 1036 uint default_pset; 1037 + uint pset = pcie_pset; 1037 1038 u16 target_vector, target_speed; 1038 1039 u16 lnkctl2, vendor; 1039 1040 u8 div; ··· 1202 1201 * 1203 1202 * Set Gen3EqPsetReqVec, leave other fields 0. 1204 1203 */ 1205 - if (pcie_pset == UNSET_PSET) 1206 - pcie_pset = default_pset; 1207 - if (pcie_pset > 10) { /* valid range is 0-10, inclusive */ 1204 + if (pset == UNSET_PSET) 1205 + pset = default_pset; 1206 + if (pset > 10) { /* valid range is 0-10, inclusive */ 1208 1207 dd_dev_err(dd, "%s: Invalid Eq Pset %u, setting to %d\n", 1209 - __func__, pcie_pset, default_pset); 1210 - pcie_pset = default_pset; 1208 + __func__, pset, default_pset); 1209 + pset = default_pset; 1211 1210 } 1212 - dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pcie_pset); 1211 + dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pset); 1213 1212 pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL106, 1214 - ((1 << pcie_pset) << 1213 + ((1 << pset) << 1215 1214 PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT) | 1216 1215 PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK | 1217 1216 PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK); ··· 1241 1240 /* apply static CTLE tunings */ 1242 1241 u8 pcie_dc, pcie_lf, pcie_hf, pcie_bw; 1243 1242 1244 - pcie_dc = ctle_tunings[pcie_pset][0]; 1245 - pcie_lf = ctle_tunings[pcie_pset][1]; 1246 - pcie_hf = ctle_tunings[pcie_pset][2]; 1247 - pcie_bw = ctle_tunings[pcie_pset][3]; 1243 + pcie_dc = ctle_tunings[pset][0]; 1244 + pcie_lf = ctle_tunings[pset][1]; 1245 + pcie_hf = ctle_tunings[pset][2]; 1246 + pcie_bw = ctle_tunings[pset][3]; 1248 1247 write_gasket_interrupt(dd, intnum++, 0x0026, 0x0200 | pcie_dc); 1249 1248 write_gasket_interrupt(dd, intnum++, 0x0026, 0x0100 | pcie_lf); 1250 1249 write_gasket_interrupt(dd, intnum++, 0x0026, 0x0000 | pcie_hf);
+8 -7
drivers/infiniband/hw/hfi1/pio.c
··· 455 455 dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8), 456 456 GFP_KERNEL); 457 457 dd->send_contexts = kcalloc(dd->num_send_contexts, 458 - sizeof(struct send_context_info), 459 - GFP_KERNEL); 458 + sizeof(struct send_context_info), 459 + GFP_KERNEL); 460 460 if (!dd->send_contexts || !dd->hw_to_sw) { 461 461 kfree(dd->hw_to_sw); 462 462 kfree(dd->send_contexts); ··· 856 856 * so head == tail can mean empty. 857 857 */ 858 858 sc->sr_size = sci->credits + 1; 859 - sc->sr = kzalloc_node(sizeof(union pio_shadow_ring) * 860 - sc->sr_size, GFP_KERNEL, numa); 859 + sc->sr = kcalloc_node(sc->sr_size, 860 + sizeof(union pio_shadow_ring), 861 + GFP_KERNEL, numa); 861 862 if (!sc->sr) { 862 863 sc_free(sc); 863 864 return NULL; ··· 1959 1958 hfi1_init_ctxt(dd->vld[15].sc); 1960 1959 dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048); 1961 1960 1962 - dd->kernel_send_context = kzalloc_node(dd->num_send_contexts * 1963 - sizeof(struct send_context *), 1964 - GFP_KERNEL, dd->node); 1961 + dd->kernel_send_context = kcalloc_node(dd->num_send_contexts, 1962 + sizeof(struct send_context *), 1963 + GFP_KERNEL, dd->node); 1965 1964 if (!dd->kernel_send_context) 1966 1965 goto freesc15; 1967 1966
+1 -3
drivers/infiniband/hw/hfi1/qp.c
··· 565 565 if (qp->s_ack_queue) 566 566 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; 567 567 seq_printf(s, 568 - "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n", 568 + "N %d %s QP %x R %u %s %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n", 569 569 iter->n, 570 570 qp_idle(qp) ? "I" : "B", 571 571 qp->ibqp.qp_num, ··· 573 573 qp_type_str[qp->ibqp.qp_type], 574 574 qp->state, 575 575 wqe ? wqe->wr.opcode : 0, 576 - qp->s_hdrwords, 577 576 qp->s_flags, 578 577 iowait_sdma_pending(&priv->s_iowait), 579 578 iowait_pio_pending(&priv->s_iowait), ··· 794 795 } 795 796 796 797 if (!(qp->s_flags & RVT_S_BUSY)) { 797 - qp->s_hdrwords = 0; 798 798 if (qp->s_rdma_mr) { 799 799 rvt_put_mr(qp->s_rdma_mr); 800 800 qp->s_rdma_mr = NULL;
+13
drivers/infiniband/hw/hfi1/qp.h
··· 51 51 #include <rdma/rdmavt_qp.h> 52 52 #include "verbs.h" 53 53 #include "sdma.h" 54 + #include "verbs_txreq.h" 54 55 55 56 extern unsigned int hfi1_qp_table_size; 56 57 57 58 extern const struct rvt_operation_params hfi1_post_parms[]; 59 + 60 + /* 61 + * Send if not busy or waiting for I/O and either 62 + * a RC response is pending or we can process send work requests. 63 + */ 64 + static inline int hfi1_send_ok(struct rvt_qp *qp) 65 + { 66 + return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) && 67 + (verbs_txreq_queued(qp) || 68 + (qp->s_flags & RVT_S_RESP_PENDING) || 69 + !(qp->s_flags & RVT_S_ANY_WAIT_SEND)); 70 + } 58 71 59 72 /* 60 73 * free_ahg - clear ahg from QP
+25 -26
drivers/infiniband/hw/hfi1/rc.c
··· 226 226 bth2 = mask_psn(qp->s_ack_psn); 227 227 } 228 228 qp->s_rdma_ack_cnt++; 229 - qp->s_hdrwords = hwords; 230 229 ps->s_txreq->sde = priv->s_sde; 231 230 ps->s_txreq->s_cur_size = len; 231 + ps->s_txreq->hdr_dwords = hwords; 232 232 hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps); 233 - /* pbc */ 234 - ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2; 235 233 return 1; 236 234 237 235 bail: ··· 383 385 : IB_WC_SUCCESS); 384 386 if (local_ops) 385 387 atomic_dec(&qp->local_ops_pending); 386 - qp->s_hdrwords = 0; 387 388 goto done_free_tx; 388 389 } 389 390 ··· 685 688 bth2 |= IB_BTH_REQ_ACK; 686 689 } 687 690 qp->s_len -= len; 688 - qp->s_hdrwords = hwords; 691 + ps->s_txreq->hdr_dwords = hwords; 689 692 ps->s_txreq->sde = priv->s_sde; 690 693 ps->s_txreq->ss = ss; 691 694 ps->s_txreq->s_cur_size = len; ··· 696 699 bth2, 697 700 middle, 698 701 ps); 699 - /* pbc */ 700 - ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2; 701 702 return 1; 702 703 703 704 done_free_tx: ··· 709 714 bail_no_tx: 710 715 ps->s_txreq = NULL; 711 716 qp->s_flags &= ~RVT_S_BUSY; 712 - qp->s_hdrwords = 0; 713 717 return 0; 714 718 } 715 719 ··· 728 734 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn)); 729 735 } 730 736 731 - static inline void hfi1_queue_rc_ack(struct rvt_qp *qp, bool is_fecn) 737 + static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn) 732 738 { 733 - struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); 739 + struct rvt_qp *qp = packet->qp; 740 + struct hfi1_ibport *ibp; 734 741 unsigned long flags; 735 742 736 743 spin_lock_irqsave(&qp->s_lock, flags); 737 744 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) 738 745 goto unlock; 746 + ibp = rcd_to_iport(packet->rcd); 739 747 this_cpu_inc(*ibp->rvp.rc_qacks); 740 748 qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING; 741 749 qp->s_nak_state = qp->r_nak_state; ··· 751 755 spin_unlock_irqrestore(&qp->s_lock, flags); 752 756 } 753 757 754 - static inline void 
hfi1_make_rc_ack_9B(struct rvt_qp *qp, 758 + static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet, 755 759 struct hfi1_opa_header *opa_hdr, 756 760 u8 sc5, bool is_fecn, 757 761 u64 *pbc_flags, u32 *hwords, 758 762 u32 *nwords) 759 763 { 760 - struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); 764 + struct rvt_qp *qp = packet->qp; 765 + struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); 761 766 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 762 767 struct ib_header *hdr = &opa_hdr->ibh; 763 768 struct ib_other_headers *ohdr; ··· 799 802 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1); 800 803 } 801 804 802 - static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp, 805 + static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet, 803 806 struct hfi1_opa_header *opa_hdr, 804 807 u8 sc5, bool is_fecn, 805 808 u64 *pbc_flags, u32 *hwords, 806 809 u32 *nwords) 807 810 { 808 - struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); 811 + struct rvt_qp *qp = packet->qp; 812 + struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); 809 813 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 810 814 struct hfi1_16b_header *hdr = &opa_hdr->opah; 811 815 struct ib_other_headers *ohdr; 812 816 u32 bth0, bth1 = 0; 813 817 u16 len, pkey; 814 - u8 becn = !!is_fecn; 818 + bool becn = is_fecn; 815 819 u8 l4 = OPA_16B_L4_IB_LOCAL; 816 820 u8 extra_bytes; 817 821 ··· 852 854 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1); 853 855 } 854 856 855 - typedef void (*hfi1_make_rc_ack)(struct rvt_qp *qp, 857 + typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet, 856 858 struct hfi1_opa_header *opa_hdr, 857 859 u8 sc5, bool is_fecn, 858 860 u64 *pbc_flags, u32 *hwords, ··· 872 874 * Note that RDMA reads and atomics are handled in the 873 875 * send side QP state and send engine. 
874 876 */ 875 - void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, 876 - struct rvt_qp *qp, bool is_fecn) 877 + void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn) 877 878 { 879 + struct hfi1_ctxtdata *rcd = packet->rcd; 880 + struct rvt_qp *qp = packet->qp; 878 881 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 879 882 struct hfi1_qp_priv *priv = qp->priv; 880 883 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); ··· 892 893 893 894 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ 894 895 if (qp->s_flags & RVT_S_RESP_PENDING) { 895 - hfi1_queue_rc_ack(qp, is_fecn); 896 + hfi1_queue_rc_ack(packet, is_fecn); 896 897 return; 897 898 } 898 899 899 900 /* Ensure s_rdma_ack_cnt changes are committed */ 900 901 if (qp->s_rdma_ack_cnt) { 901 - hfi1_queue_rc_ack(qp, is_fecn); 902 + hfi1_queue_rc_ack(packet, is_fecn); 902 903 return; 903 904 } 904 905 ··· 907 908 return; 908 909 909 910 /* Make the appropriate header */ 910 - hfi1_make_rc_ack_tbl[priv->hdr_type](qp, &opa_hdr, sc5, is_fecn, 911 + hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn, 911 912 &pbc_flags, &hwords, &nwords); 912 913 913 914 plen = 2 /* PBC */ + hwords + nwords; ··· 921 922 * so that when enough buffer space becomes available, 922 923 * the ACK is sent ahead of other outgoing packets. 
923 924 */ 924 - hfi1_queue_rc_ack(qp, is_fecn); 925 + hfi1_queue_rc_ack(packet, is_fecn); 925 926 return; 926 927 } 927 928 trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), ··· 1539 1540 void *data = packet->payload; 1540 1541 u32 tlen = packet->tlen; 1541 1542 struct rvt_qp *qp = packet->qp; 1542 - struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); 1543 + struct hfi1_ibport *ibp; 1543 1544 struct ib_other_headers *ohdr = packet->ohdr; 1544 1545 struct rvt_swqe *wqe; 1545 1546 enum ib_wc_status status; ··· 1696 1697 goto ack_err; 1697 1698 1698 1699 ack_seq_err: 1700 + ibp = rcd_to_iport(rcd); 1699 1701 rdma_seq_err(qp, ibp, psn, rcd); 1700 1702 goto ack_done; 1701 1703 ··· 2037 2037 struct rvt_qp *qp = packet->qp; 2038 2038 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 2039 2039 struct ib_other_headers *ohdr = packet->ohdr; 2040 - u32 bth0 = be32_to_cpu(ohdr->bth[0]); 2041 2040 u32 opcode = packet->opcode; 2042 2041 u32 hdrsize = packet->hlen; 2043 2042 u32 psn = ib_bth_get_psn(packet->ohdr); ··· 2234 2235 wc.port_num = 0; 2235 2236 /* Signal completion event if the solicited bit is set. */ 2236 2237 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 2237 - (bth0 & IB_BTH_SOLICITED) != 0); 2238 + ib_bth_is_solicited(ohdr)); 2238 2239 break; 2239 2240 2240 2241 case OP(RDMA_WRITE_ONLY): ··· 2478 2479 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; 2479 2480 qp->r_ack_psn = qp->r_psn; 2480 2481 send_ack: 2481 - hfi1_send_rc_ack(rcd, qp, is_fecn); 2482 + hfi1_send_rc_ack(packet, is_fecn); 2482 2483 } 2483 2484 2484 2485 void hfi1_rc_hdrerr(
+17 -30
drivers/infiniband/hw/hfi1/ruc.c
··· 225 225 u32 dlid = packet->dlid; 226 226 u32 slid = packet->slid; 227 227 u32 sl = packet->sl; 228 - int migrated; 229 - u32 bth0, bth1; 230 - u16 pkey; 231 - 232 - bth0 = be32_to_cpu(packet->ohdr->bth[0]); 233 - bth1 = be32_to_cpu(packet->ohdr->bth[1]); 234 - if (packet->etype == RHF_RCV_TYPE_BYPASS) { 235 - pkey = hfi1_16B_get_pkey(packet->hdr); 236 - migrated = bth1 & OPA_BTH_MIG_REQ; 237 - } else { 238 - pkey = ib_bth_get_pkey(packet->ohdr); 239 - migrated = bth0 & IB_BTH_MIG_REQ; 240 - } 228 + bool migrated = packet->migrated; 229 + u16 pkey = packet->pkey; 241 230 242 231 if (qp->s_mig_state == IB_MIG_ARMED && migrated) { 243 232 if (!packet->grh) { ··· 745 756 u32 slid; 746 757 u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index); 747 758 u8 l4 = OPA_16B_L4_IB_LOCAL; 748 - u8 extra_bytes = hfi1_get_16b_padding((qp->s_hdrwords << 2), 749 - ps->s_txreq->s_cur_size); 759 + u8 extra_bytes = hfi1_get_16b_padding( 760 + (ps->s_txreq->hdr_dwords << 2), 761 + ps->s_txreq->s_cur_size); 750 762 u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size + 751 763 extra_bytes + SIZE_OF_LT) >> 2); 752 - u8 becn = 0; 764 + bool becn = false; 753 765 754 766 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) && 755 767 hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) { 756 768 struct ib_grh *grh; 757 769 struct ib_global_route *grd = 758 770 rdma_ah_retrieve_grh(&qp->remote_ah_attr); 759 - int hdrwords; 760 - 761 771 /* 762 772 * Ensure OPA GIDs are transformed to IB gids 763 773 * before creating the GRH. 
··· 765 777 grd->sgid_index = 0; 766 778 grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh; 767 779 l4 = OPA_16B_L4_IB_GLOBAL; 768 - hdrwords = qp->s_hdrwords - 4; 769 - qp->s_hdrwords += hfi1_make_grh(ibp, grh, grd, 770 - hdrwords, nwords); 780 + ps->s_txreq->hdr_dwords += 781 + hfi1_make_grh(ibp, grh, grd, 782 + ps->s_txreq->hdr_dwords - LRH_16B_DWORDS, 783 + nwords); 771 784 middle = 0; 772 785 } 773 786 ··· 787 798 if (qp->s_flags & RVT_S_ECN) { 788 799 qp->s_flags &= ~RVT_S_ECN; 789 800 /* we recently received a FECN, so return a BECN */ 790 - becn = 1; 801 + becn = true; 791 802 } 792 803 hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2); 793 804 ··· 802 813 slid, 803 814 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 804 815 16B), 805 - (qp->s_hdrwords + nwords) >> 1, 816 + (ps->s_txreq->hdr_dwords + nwords) >> 1, 806 817 pkey, becn, 0, l4, priv->s_sc); 807 818 } 808 819 ··· 822 833 823 834 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) { 824 835 struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh; 825 - int hdrwords = qp->s_hdrwords - 2; 826 836 827 837 lrh0 = HFI1_LRH_GRH; 828 - qp->s_hdrwords += 838 + ps->s_txreq->hdr_dwords += 829 839 hfi1_make_grh(ibp, grh, 830 840 rdma_ah_read_grh(&qp->remote_ah_attr), 831 - hdrwords, nwords); 841 + ps->s_txreq->hdr_dwords - LRH_9B_DWORDS, 842 + nwords); 832 843 middle = 0; 833 844 } 834 845 lrh0 |= (priv->s_sc & 0xf) << 12 | ··· 854 865 hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2); 855 866 hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh, 856 867 lrh0, 857 - qp->s_hdrwords + nwords, 868 + ps->s_txreq->hdr_dwords + nwords, 858 869 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B), 859 870 ppd_from_ibp(ibp)->lid | 860 871 rdma_ah_get_path_bits(&qp->remote_ah_attr)); ··· 1019 1030 ps.s_txreq = get_waiting_verbs_txreq(qp); 1020 1031 do { 1021 1032 /* Check for a constructed packet to be sent. 
*/ 1022 - if (qp->s_hdrwords != 0) { 1033 + if (ps.s_txreq) { 1023 1034 spin_unlock_irqrestore(&qp->s_lock, ps.flags); 1024 1035 /* 1025 1036 * If the packet cannot be sent now, return and ··· 1027 1038 */ 1028 1039 if (hfi1_verbs_send(qp, &ps)) 1029 1040 return; 1030 - /* Record that s_ahg is empty. */ 1031 - qp->s_hdrwords = 0; 1032 1041 /* allow other tasks to run */ 1033 1042 if (schedule_send_yield(qp, &ps)) 1034 1043 return;
+9 -7
drivers/infiniband/hw/hfi1/sdma.c
··· 1275 1275 return -ENOMEM; 1276 1276 } 1277 1277 1278 - /* 1279 - * Clean up allocated memory. 1278 + /** 1279 + * sdma_clean() Clean up allocated memory 1280 + * @dd: struct hfi1_devdata 1281 + * @num_engines: num sdma engines 1280 1282 * 1281 - * This routine is can be called regardless of the success of sdma_init() 1282 - * 1283 + * This routine can be called regardless of the success of 1284 + * sdma_init() 1283 1285 */ 1284 - static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines) 1286 + void sdma_clean(struct hfi1_devdata *dd, size_t num_engines) 1285 1287 { 1286 1288 size_t i; 1287 1289 struct sdma_engine *sde; ··· 1388 1386 num_engines, descq_cnt); 1389 1387 1390 1388 /* alloc memory for array of send engines */ 1391 - dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL); 1389 + dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma), 1390 + GFP_KERNEL, dd->node); 1392 1391 if (!dd->per_sdma) 1393 1392 return ret; 1394 1393 ··· 1620 1617 */ 1621 1618 sdma_finalput(&sde->state); 1622 1619 } 1623 - sdma_clean(dd, dd->num_sdma); 1624 1620 } 1625 1621 1626 1622 /*
+1
drivers/infiniband/hw/hfi1/sdma.h
··· 420 420 int sdma_init(struct hfi1_devdata *dd, u8 port); 421 421 void sdma_start(struct hfi1_devdata *dd); 422 422 void sdma_exit(struct hfi1_devdata *dd); 423 + void sdma_clean(struct hfi1_devdata *dd, size_t num_engines); 423 424 void sdma_all_running(struct hfi1_devdata *dd); 424 425 void sdma_all_idle(struct hfi1_devdata *dd); 425 426 void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
+4 -4
drivers/infiniband/hw/hfi1/trace.c
··· 138 138 } 139 139 140 140 void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr, 141 - u8 *ack, u8 *becn, u8 *fecn, u8 *mig, 141 + u8 *ack, bool *becn, bool *fecn, u8 *mig, 142 142 u8 *se, u8 *pad, u8 *opcode, u8 *tver, 143 143 u16 *pkey, u32 *psn, u32 *qpn) 144 144 { ··· 184 184 } 185 185 186 186 void hfi1_trace_parse_16b_hdr(struct hfi1_16b_header *hdr, 187 - u8 *age, u8 *becn, u8 *fecn, 187 + u8 *age, bool *becn, bool *fecn, 188 188 u8 *l4, u8 *rc, u8 *sc, 189 189 u16 *entropy, u16 *len, u16 *pkey, 190 190 u32 *dlid, u32 *slid) ··· 207 207 #define LRH_16B_PRN "age:%d becn:%d fecn:%d l4:%d " \ 208 208 "rc:%d sc:%d pkey:0x%.4x entropy:0x%.4x" 209 209 const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass, 210 - u8 age, u8 becn, u8 fecn, u8 l4, 210 + u8 age, bool becn, bool fecn, u8 l4, 211 211 u8 lnh, const char *lnh_name, u8 lver, 212 212 u8 rc, u8 sc, u8 sl, u16 entropy, 213 213 u16 len, u16 pkey, u32 dlid, u32 slid) ··· 235 235 "op:0x%.2x,%s se:%d m:%d pad:%d tver:%d " \ 236 236 "qpn:0x%.6x a:%d psn:0x%.8x" 237 237 const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass, 238 - u8 ack, u8 becn, u8 fecn, u8 mig, 238 + u8 ack, bool becn, bool fecn, u8 mig, 239 239 u8 se, u8 pad, u8 opcode, const char *opname, 240 240 u8 tver, u16 pkey, u32 psn, u32 qpn) 241 241 {
+8 -8
drivers/infiniband/hw/hfi1/trace_ibhdrs.h
··· 101 101 u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet); 102 102 const char *hfi1_trace_get_packet_l4_str(u8 l4); 103 103 void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr, 104 - u8 *ack, u8 *becn, u8 *fecn, u8 *mig, 104 + u8 *ack, bool *becn, bool *fecn, u8 *mig, 105 105 u8 *se, u8 *pad, u8 *opcode, u8 *tver, 106 106 u16 *pkey, u32 *psn, u32 *qpn); 107 107 void hfi1_trace_parse_9b_hdr(struct ib_header *hdr, bool sc5, ··· 112 112 u8 *pad, u8 *se, u8 *tver, 113 113 u32 *psn, u32 *qpn); 114 114 void hfi1_trace_parse_16b_hdr(struct hfi1_16b_header *hdr, 115 - u8 *age, u8 *becn, u8 *fecn, 115 + u8 *age, bool *becn, bool *fecn, 116 116 u8 *l4, u8 *rc, u8 *sc, 117 117 u16 *entropy, u16 *len, u16 *pkey, 118 118 u32 *dlid, u32 *slid); 119 119 120 120 const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass, 121 - u8 age, u8 becn, u8 fecn, u8 l4, 121 + u8 age, bool becn, bool fecn, u8 l4, 122 122 u8 lnh, const char *lnh_name, u8 lver, 123 123 u8 rc, u8 sc, u8 sl, u16 entropy, 124 124 u16 len, u16 pkey, u32 dlid, u32 slid); 125 125 126 126 const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass, 127 - u8 ack, u8 becn, u8 fecn, u8 mig, 127 + u8 ack, bool becn, bool fecn, u8 mig, 128 128 u8 se, u8 pad, u8 opcode, const char *opname, 129 129 u8 tver, u16 pkey, u32 psn, u32 qpn); 130 130 ··· 148 148 __field(u8, etype) 149 149 __field(u8, ack) 150 150 __field(u8, age) 151 - __field(u8, becn) 152 - __field(u8, fecn) 151 + __field(bool, becn) 152 + __field(bool, fecn) 153 153 __field(u8, l2) 154 154 __field(u8, l4) 155 155 __field(u8, lnh) ··· 290 290 __field(u8, hdr_type) 291 291 __field(u8, ack) 292 292 __field(u8, age) 293 - __field(u8, becn) 294 - __field(u8, fecn) 293 + __field(bool, becn) 294 + __field(bool, fecn) 295 295 __field(u8, l4) 296 296 __field(u8, lnh) 297 297 __field(u8, lver)
+11 -19
drivers/infiniband/hw/hfi1/trace_rx.h
··· 63 63 #define TRACE_SYSTEM hfi1_rx 64 64 65 65 TRACE_EVENT(hfi1_rcvhdr, 66 - TP_PROTO(struct hfi1_devdata *dd, 67 - u32 ctxt, 68 - u64 eflags, 69 - u32 etype, 70 - u32 hlen, 71 - u32 tlen, 72 - u32 updegr, 73 - u32 etail 74 - ), 75 - TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail), 76 - TP_STRUCT__entry(DD_DEV_ENTRY(dd) 66 + TP_PROTO(struct hfi1_packet *packet), 67 + TP_ARGS(packet), 68 + TP_STRUCT__entry(DD_DEV_ENTRY(packet->rcd->dd) 77 69 __field(u64, eflags) 78 70 __field(u32, ctxt) 79 71 __field(u32, etype) ··· 74 82 __field(u32, updegr) 75 83 __field(u32, etail) 76 84 ), 77 - TP_fast_assign(DD_DEV_ASSIGN(dd); 78 - __entry->eflags = eflags; 79 - __entry->ctxt = ctxt; 80 - __entry->etype = etype; 81 - __entry->hlen = hlen; 82 - __entry->tlen = tlen; 83 - __entry->updegr = updegr; 84 - __entry->etail = etail; 85 + TP_fast_assign(DD_DEV_ASSIGN(packet->rcd->dd); 86 + __entry->eflags = rhf_err_flags(packet->rhf); 87 + __entry->ctxt = packet->rcd->ctxt; 88 + __entry->etype = packet->etype; 89 + __entry->hlen = packet->hlen; 90 + __entry->tlen = packet->tlen; 91 + __entry->updegr = packet->updegr; 92 + __entry->etail = rhf_egr_index(packet->rhf); 85 93 ), 86 94 TP_printk( 87 95 "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
+2 -7
drivers/infiniband/hw/hfi1/uc.c
··· 144 144 : IB_WC_SUCCESS); 145 145 if (local_ops) 146 146 atomic_dec(&qp->local_ops_pending); 147 - qp->s_hdrwords = 0; 148 147 goto done_free_tx; 149 148 } 150 149 /* ··· 266 267 break; 267 268 } 268 269 qp->s_len -= len; 269 - qp->s_hdrwords = hwords; 270 + ps->s_txreq->hdr_dwords = hwords; 270 271 ps->s_txreq->sde = priv->s_sde; 271 272 ps->s_txreq->ss = &qp->s_sge; 272 273 ps->s_txreq->s_cur_size = len; 273 274 hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), 274 275 mask_psn(qp->s_psn++), middle, ps); 275 - /* pbc */ 276 - ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2; 277 276 return 1; 278 277 279 278 done_free_tx: ··· 285 288 bail_no_tx: 286 289 ps->s_txreq = NULL; 287 290 qp->s_flags &= ~RVT_S_BUSY; 288 - qp->s_hdrwords = 0; 289 291 return 0; 290 292 } 291 293 ··· 476 480 wc.port_num = 0; 477 481 /* Signal completion event if the solicited bit is set. */ 478 482 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 479 - (ohdr->bth[0] & 480 - cpu_to_be32(IB_BTH_SOLICITED)) != 0); 483 + ib_bth_is_solicited(ohdr)); 481 484 break; 482 485 483 486 case OP(RDMA_WRITE_FIRST):
+20 -19
drivers/infiniband/hw/hfi1/ud.c
··· 340 340 extra_bytes = -wqe->length & 3; 341 341 nwords = ((wqe->length + extra_bytes) >> 2) + SIZE_OF_CRC; 342 342 /* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */ 343 - qp->s_hdrwords = 7; 343 + ps->s_txreq->hdr_dwords = 7; 344 344 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) 345 - qp->s_hdrwords++; 345 + ps->s_txreq->hdr_dwords++; 346 346 347 347 if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) { 348 348 grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh; 349 - qp->s_hdrwords += hfi1_make_grh(ibp, grh, 350 - rdma_ah_read_grh(ah_attr), 351 - qp->s_hdrwords - 2, nwords); 349 + ps->s_txreq->hdr_dwords += 350 + hfi1_make_grh(ibp, grh, rdma_ah_read_grh(ah_attr), 351 + ps->s_txreq->hdr_dwords - LRH_9B_DWORDS, 352 + nwords); 352 353 lrh0 = HFI1_LRH_GRH; 353 354 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth; 354 355 } else { ··· 382 381 } 383 382 } 384 383 hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, false); 385 - len = qp->s_hdrwords + nwords; 384 + len = ps->s_txreq->hdr_dwords + nwords; 386 385 387 386 /* Setup the packet */ 388 387 ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_9B; ··· 406 405 ppd = ppd_from_ibp(ibp); 407 406 ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr; 408 407 /* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */ 409 - qp->s_hdrwords = 9; 408 + ps->s_txreq->hdr_dwords = 9; 410 409 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) 411 - qp->s_hdrwords++; 410 + ps->s_txreq->hdr_dwords++; 412 411 413 412 /* SW provides space for CRC and LT for bypass packets. 
*/ 414 - extra_bytes = hfi1_get_16b_padding((qp->s_hdrwords << 2), 413 + extra_bytes = hfi1_get_16b_padding((ps->s_txreq->hdr_dwords << 2), 415 414 wqe->length); 416 415 nwords = ((wqe->length + extra_bytes + SIZE_OF_LT) >> 2) + SIZE_OF_CRC; 417 416 ··· 429 428 grd->sgid_index = 0; 430 429 } 431 430 grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh; 432 - qp->s_hdrwords += hfi1_make_grh(ibp, grh, grd, 433 - qp->s_hdrwords - 4, nwords); 431 + ps->s_txreq->hdr_dwords += hfi1_make_grh( 432 + ibp, grh, grd, 433 + ps->s_txreq->hdr_dwords - LRH_16B_DWORDS, 434 + nwords); 434 435 ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth; 435 436 l4 = OPA_16B_L4_IB_GLOBAL; 436 437 } else { ··· 455 452 456 453 hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true); 457 454 /* Convert dwords to flits */ 458 - len = (qp->s_hdrwords + nwords) >> 1; 455 + len = (ps->s_txreq->hdr_dwords + nwords) >> 1; 459 456 460 457 /* Setup the packet */ 461 458 ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_16B; ··· 565 562 priv->s_ahg->ahgcount = 0; 566 563 priv->s_ahg->ahgidx = 0; 567 564 priv->s_ahg->tx_flags = 0; 568 - /* pbc */ 569 - ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2; 570 565 571 566 return 1; 572 567 ··· 579 578 bail_no_tx: 580 579 ps->s_txreq = NULL; 581 580 qp->s_flags &= ~RVT_S_BUSY; 582 - qp->s_hdrwords = 0; 583 581 return 0; 584 582 } 585 583 ··· 649 649 struct ib_grh *grh = &hdr.u.l.grh; 650 650 651 651 grh->version_tclass_flow = old_grh->version_tclass_flow; 652 - grh->paylen = cpu_to_be16((hwords - 4 + nwords) << 2); 652 + grh->paylen = cpu_to_be16( 653 + (hwords - LRH_16B_DWORDS + nwords) << 2); 653 654 grh->hop_limit = 0xff; 654 655 grh->sgid = old_grh->dgid; 655 656 grh->dgid = old_grh->sgid; ··· 704 703 struct ib_grh *grh = &hdr.u.l.grh; 705 704 706 705 grh->version_tclass_flow = old_grh->version_tclass_flow; 707 - grh->paylen = cpu_to_be16((hwords - 2 + SIZE_OF_CRC) << 2); 706 + grh->paylen = cpu_to_be16( 707 + (hwords - LRH_9B_DWORDS + SIZE_OF_CRC) << 2); 708 708 grh->hop_limit 
= 0xff; 709 709 grh->sgid = old_grh->dgid; 710 710 grh->dgid = old_grh->sgid; ··· 1048 1046 wc.port_num = qp->port_num; 1049 1047 /* Signal completion event if the solicited bit is set. */ 1050 1048 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1051 - (ohdr->bth[0] & 1052 - cpu_to_be32(IB_BTH_SOLICITED)) != 0); 1049 + ib_bth_is_solicited(ohdr)); 1053 1050 return; 1054 1051 1055 1052 drop:
+5 -5
drivers/infiniband/hw/hfi1/verbs.c
··· 835 835 { 836 836 int ret = 0; 837 837 struct hfi1_sdma_header *phdr = &tx->phdr; 838 - u16 hdrbytes = tx->hdr_dwords << 2; 838 + u16 hdrbytes = (tx->hdr_dwords + sizeof(pbc) / 4) << 2; 839 839 u8 extra_bytes = 0; 840 840 841 841 if (tx->phdr.hdr.hdr_type) { ··· 901 901 { 902 902 struct hfi1_qp_priv *priv = qp->priv; 903 903 struct hfi1_ahg_info *ahg_info = priv->s_ahg; 904 - u32 hdrwords = qp->s_hdrwords; 904 + u32 hdrwords = ps->s_txreq->hdr_dwords; 905 905 u32 len = ps->s_txreq->s_cur_size; 906 906 u32 plen; 907 907 struct hfi1_ibdev *dev = ps->dev; ··· 919 919 } else { 920 920 dwords = (len + 3) >> 2; 921 921 } 922 - plen = hdrwords + dwords + 2; 922 + plen = hdrwords + dwords + sizeof(pbc) / 4; 923 923 924 924 tx = ps->s_txreq; 925 925 if (!sdma_txreq_built(&tx->txreq)) { ··· 1038 1038 u64 pbc) 1039 1039 { 1040 1040 struct hfi1_qp_priv *priv = qp->priv; 1041 - u32 hdrwords = qp->s_hdrwords; 1041 + u32 hdrwords = ps->s_txreq->hdr_dwords; 1042 1042 struct rvt_sge_state *ss = ps->s_txreq->ss; 1043 1043 u32 len = ps->s_txreq->s_cur_size; 1044 1044 u32 dwords; ··· 1064 1064 dwords = (len + 3) >> 2; 1065 1065 hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh; 1066 1066 } 1067 - plen = hdrwords + dwords + 2; 1067 + plen = hdrwords + dwords + sizeof(pbc) / 4; 1068 1068 1069 1069 /* only RC/UC use complete */ 1070 1070 switch (qp->ibqp.qp_type) {
+11 -13
drivers/infiniband/hw/hfi1/verbs.h
··· 105 105 HFI1_HAS_GRH = (1 << 0), 106 106 }; 107 107 108 + #define LRH_16B_BYTES (FIELD_SIZEOF(struct hfi1_16b_header, lrh)) 109 + #define LRH_16B_DWORDS (LRH_16B_BYTES / sizeof(u32)) 110 + #define LRH_9B_BYTES (FIELD_SIZEOF(struct ib_header, lrh)) 111 + #define LRH_9B_DWORDS (LRH_9B_BYTES / sizeof(u32)) 112 + 108 113 struct hfi1_16b_header { 109 114 u32 lrh[4]; 110 115 union { ··· 251 246 } 252 247 253 248 /* 254 - * Send if not busy or waiting for I/O and either 255 - * a RC response is pending or we can process send work requests. 256 - */ 257 - static inline int hfi1_send_ok(struct rvt_qp *qp) 258 - { 259 - return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) && 260 - (qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) || 261 - !(qp->s_flags & RVT_S_ANY_WAIT_SEND)); 262 - } 263 - 264 - /* 265 249 * This must be called with s_lock held. 266 250 */ 267 251 void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl, ··· 363 369 void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, 364 370 enum ib_wc_status status); 365 371 366 - void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, 367 - bool is_fecn); 372 + void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn); 368 373 369 374 int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps); 370 375 ··· 407 414 * is not invoked. 408 415 */ 409 416 __copy_user_nocache(dst, (void __user *)src, n, 0); 417 + } 418 + 419 + static inline bool opa_bth_is_migration(struct ib_other_headers *ohdr) 420 + { 421 + return ohdr->bth[1] & cpu_to_be32(OPA_BTH_MIG_REQ); 410 422 } 411 423 412 424 extern const enum ib_wc_opcode ib_hfi1_wc_opcode[];
+7
drivers/infiniband/hw/hfi1/verbs_txreq.h
··· 113 113 return NULL; 114 114 } 115 115 116 + static inline bool verbs_txreq_queued(struct rvt_qp *qp) 117 + { 118 + struct hfi1_qp_priv *priv = qp->priv; 119 + 120 + return iowait_packet_queued(&priv->s_iowait); 121 + } 122 + 116 123 void hfi1_put_txreq(struct verbs_txreq *tx); 117 124 int verbs_txreq_init(struct hfi1_ibdev *dev); 118 125 void verbs_txreq_exit(struct hfi1_ibdev *dev);
+3 -3
drivers/infiniband/hw/hns/hns_roce_common.h
··· 43 43 __raw_writel((__force u32)cpu_to_le32(value), (addr)) 44 44 45 45 #define roce_get_field(origin, mask, shift) \ 46 - (((origin) & (mask)) >> (shift)) 46 + (((le32_to_cpu(origin)) & (mask)) >> (shift)) 47 47 48 48 #define roce_get_bit(origin, shift) \ 49 49 roce_get_field((origin), (1ul << (shift)), (shift)) 50 50 51 51 #define roce_set_field(origin, mask, shift, val) \ 52 52 do { \ 53 - (origin) &= (~(mask)); \ 54 - (origin) |= (((u32)(val) << (shift)) & (mask)); \ 53 + (origin) &= ~cpu_to_le32(mask); \ 54 + (origin) |= cpu_to_le32(((u32)(val) << (shift)) & (mask)); \ 55 55 } while (0) 56 56 57 57 #define roce_set_bit(origin, shift, val) \
+5 -5
drivers/infiniband/hw/hns/hns_roce_device.h
··· 345 345 struct hns_roce_cq_buf hr_buf; 346 346 spinlock_t lock; 347 347 struct ib_umem *umem; 348 - void (*comp)(struct hns_roce_cq *); 349 - void (*event)(struct hns_roce_cq *, enum hns_roce_event); 348 + void (*comp)(struct hns_roce_cq *cq); 349 + void (*event)(struct hns_roce_cq *cq, enum hns_roce_event event_type); 350 350 351 351 struct hns_roce_uar *uar; 352 352 u32 cq_depth; ··· 466 466 struct ib_qp ibqp; 467 467 struct hns_roce_buf hr_buf; 468 468 struct hns_roce_wq rq; 469 - __le64 doorbell_qpn; 469 + u32 doorbell_qpn; 470 470 __le32 sq_signal_bits; 471 471 u32 sq_next_wqe; 472 472 int sq_max_wqes_per_wr; ··· 486 486 u32 atomic_rd_en; 487 487 u32 pkey_index; 488 488 u32 qkey; 489 - void (*event)(struct hns_roce_qp *, 490 - enum hns_roce_event); 489 + void (*event)(struct hns_roce_qp *qp, 490 + enum hns_roce_event event_type); 491 491 unsigned long qpn; 492 492 493 493 atomic_t refcount;
+41 -17
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
··· 195 195 196 196 memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN); 197 197 198 - ud_sq_wqe->va0_l = (u32)wr->sg_list[0].addr; 199 - ud_sq_wqe->va0_h = (wr->sg_list[0].addr) >> 32; 200 - ud_sq_wqe->l_key0 = wr->sg_list[0].lkey; 198 + ud_sq_wqe->va0_l = 199 + cpu_to_le32((u32)wr->sg_list[0].addr); 200 + ud_sq_wqe->va0_h = 201 + cpu_to_le32((wr->sg_list[0].addr) >> 32); 202 + ud_sq_wqe->l_key0 = 203 + cpu_to_le32(wr->sg_list[0].lkey); 201 204 202 - ud_sq_wqe->va1_l = (u32)wr->sg_list[1].addr; 203 - ud_sq_wqe->va1_h = (wr->sg_list[1].addr) >> 32; 204 - ud_sq_wqe->l_key1 = wr->sg_list[1].lkey; 205 + ud_sq_wqe->va1_l = 206 + cpu_to_le32((u32)wr->sg_list[1].addr); 207 + ud_sq_wqe->va1_h = 208 + cpu_to_le32((wr->sg_list[1].addr) >> 32); 209 + ud_sq_wqe->l_key1 = 210 + cpu_to_le32(wr->sg_list[1].lkey); 205 211 ind++; 206 212 } else if (ibqp->qp_type == IB_QPT_RC) { 213 + u32 tmp_len = 0; 214 + 207 215 ctrl = wqe; 208 216 memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg)); 209 217 for (i = 0; i < wr->num_sge; i++) 210 - ctrl->msg_length += wr->sg_list[i].length; 218 + tmp_len += wr->sg_list[i].length; 219 + 220 + ctrl->msg_length = 221 + cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len); 211 222 212 223 ctrl->sgl_pa_h = 0; 213 224 ctrl->flag = 0; 214 - ctrl->imm_data = send_ieth(wr); 225 + 226 + switch (wr->opcode) { 227 + case IB_WR_SEND_WITH_IMM: 228 + case IB_WR_RDMA_WRITE_WITH_IMM: 229 + ctrl->imm_data = wr->ex.imm_data; 230 + break; 231 + case IB_WR_SEND_WITH_INV: 232 + ctrl->inv_key = 233 + cpu_to_le32(wr->ex.invalidate_rkey); 234 + break; 235 + default: 236 + ctrl->imm_data = 0; 237 + break; 238 + } 215 239 216 240 /*Ctrl field, ctrl set type: sig, solic, imm, fence */ 217 241 /* SO wait for conforming application scenarios */ ··· 282 258 283 259 dseg = wqe; 284 260 if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { 285 - if (ctrl->msg_length > 286 - hr_dev->caps.max_sq_inline) { 261 + if (le32_to_cpu(ctrl->msg_length) > 262 + 
hr_dev->caps.max_sq_inline) { 287 263 ret = -EINVAL; 288 264 *bad_wr = wr; 289 265 dev_err(dev, "inline len(1-%d)=%d, illegal", ··· 297 273 wr->sg_list[i].length); 298 274 wqe += wr->sg_list[i].length; 299 275 } 300 - ctrl->flag |= HNS_ROCE_WQE_INLINE; 276 + ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE); 301 277 } else { 302 278 /*sqe num is two */ 303 279 for (i = 0; i < wr->num_sge; i++) ··· 330 306 SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn); 331 307 roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1); 332 308 333 - doorbell[0] = sq_db.u32_4; 334 - doorbell[1] = sq_db.u32_8; 309 + doorbell[0] = le32_to_cpu(sq_db.u32_4); 310 + doorbell[1] = le32_to_cpu(sq_db.u32_8); 335 311 336 312 hns_roce_write64_k(doorbell, qp->sq.db_reg_l); 337 313 qp->sq_next_wqe = ind; ··· 427 403 roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S, 428 404 1); 429 405 430 - doorbell[0] = rq_db.u32_4; 431 - doorbell[1] = rq_db.u32_8; 406 + doorbell[0] = le32_to_cpu(rq_db.u32_4); 407 + doorbell[1] = le32_to_cpu(rq_db.u32_8); 432 408 433 409 hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l); 434 410 } ··· 2285 2261 CQE_BYTE_4_WQE_INDEX_M, 2286 2262 CQE_BYTE_4_WQE_INDEX_S)& 2287 2263 ((*cur_qp)->sq.wqe_cnt-1)); 2288 - switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) { 2264 + switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) { 2289 2265 case HNS_ROCE_WQE_OPCODE_SEND: 2290 2266 wc->opcode = IB_WC_SEND; 2291 2267 break; ··· 2306 2282 wc->status = IB_WC_GENERAL_ERR; 2307 2283 break; 2308 2284 } 2309 - wc->wc_flags = (sq_wqe->flag & HNS_ROCE_WQE_IMM ? 2285 + wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ? 2310 2286 IB_WC_WITH_IMM : 0); 2311 2287 2312 2288 wq = &(*cur_qp)->sq;
+130 -128
drivers/infiniband/hw/hns/hns_roce_hw_v1.h
··· 200 200 #define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0) 201 201 202 202 struct hns_roce_cq_context { 203 - u32 cqc_byte_4; 204 - u32 cq_bt_l; 205 - u32 cqc_byte_12; 206 - u32 cur_cqe_ba0_l; 207 - u32 cqc_byte_20; 208 - u32 cqe_tptr_addr_l; 209 - u32 cur_cqe_ba1_l; 210 - u32 cqc_byte_32; 203 + __le32 cqc_byte_4; 204 + __le32 cq_bt_l; 205 + __le32 cqc_byte_12; 206 + __le32 cur_cqe_ba0_l; 207 + __le32 cqc_byte_20; 208 + __le32 cqe_tptr_addr_l; 209 + __le32 cur_cqe_ba1_l; 210 + __le32 cqc_byte_32; 211 211 }; 212 212 213 213 #define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0 ··· 257 257 (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S) 258 258 259 259 struct hns_roce_cqe { 260 - u32 cqe_byte_4; 260 + __le32 cqe_byte_4; 261 261 union { 262 - u32 r_key; 263 - u32 immediate_data; 262 + __le32 r_key; 263 + __be32 immediate_data; 264 264 }; 265 - u32 byte_cnt; 266 - u32 cqe_byte_16; 267 - u32 cqe_byte_20; 268 - u32 s_mac_l; 269 - u32 cqe_byte_28; 270 - u32 reserved; 265 + __le32 byte_cnt; 266 + __le32 cqe_byte_16; 267 + __le32 cqe_byte_20; 268 + __le32 s_mac_l; 269 + __le32 cqe_byte_28; 270 + __le32 reserved; 271 271 }; 272 272 273 273 #define CQE_BYTE_4_OWNER_S 7 ··· 308 308 #define CQ_DB_REQ_NOT (1 << 16) 309 309 310 310 struct hns_roce_v1_mpt_entry { 311 - u32 mpt_byte_4; 312 - u32 pbl_addr_l; 313 - u32 mpt_byte_12; 314 - u32 virt_addr_l; 315 - u32 virt_addr_h; 316 - u32 length; 317 - u32 mpt_byte_28; 318 - u32 pa0_l; 319 - u32 mpt_byte_36; 320 - u32 mpt_byte_40; 321 - u32 mpt_byte_44; 322 - u32 mpt_byte_48; 323 - u32 pa4_l; 324 - u32 mpt_byte_56; 325 - u32 mpt_byte_60; 326 - u32 mpt_byte_64; 311 + __le32 mpt_byte_4; 312 + __le32 pbl_addr_l; 313 + __le32 mpt_byte_12; 314 + __le32 virt_addr_l; 315 + __le32 virt_addr_h; 316 + __le32 length; 317 + __le32 mpt_byte_28; 318 + __le32 pa0_l; 319 + __le32 mpt_byte_36; 320 + __le32 mpt_byte_40; 321 + __le32 mpt_byte_44; 322 + __le32 mpt_byte_48; 323 + __le32 pa4_l; 324 + __le32 mpt_byte_56; 325 + __le32 
mpt_byte_60; 326 + __le32 mpt_byte_64; 327 327 }; 328 328 329 329 #define MPT_BYTE_4_KEY_STATE_S 0 ··· 408 408 (((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S) 409 409 410 410 struct hns_roce_wqe_ctrl_seg { 411 - __be32 sgl_pa_h; 412 - __be32 flag; 413 - __be32 imm_data; 414 - __be32 msg_length; 411 + __le32 sgl_pa_h; 412 + __le32 flag; 413 + union { 414 + __be32 imm_data; 415 + __le32 inv_key; 416 + }; 417 + __le32 msg_length; 415 418 }; 416 419 417 420 struct hns_roce_wqe_data_seg { 418 - __be64 addr; 419 - __be32 lkey; 420 - __be32 len; 421 + __le64 addr; 422 + __le32 lkey; 423 + __le32 len; 421 424 }; 422 425 423 426 struct hns_roce_wqe_raddr_seg { 424 - __be32 rkey; 425 - __be32 len;/* reserved */ 426 - __be64 raddr; 427 + __le32 rkey; 428 + __le32 len;/* reserved */ 429 + __le64 raddr; 427 430 }; 428 431 429 432 struct hns_roce_rq_wqe_ctrl { 430 - 431 - u32 rwqe_byte_4; 432 - u32 rocee_sgl_ba_l; 433 - u32 rwqe_byte_12; 434 - u32 reserved[5]; 433 + __le32 rwqe_byte_4; 434 + __le32 rocee_sgl_ba_l; 435 + __le32 rwqe_byte_12; 436 + __le32 reserved[5]; 435 437 }; 436 438 437 439 #define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16 ··· 445 443 #define GID_LEN 16 446 444 447 445 struct hns_roce_ud_send_wqe { 448 - u32 dmac_h; 449 - u32 u32_8; 450 - u32 immediate_data; 446 + __le32 dmac_h; 447 + __le32 u32_8; 448 + __le32 immediate_data; 451 449 452 - u32 u32_16; 450 + __le32 u32_16; 453 451 union { 454 452 unsigned char dgid[GID_LEN]; 455 453 struct { 456 - u32 u32_20; 457 - u32 u32_24; 458 - u32 u32_28; 459 - u32 u32_32; 454 + __le32 u32_20; 455 + __le32 u32_24; 456 + __le32 u32_28; 457 + __le32 u32_32; 460 458 }; 461 459 }; 462 460 463 - u32 u32_36; 464 - u32 u32_40; 461 + __le32 u32_36; 462 + __le32 u32_40; 465 463 466 - u32 va0_l; 467 - u32 va0_h; 468 - u32 l_key0; 464 + __le32 va0_l; 465 + __le32 va0_h; 466 + __le32 l_key0; 469 467 470 - u32 va1_l; 471 - u32 va1_h; 472 - u32 l_key1; 468 + __le32 va1_l; 469 + __le32 va1_h; 470 + __le32 l_key1; 473 471 }; 474 472 
475 473 #define UD_SEND_WQE_U32_4_DMAC_0_S 0 ··· 537 535 (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S) 538 536 539 537 struct hns_roce_sqp_context { 540 - u32 qp1c_bytes_4; 541 - u32 sq_rq_bt_l; 542 - u32 qp1c_bytes_12; 543 - u32 qp1c_bytes_16; 544 - u32 qp1c_bytes_20; 545 - u32 cur_rq_wqe_ba_l; 546 - u32 qp1c_bytes_28; 547 - u32 qp1c_bytes_32; 548 - u32 cur_sq_wqe_ba_l; 549 - u32 qp1c_bytes_40; 538 + __le32 qp1c_bytes_4; 539 + __le32 sq_rq_bt_l; 540 + __le32 qp1c_bytes_12; 541 + __le32 qp1c_bytes_16; 542 + __le32 qp1c_bytes_20; 543 + __le32 cur_rq_wqe_ba_l; 544 + __le32 qp1c_bytes_28; 545 + __le32 qp1c_bytes_32; 546 + __le32 cur_sq_wqe_ba_l; 547 + __le32 qp1c_bytes_40; 550 548 }; 551 549 552 550 #define QP1C_BYTES_4_QP_STATE_S 0 ··· 628 626 #define HNS_ROCE_WQE_OPCODE_MASK (15<<16) 629 627 630 628 struct hns_roce_qp_context { 631 - u32 qpc_bytes_4; 632 - u32 qpc_bytes_8; 633 - u32 qpc_bytes_12; 634 - u32 qpc_bytes_16; 635 - u32 sq_rq_bt_l; 636 - u32 qpc_bytes_24; 637 - u32 irrl_ba_l; 638 - u32 qpc_bytes_32; 639 - u32 qpc_bytes_36; 640 - u32 dmac_l; 641 - u32 qpc_bytes_44; 642 - u32 qpc_bytes_48; 643 - u8 dgid[16]; 644 - u32 qpc_bytes_68; 645 - u32 cur_rq_wqe_ba_l; 646 - u32 qpc_bytes_76; 647 - u32 rx_rnr_time; 648 - u32 qpc_bytes_84; 649 - u32 qpc_bytes_88; 629 + __le32 qpc_bytes_4; 630 + __le32 qpc_bytes_8; 631 + __le32 qpc_bytes_12; 632 + __le32 qpc_bytes_16; 633 + __le32 sq_rq_bt_l; 634 + __le32 qpc_bytes_24; 635 + __le32 irrl_ba_l; 636 + __le32 qpc_bytes_32; 637 + __le32 qpc_bytes_36; 638 + __le32 dmac_l; 639 + __le32 qpc_bytes_44; 640 + __le32 qpc_bytes_48; 641 + u8 dgid[16]; 642 + __le32 qpc_bytes_68; 643 + __le32 cur_rq_wqe_ba_l; 644 + __le32 qpc_bytes_76; 645 + __le32 rx_rnr_time; 646 + __le32 qpc_bytes_84; 647 + __le32 qpc_bytes_88; 650 648 union { 651 - u32 rx_sge_len; 652 - u32 dma_length; 649 + __le32 rx_sge_len; 650 + __le32 dma_length; 653 651 }; 654 652 union { 655 - u32 rx_sge_num; 656 - u32 rx_send_pktn; 657 - u32 r_key; 653 + __le32 
rx_sge_num; 654 + __le32 rx_send_pktn; 655 + __le32 r_key; 658 656 }; 659 - u32 va_l; 660 - u32 va_h; 661 - u32 qpc_bytes_108; 662 - u32 qpc_bytes_112; 663 - u32 rx_cur_sq_wqe_ba_l; 664 - u32 qpc_bytes_120; 665 - u32 qpc_bytes_124; 666 - u32 qpc_bytes_128; 667 - u32 qpc_bytes_132; 668 - u32 qpc_bytes_136; 669 - u32 qpc_bytes_140; 670 - u32 qpc_bytes_144; 671 - u32 qpc_bytes_148; 657 + __le32 va_l; 658 + __le32 va_h; 659 + __le32 qpc_bytes_108; 660 + __le32 qpc_bytes_112; 661 + __le32 rx_cur_sq_wqe_ba_l; 662 + __le32 qpc_bytes_120; 663 + __le32 qpc_bytes_124; 664 + __le32 qpc_bytes_128; 665 + __le32 qpc_bytes_132; 666 + __le32 qpc_bytes_136; 667 + __le32 qpc_bytes_140; 668 + __le32 qpc_bytes_144; 669 + __le32 qpc_bytes_148; 672 670 union { 673 - u32 rnr_retry; 674 - u32 ack_time; 671 + __le32 rnr_retry; 672 + __le32 ack_time; 675 673 }; 676 - u32 qpc_bytes_156; 677 - u32 pkt_use_len; 678 - u32 qpc_bytes_164; 679 - u32 qpc_bytes_168; 674 + __le32 qpc_bytes_156; 675 + __le32 pkt_use_len; 676 + __le32 qpc_bytes_164; 677 + __le32 qpc_bytes_168; 680 678 union { 681 - u32 sge_use_len; 682 - u32 pa_use_len; 679 + __le32 sge_use_len; 680 + __le32 pa_use_len; 683 681 }; 684 - u32 qpc_bytes_176; 685 - u32 qpc_bytes_180; 686 - u32 tx_cur_sq_wqe_ba_l; 687 - u32 qpc_bytes_188; 688 - u32 rvd21; 682 + __le32 qpc_bytes_176; 683 + __le32 qpc_bytes_180; 684 + __le32 tx_cur_sq_wqe_ba_l; 685 + __le32 qpc_bytes_188; 686 + __le32 rvd21; 689 687 }; 690 688 691 689 #define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0 ··· 998 996 #define HCR_GO_BIT 15 999 997 1000 998 struct hns_roce_rq_db { 1001 - u32 u32_4; 1002 - u32 u32_8; 999 + __le32 u32_4; 1000 + __le32 u32_8; 1003 1001 }; 1004 1002 1005 1003 #define RQ_DOORBELL_U32_4_RQ_HEAD_S 0 ··· 1015 1013 #define RQ_DOORBELL_U32_8_HW_SYNC_S 31 1016 1014 1017 1015 struct hns_roce_sq_db { 1018 - u32 u32_4; 1019 - u32 u32_8; 1016 + __le32 u32_4; 1017 + __le32 u32_8; 1020 1018 }; 1021 1019 1022 1020 #define SQ_DOORBELL_U32_4_SQ_HEAD_S 0
+43 -15
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
··· 63 63 int i; 64 64 65 65 if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { 66 - if (rc_sq_wqe->msg_len > hr_dev->caps.max_sq_inline) { 66 + if (le32_to_cpu(rc_sq_wqe->msg_len) > 67 + hr_dev->caps.max_sq_inline) { 67 68 *bad_wr = wr; 68 69 dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal", 69 70 rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline); ··· 137 136 unsigned long flags; 138 137 unsigned int ind; 139 138 void *wqe = NULL; 139 + u32 tmp_len = 0; 140 140 bool loopback; 141 141 int ret = 0; 142 142 u8 *smac; ··· 220 218 HNS_ROCE_V2_WQE_OP_SEND); 221 219 222 220 for (i = 0; i < wr->num_sge; i++) 223 - ud_sq_wqe->msg_len += wr->sg_list[i].length; 221 + tmp_len += wr->sg_list[i].length; 224 222 225 - ud_sq_wqe->immtdata = send_ieth(wr); 223 + ud_sq_wqe->msg_len = 224 + cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len); 225 + 226 + switch (wr->opcode) { 227 + case IB_WR_SEND_WITH_IMM: 228 + case IB_WR_RDMA_WRITE_WITH_IMM: 229 + ud_sq_wqe->immtdata = wr->ex.imm_data; 230 + break; 231 + default: 232 + ud_sq_wqe->immtdata = 0; 233 + break; 234 + } 226 235 227 236 /* Set sig attr */ 228 237 roce_set_bit(ud_sq_wqe->byte_4, ··· 267 254 V2_UD_SEND_WQE_BYTE_24_UDPSPN_M, 268 255 V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0); 269 256 ud_sq_wqe->qkey = 270 - cpu_to_be32(ud_wr(wr)->remote_qkey & 0x80000000) ? 271 - qp->qkey : ud_wr(wr)->remote_qkey; 257 + cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ? 
258 + qp->qkey : ud_wr(wr)->remote_qkey); 272 259 roce_set_field(ud_sq_wqe->byte_32, 273 260 V2_UD_SEND_WQE_BYTE_32_DQPN_M, 274 261 V2_UD_SEND_WQE_BYTE_32_DQPN_S, ··· 277 264 roce_set_field(ud_sq_wqe->byte_36, 278 265 V2_UD_SEND_WQE_BYTE_36_VLAN_M, 279 266 V2_UD_SEND_WQE_BYTE_36_VLAN_S, 280 - ah->av.vlan); 267 + le16_to_cpu(ah->av.vlan)); 281 268 roce_set_field(ud_sq_wqe->byte_36, 282 269 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M, 283 270 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ··· 296 283 roce_set_field(ud_sq_wqe->byte_40, 297 284 V2_UD_SEND_WQE_BYTE_40_SL_M, 298 285 V2_UD_SEND_WQE_BYTE_40_SL_S, 299 - ah->av.sl_tclass_flowlabel >> 300 - HNS_ROCE_SL_SHIFT); 286 + le32_to_cpu(ah->av.sl_tclass_flowlabel) >> 287 + HNS_ROCE_SL_SHIFT); 301 288 roce_set_field(ud_sq_wqe->byte_40, 302 289 V2_UD_SEND_WQE_BYTE_40_PORTN_M, 303 290 V2_UD_SEND_WQE_BYTE_40_PORTN_S, ··· 324 311 rc_sq_wqe = wqe; 325 312 memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe)); 326 313 for (i = 0; i < wr->num_sge; i++) 327 - rc_sq_wqe->msg_len += wr->sg_list[i].length; 314 + tmp_len += wr->sg_list[i].length; 328 315 329 - rc_sq_wqe->inv_key_immtdata = send_ieth(wr); 316 + rc_sq_wqe->msg_len = 317 + cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len); 318 + 319 + switch (wr->opcode) { 320 + case IB_WR_SEND_WITH_IMM: 321 + case IB_WR_RDMA_WRITE_WITH_IMM: 322 + rc_sq_wqe->immtdata = wr->ex.imm_data; 323 + break; 324 + case IB_WR_SEND_WITH_INV: 325 + rc_sq_wqe->inv_key = 326 + cpu_to_le32(wr->ex.invalidate_rkey); 327 + break; 328 + default: 329 + rc_sq_wqe->immtdata = 0; 330 + break; 331 + } 330 332 331 333 roce_set_bit(rc_sq_wqe->byte_4, 332 334 V2_RC_SEND_WQE_BYTE_4_FENCE_S, ··· 479 451 roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M, 480 452 V2_DB_PARAMETER_SL_S, qp->sl); 481 453 482 - hns_roce_write64_k((__be32 *)&sq_db, qp->sq.db_reg_l); 454 + hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l); 483 455 484 456 qp->sq_next_wqe = ind; 485 457 qp->next_sge = sge_ind; ··· 541 513 } 542 514 543 515 if (i < 
hr_qp->rq.max_gs) { 544 - dseg[i].lkey = cpu_to_be32(HNS_ROCE_INVALID_LKEY); 516 + dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); 545 517 dseg[i].addr = 0; 546 518 } 547 519 ··· 574 546 roce_set_field(rq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M, 575 547 V2_DB_PARAMETER_CONS_IDX_S, hr_qp->rq.head); 576 548 577 - hns_roce_write64_k((__be32 *)&rq_db, hr_qp->rq.db_reg_l); 549 + hns_roce_write64_k((__le32 *)&rq_db, hr_qp->rq.db_reg_l); 578 550 } 579 551 spin_unlock_irqrestore(&hr_qp->rq.lock, flags); 580 552 ··· 2151 2123 u8 dest_rd_atomic; 2152 2124 u32 access_flags; 2153 2125 2154 - dest_rd_atomic = !!(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ? 2126 + dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ? 2155 2127 attr->max_dest_rd_atomic : hr_qp->resp_depth; 2156 2128 2157 - access_flags = !!(attr_mask & IB_QP_ACCESS_FLAGS) ? 2129 + access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ? 2158 2130 attr->qp_access_flags : hr_qp->atomic_rd_en; 2159 2131 2160 2132 if (!dest_rd_atomic)
+143 -140
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
··· 224 224 }; 225 225 226 226 struct hns_roce_v2_cq_context { 227 - u32 byte_4_pg_ceqn; 228 - u32 byte_8_cqn; 229 - u32 cqe_cur_blk_addr; 230 - u32 byte_16_hop_addr; 231 - u32 cqe_nxt_blk_addr; 232 - u32 byte_24_pgsz_addr; 233 - u32 byte_28_cq_pi; 234 - u32 byte_32_cq_ci; 235 - u32 cqe_ba; 236 - u32 byte_40_cqe_ba; 237 - u32 byte_44_db_record; 238 - u32 db_record_addr; 239 - u32 byte_52_cqe_cnt; 240 - u32 byte_56_cqe_period_maxcnt; 241 - u32 cqe_report_timer; 242 - u32 byte_64_se_cqe_idx; 227 + __le32 byte_4_pg_ceqn; 228 + __le32 byte_8_cqn; 229 + __le32 cqe_cur_blk_addr; 230 + __le32 byte_16_hop_addr; 231 + __le32 cqe_nxt_blk_addr; 232 + __le32 byte_24_pgsz_addr; 233 + __le32 byte_28_cq_pi; 234 + __le32 byte_32_cq_ci; 235 + __le32 cqe_ba; 236 + __le32 byte_40_cqe_ba; 237 + __le32 byte_44_db_record; 238 + __le32 db_record_addr; 239 + __le32 byte_52_cqe_cnt; 240 + __le32 byte_56_cqe_period_maxcnt; 241 + __le32 cqe_report_timer; 242 + __le32 byte_64_se_cqe_idx; 243 243 }; 244 244 #define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0 245 245 #define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0 ··· 328 328 }; 329 329 330 330 struct hns_roce_v2_qp_context { 331 - u32 byte_4_sqpn_tst; 332 - u32 wqe_sge_ba; 333 - u32 byte_12_sq_hop; 334 - u32 byte_16_buf_ba_pg_sz; 335 - u32 byte_20_smac_sgid_idx; 336 - u32 byte_24_mtu_tc; 337 - u32 byte_28_at_fl; 331 + __le32 byte_4_sqpn_tst; 332 + __le32 wqe_sge_ba; 333 + __le32 byte_12_sq_hop; 334 + __le32 byte_16_buf_ba_pg_sz; 335 + __le32 byte_20_smac_sgid_idx; 336 + __le32 byte_24_mtu_tc; 337 + __le32 byte_28_at_fl; 338 338 u8 dgid[GID_LEN_V2]; 339 - u32 dmac; 340 - u32 byte_52_udpspn_dmac; 341 - u32 byte_56_dqpn_err; 342 - u32 byte_60_qpst_mapid; 343 - u32 qkey_xrcd; 344 - u32 byte_68_rq_db; 345 - u32 rq_db_record_addr; 346 - u32 byte_76_srqn_op_en; 347 - u32 byte_80_rnr_rx_cqn; 348 - u32 byte_84_rq_ci_pi; 349 - u32 rq_cur_blk_addr; 350 - u32 byte_92_srq_info; 351 - u32 byte_96_rx_reqmsn; 352 - u32 rq_nxt_blk_addr; 353 - u32 byte_104_rq_sge; 354 - 
u32 byte_108_rx_reqepsn; 355 - u32 rq_rnr_timer; 356 - u32 rx_msg_len; 357 - u32 rx_rkey_pkt_info; 358 - u64 rx_va; 359 - u32 byte_132_trrl; 360 - u32 trrl_ba; 361 - u32 byte_140_raq; 362 - u32 byte_144_raq; 363 - u32 byte_148_raq; 364 - u32 byte_152_raq; 365 - u32 byte_156_raq; 366 - u32 byte_160_sq_ci_pi; 367 - u32 sq_cur_blk_addr; 368 - u32 byte_168_irrl_idx; 369 - u32 byte_172_sq_psn; 370 - u32 byte_176_msg_pktn; 371 - u32 sq_cur_sge_blk_addr; 372 - u32 byte_184_irrl_idx; 373 - u32 cur_sge_offset; 374 - u32 byte_192_ext_sge; 375 - u32 byte_196_sq_psn; 376 - u32 byte_200_sq_max; 377 - u32 irrl_ba; 378 - u32 byte_208_irrl; 379 - u32 byte_212_lsn; 380 - u32 sq_timer; 381 - u32 byte_220_retry_psn_msn; 382 - u32 byte_224_retry_msg; 383 - u32 rx_sq_cur_blk_addr; 384 - u32 byte_232_irrl_sge; 385 - u32 irrl_cur_sge_offset; 386 - u32 byte_240_irrl_tail; 387 - u32 byte_244_rnr_rxack; 388 - u32 byte_248_ack_psn; 389 - u32 byte_252_err_txcqn; 390 - u32 byte_256_sqflush_rqcqe; 339 + __le32 dmac; 340 + __le32 byte_52_udpspn_dmac; 341 + __le32 byte_56_dqpn_err; 342 + __le32 byte_60_qpst_mapid; 343 + __le32 qkey_xrcd; 344 + __le32 byte_68_rq_db; 345 + __le32 rq_db_record_addr; 346 + __le32 byte_76_srqn_op_en; 347 + __le32 byte_80_rnr_rx_cqn; 348 + __le32 byte_84_rq_ci_pi; 349 + __le32 rq_cur_blk_addr; 350 + __le32 byte_92_srq_info; 351 + __le32 byte_96_rx_reqmsn; 352 + __le32 rq_nxt_blk_addr; 353 + __le32 byte_104_rq_sge; 354 + __le32 byte_108_rx_reqepsn; 355 + __le32 rq_rnr_timer; 356 + __le32 rx_msg_len; 357 + __le32 rx_rkey_pkt_info; 358 + __le64 rx_va; 359 + __le32 byte_132_trrl; 360 + __le32 trrl_ba; 361 + __le32 byte_140_raq; 362 + __le32 byte_144_raq; 363 + __le32 byte_148_raq; 364 + __le32 byte_152_raq; 365 + __le32 byte_156_raq; 366 + __le32 byte_160_sq_ci_pi; 367 + __le32 sq_cur_blk_addr; 368 + __le32 byte_168_irrl_idx; 369 + __le32 byte_172_sq_psn; 370 + __le32 byte_176_msg_pktn; 371 + __le32 sq_cur_sge_blk_addr; 372 + __le32 byte_184_irrl_idx; 373 + __le32 
cur_sge_offset; 374 + __le32 byte_192_ext_sge; 375 + __le32 byte_196_sq_psn; 376 + __le32 byte_200_sq_max; 377 + __le32 irrl_ba; 378 + __le32 byte_208_irrl; 379 + __le32 byte_212_lsn; 380 + __le32 sq_timer; 381 + __le32 byte_220_retry_psn_msn; 382 + __le32 byte_224_retry_msg; 383 + __le32 rx_sq_cur_blk_addr; 384 + __le32 byte_232_irrl_sge; 385 + __le32 irrl_cur_sge_offset; 386 + __le32 byte_240_irrl_tail; 387 + __le32 byte_244_rnr_rxack; 388 + __le32 byte_248_ack_psn; 389 + __le32 byte_252_err_txcqn; 390 + __le32 byte_256_sqflush_rqcqe; 391 391 }; 392 392 393 393 #define V2_QPC_BYTE_4_TST_S 0 ··· 761 761 #define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16) 762 762 763 763 struct hns_roce_v2_cqe { 764 - u32 byte_4; 764 + __le32 byte_4; 765 765 union { 766 766 __le32 rkey; 767 767 __be32 immtdata; 768 768 }; 769 - u32 byte_12; 770 - u32 byte_16; 771 - u32 byte_cnt; 769 + __le32 byte_12; 770 + __le32 byte_16; 771 + __le32 byte_cnt; 772 772 u8 smac[4]; 773 - u32 byte_28; 774 - u32 byte_32; 773 + __le32 byte_28; 774 + __le32 byte_32; 775 775 }; 776 776 777 777 #define V2_CQE_BYTE_4_OPCODE_S 0 ··· 901 901 #define V2_DB_PARAMETER_SL_M GENMASK(18, 16) 902 902 903 903 struct hns_roce_v2_cq_db { 904 - u32 byte_4; 905 - u32 parameter; 904 + __le32 byte_4; 905 + __le32 parameter; 906 906 }; 907 907 908 908 #define V2_CQ_DB_BYTE_4_TAG_S 0 ··· 920 920 #define V2_CQ_DB_PARAMETER_NOTIFY_S 24 921 921 922 922 struct hns_roce_v2_ud_send_wqe { 923 - u32 byte_4; 924 - u32 msg_len; 925 - u32 immtdata; 926 - u32 byte_16; 927 - u32 byte_20; 928 - u32 byte_24; 929 - u32 qkey; 930 - u32 byte_32; 931 - u32 byte_36; 932 - u32 byte_40; 933 - u32 dmac; 934 - u32 byte_48; 923 + __le32 byte_4; 924 + __le32 msg_len; 925 + __be32 immtdata; 926 + __le32 byte_16; 927 + __le32 byte_20; 928 + __le32 byte_24; 929 + __le32 qkey; 930 + __le32 byte_32; 931 + __le32 byte_36; 932 + __le32 byte_40; 933 + __le32 dmac; 934 + __le32 byte_48; 935 935 u8 dgid[GID_LEN_V2]; 936 936 937 937 }; ··· 1004 1004 #define 
V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_M GENMASK(31, 24) 1005 1005 1006 1006 struct hns_roce_v2_rc_send_wqe { 1007 - u32 byte_4; 1008 - u32 msg_len; 1009 - u32 inv_key_immtdata; 1010 - u32 byte_16; 1011 - u32 byte_20; 1012 - u32 rkey; 1013 - u64 va; 1007 + __le32 byte_4; 1008 + __le32 msg_len; 1009 + union { 1010 + __le32 inv_key; 1011 + __be32 immtdata; 1012 + }; 1013 + __le32 byte_16; 1014 + __le32 byte_20; 1015 + __le32 rkey; 1016 + __le64 va; 1014 1017 }; 1015 1018 1016 1019 #define V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0 ··· 1041 1038 #define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0) 1042 1039 1043 1040 struct hns_roce_v2_wqe_data_seg { 1044 - __be32 len; 1045 - __be32 lkey; 1046 - __be64 addr; 1041 + __le32 len; 1042 + __le32 lkey; 1043 + __le64 addr; 1047 1044 }; 1048 1045 1049 1046 struct hns_roce_v2_db { 1050 - u32 byte_4; 1051 - u32 parameter; 1047 + __le32 byte_4; 1048 + __le32 parameter; 1052 1049 }; 1053 1050 1054 1051 struct hns_roce_query_version { ··· 1108 1105 #define PF_RES_DATA_5_PF_EQC_BT_NUM_M GENMASK(25, 16) 1109 1106 1110 1107 struct hns_roce_vf_res_a { 1111 - u32 vf_id; 1112 - u32 vf_qpc_bt_idx_num; 1113 - u32 vf_srqc_bt_idx_num; 1114 - u32 vf_cqc_bt_idx_num; 1115 - u32 vf_mpt_bt_idx_num; 1116 - u32 vf_eqc_bt_idx_num; 1108 + __le32 vf_id; 1109 + __le32 vf_qpc_bt_idx_num; 1110 + __le32 vf_srqc_bt_idx_num; 1111 + __le32 vf_cqc_bt_idx_num; 1112 + __le32 vf_mpt_bt_idx_num; 1113 + __le32 vf_eqc_bt_idx_num; 1117 1114 }; 1118 1115 1119 1116 #define VF_RES_A_DATA_1_VF_QPC_BT_IDX_S 0 ··· 1147 1144 #define VF_RES_A_DATA_5_VF_EQC_NUM_M GENMASK(25, 16) 1148 1145 1149 1146 struct hns_roce_vf_res_b { 1150 - u32 rsv0; 1151 - u32 vf_smac_idx_num; 1152 - u32 vf_sgid_idx_num; 1153 - u32 vf_qid_idx_sl_num; 1154 - u32 rsv[2]; 1147 + __le32 rsv0; 1148 + __le32 vf_smac_idx_num; 1149 + __le32 vf_sgid_idx_num; 1150 + __le32 vf_qid_idx_sl_num; 1151 + __le32 rsv[2]; 1155 1152 }; 1156 1153 1157 1154 #define VF_RES_B_DATA_0_VF_ID_S 0 ··· 1183 1180 #define 
ROCEE_VF_SGID_CFG4_SGID_TYPE_M GENMASK(1, 0) 1184 1181 1185 1182 struct hns_roce_cfg_bt_attr { 1186 - u32 vf_qpc_cfg; 1187 - u32 vf_srqc_cfg; 1188 - u32 vf_cqc_cfg; 1189 - u32 vf_mpt_cfg; 1190 - u32 rsv[2]; 1183 + __le32 vf_qpc_cfg; 1184 + __le32 vf_srqc_cfg; 1185 + __le32 vf_cqc_cfg; 1186 + __le32 vf_mpt_cfg; 1187 + __le32 rsv[2]; 1191 1188 }; 1192 1189 1193 1190 #define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0 ··· 1227 1224 #define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8) 1228 1225 1229 1226 struct hns_roce_cmq_desc { 1230 - u16 opcode; 1231 - u16 flag; 1232 - u16 retval; 1233 - u16 rsv; 1234 - u32 data[6]; 1227 + __le16 opcode; 1228 + __le16 flag; 1229 + __le16 retval; 1230 + __le16 rsv; 1231 + __le32 data[6]; 1235 1232 }; 1236 1233 1237 1234 #define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000 ··· 1277 1274 }; 1278 1275 1279 1276 struct hns_roce_eq_context { 1280 - u32 byte_4; 1281 - u32 byte_8; 1282 - u32 byte_12; 1283 - u32 eqe_report_timer; 1284 - u32 eqe_ba0; 1285 - u32 eqe_ba1; 1286 - u32 byte_28; 1287 - u32 byte_32; 1288 - u32 byte_36; 1289 - u32 nxt_eqe_ba0; 1290 - u32 nxt_eqe_ba1; 1291 - u32 rsv[5]; 1277 + __le32 byte_4; 1278 + __le32 byte_8; 1279 + __le32 byte_12; 1280 + __le32 eqe_report_timer; 1281 + __le32 eqe_ba0; 1282 + __le32 eqe_ba1; 1283 + __le32 byte_28; 1284 + __le32 byte_32; 1285 + __le32 byte_36; 1286 + __le32 nxt_eqe_ba0; 1287 + __le32 nxt_eqe_ba1; 1288 + __le32 rsv[5]; 1292 1289 }; 1293 1290 1294 1291 #define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
+1 -2
drivers/infiniband/hw/hns/hns_roce_main.c
··· 200 200 201 201 memset(props, 0, sizeof(*props)); 202 202 203 - props->sys_image_guid = hr_dev->sys_image_guid; 203 + props->sys_image_guid = cpu_to_be32(hr_dev->sys_image_guid); 204 204 props->max_mr_size = (u64)(~(0ULL)); 205 205 props->page_size_cap = hr_dev->caps.page_size_cap; 206 206 props->vendor_id = hr_dev->vendor_id; ··· 636 636 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table); 637 637 638 638 err_unmap_mtt: 639 - hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table); 640 639 if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) 641 640 hns_roce_cleanup_hem_table(hr_dev, 642 641 &hr_dev->mr_table.mtt_cqe_table);
+2 -16
drivers/infiniband/hw/hns/hns_roce_qp.c
··· 512 512 hr_qp->ibqp.qp_type = init_attr->qp_type; 513 513 514 514 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 515 - hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; 515 + hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR); 516 516 else 517 - hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; 517 + hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR); 518 518 519 519 ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject, 520 520 !!init_attr->srq, hr_qp); ··· 936 936 } 937 937 } 938 938 EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs); 939 - 940 - __be32 send_ieth(struct ib_send_wr *wr) 941 - { 942 - switch (wr->opcode) { 943 - case IB_WR_SEND_WITH_IMM: 944 - case IB_WR_RDMA_WRITE_WITH_IMM: 945 - return cpu_to_le32(wr->ex.imm_data); 946 - case IB_WR_SEND_WITH_INV: 947 - return cpu_to_le32(wr->ex.invalidate_rkey); 948 - default: 949 - return 0; 950 - } 951 - } 952 - EXPORT_SYMBOL_GPL(send_ieth); 953 939 954 940 static void *get_wqe(struct hns_roce_qp *hr_qp, int offset) 955 941 {
+1 -2
drivers/infiniband/hw/qib/qib_rc.c
··· 1913 1913 wc.port_num = 0; 1914 1914 /* Signal completion event if the solicited bit is set. */ 1915 1915 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1916 - (ohdr->bth[0] & 1917 - cpu_to_be32(IB_BTH_SOLICITED)) != 0); 1916 + ib_bth_is_solicited(ohdr)); 1918 1917 break; 1919 1918 1920 1919 case OP(RDMA_WRITE_FIRST):
+1 -2
drivers/infiniband/hw/qib/qib_uc.c
··· 401 401 wc.port_num = 0; 402 402 /* Signal completion event if the solicited bit is set. */ 403 403 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 404 - (ohdr->bth[0] & 405 - cpu_to_be32(IB_BTH_SOLICITED)) != 0); 404 + ib_bth_is_solicited(ohdr)); 406 405 break; 407 406 408 407 case OP(RDMA_WRITE_FIRST):
+1 -2
drivers/infiniband/hw/qib/qib_ud.c
··· 579 579 wc.port_num = qp->port_num; 580 580 /* Signal completion event if the solicited bit is set. */ 581 581 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 582 - (ohdr->bth[0] & 583 - cpu_to_be32(IB_BTH_SOLICITED)) != 0); 582 + ib_bth_is_solicited(ohdr)); 584 583 return; 585 584 586 585 drop:
+4 -10
drivers/infiniband/sw/rxe/rxe_av.c
··· 52 52 return 0; 53 53 } 54 54 55 - int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num, 56 - struct rxe_av *av, struct rdma_ah_attr *attr) 55 + void rxe_av_from_attr(u8 port_num, struct rxe_av *av, 56 + struct rdma_ah_attr *attr) 57 57 { 58 58 memset(av, 0, sizeof(*av)); 59 59 memcpy(&av->grh, rdma_ah_read_grh(attr), 60 60 sizeof(*rdma_ah_read_grh(attr))); 61 61 av->port_num = port_num; 62 - return 0; 63 62 } 64 63 65 - int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av, 66 - struct rdma_ah_attr *attr) 64 + void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr) 67 65 { 68 66 attr->type = RDMA_AH_ATTR_TYPE_ROCE; 69 67 memcpy(rdma_ah_retrieve_grh(attr), &av->grh, sizeof(av->grh)); 70 68 rdma_ah_set_ah_flags(attr, IB_AH_GRH); 71 69 rdma_ah_set_port_num(attr, av->port_num); 72 - return 0; 73 70 } 74 71 75 - int rxe_av_fill_ip_info(struct rxe_dev *rxe, 76 - struct rxe_av *av, 72 + void rxe_av_fill_ip_info(struct rxe_av *av, 77 73 struct rdma_ah_attr *attr, 78 74 struct ib_gid_attr *sgid_attr, 79 75 union ib_gid *sgid) ··· 77 81 rdma_gid2ip(&av->sgid_addr._sockaddr, sgid); 78 82 rdma_gid2ip(&av->dgid_addr._sockaddr, &rdma_ah_read_grh(attr)->dgid); 79 83 av->network_type = ib_gid_to_network_type(sgid_attr->gid_type, sgid); 80 - 81 - return 0; 82 84 } 83 85 84 86 struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
+4 -6
drivers/infiniband/sw/rxe/rxe_loc.h
··· 38 38 39 39 int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr); 40 40 41 - int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num, 42 - struct rxe_av *av, struct rdma_ah_attr *attr); 41 + void rxe_av_from_attr(u8 port_num, struct rxe_av *av, 42 + struct rdma_ah_attr *attr); 43 43 44 - int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av, 45 - struct rdma_ah_attr *attr); 44 + void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr); 46 45 47 - int rxe_av_fill_ip_info(struct rxe_dev *rxe, 48 - struct rxe_av *av, 46 + void rxe_av_fill_ip_info(struct rxe_av *av, 49 47 struct rdma_ah_attr *attr, 50 48 struct ib_gid_attr *sgid_attr, 51 49 union ib_gid *sgid);
+6 -9
drivers/infiniband/sw/rxe/rxe_qp.c
··· 633 633 ib_get_cached_gid(&rxe->ib_dev, 1, 634 634 rdma_ah_read_grh(&attr->ah_attr)->sgid_index, 635 635 &sgid, &sgid_attr); 636 - rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av, 637 - &attr->ah_attr); 638 - rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr, 636 + rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr); 637 + rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr, 639 638 &sgid_attr, &sgid); 640 639 if (sgid_attr.ndev) 641 640 dev_put(sgid_attr.ndev); ··· 647 648 ib_get_cached_gid(&rxe->ib_dev, 1, sgid_index, 648 649 &sgid, &sgid_attr); 649 650 650 - rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av, 651 + rxe_av_from_attr(attr->alt_port_num, &qp->alt_av, 651 652 &attr->alt_ah_attr); 652 - rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr, 653 + rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr, 653 654 &sgid_attr, &sgid); 654 655 if (sgid_attr.ndev) 655 656 dev_put(sgid_attr.ndev); ··· 764 765 /* called by the query qp verb */ 765 766 int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) 766 767 { 767 - struct rxe_dev *rxe = to_rdev(qp->ibqp.device); 768 - 769 768 *attr = qp->attr; 770 769 771 770 attr->rq_psn = qp->resp.psn; ··· 778 781 attr->cap.max_recv_sge = qp->rq.max_sge; 779 782 } 780 783 781 - rxe_av_to_attr(rxe, &qp->pri_av, &attr->ah_attr); 782 - rxe_av_to_attr(rxe, &qp->alt_av, &attr->alt_ah_attr); 784 + rxe_av_to_attr(&qp->pri_av, &attr->ah_attr); 785 + rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr); 783 786 784 787 if (qp->req.state == QP_STATE_DRAIN) { 785 788 attr->sq_draining = 1;
+4 -6
drivers/infiniband/sw/rxe/rxe_verbs.c
··· 271 271 return err; 272 272 } 273 273 274 - err = rxe_av_from_attr(rxe, rdma_ah_get_port_num(attr), av, attr); 275 - if (!err) 276 - err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid); 274 + rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr); 275 + rxe_av_fill_ip_info(av, attr, &sgid_attr, &sgid); 277 276 278 277 if (sgid_attr.ndev) 279 278 dev_put(sgid_attr.ndev); 280 - return err; 279 + return 0; 281 280 } 282 281 283 282 static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, ··· 334 335 335 336 static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) 336 337 { 337 - struct rxe_dev *rxe = to_rdev(ibah->device); 338 338 struct rxe_ah *ah = to_rah(ibah); 339 339 340 340 memset(attr, 0, sizeof(*attr)); 341 341 attr->type = ibah->type; 342 - rxe_av_to_attr(rxe, &ah->av, attr); 342 + rxe_av_to_attr(&ah->av, attr); 343 343 return 0; 344 344 } 345 345
+3
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 2306 2306 priv->ca, ipoib_event); 2307 2307 ib_register_event_handler(&priv->event_handler); 2308 2308 2309 + /* call event handler to ensure pkey in sync */ 2310 + queue_work(ipoib_workqueue, &priv->flush_heavy); 2311 + 2309 2312 result = register_netdev(priv->dev); 2310 2313 if (result) { 2311 2314 pr_warn("%s: couldn't register ipoib port %d; error %d\n",
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/eq.c
··· 51 51 52 52 enum { 53 53 MLX5_NUM_SPARE_EQE = 0x80, 54 - MLX5_NUM_ASYNC_EQE = 0x100, 54 + MLX5_NUM_ASYNC_EQE = 0x1000, 55 55 MLX5_NUM_CMD_EQE = 32, 56 56 MLX5_NUM_PF_DRAIN = 64, 57 57 };
+1 -1
include/linux/mlx5/driver.h
··· 1277 1277 int eqn; 1278 1278 int err; 1279 1279 1280 - err = mlx5_vector2eqn(dev, vector, &eqn, &irq); 1280 + err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq); 1281 1281 if (err) 1282 1282 return NULL; 1283 1283
+13 -6
include/rdma/ib_hdrs.h
··· 313 313 return (u32)((be32_to_cpu(ohdr->bth[1])) & IB_QPN_MASK); 314 314 } 315 315 316 - static inline u8 ib_bth_get_becn(struct ib_other_headers *ohdr) 316 + static inline bool ib_bth_get_becn(struct ib_other_headers *ohdr) 317 317 { 318 - return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_BECN_SHIFT) & 319 - IB_BECN_MASK); 318 + return (ohdr->bth[1]) & cpu_to_be32(IB_BECN_SMASK); 320 319 } 321 320 322 - static inline u8 ib_bth_get_fecn(struct ib_other_headers *ohdr) 321 + static inline bool ib_bth_get_fecn(struct ib_other_headers *ohdr) 323 322 { 324 - return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_FECN_SHIFT) & 325 - IB_FECN_MASK); 323 + return (ohdr->bth[1]) & cpu_to_be32(IB_FECN_SMASK); 326 324 } 327 325 328 326 static inline u8 ib_bth_get_tver(struct ib_other_headers *ohdr) ··· 329 331 IB_BTH_TVER_MASK); 330 332 } 331 333 334 + static inline bool ib_bth_is_solicited(struct ib_other_headers *ohdr) 335 + { 336 + return ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED); 337 + } 338 + 339 + static inline bool ib_bth_is_migration(struct ib_other_headers *ohdr) 340 + { 341 + return ohdr->bth[0] & cpu_to_be32(IB_BTH_MIG_REQ); 342 + } 332 343 #endif /* IB_HDRS_H */
+14 -6
include/rdma/ib_verbs.h
··· 874 874 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult); 875 875 876 876 enum rdma_ah_attr_type { 877 + RDMA_AH_ATTR_TYPE_UNDEFINED, 877 878 RDMA_AH_ATTR_TYPE_IB, 878 879 RDMA_AH_ATTR_TYPE_ROCE, 879 880 RDMA_AH_ATTR_TYPE_OPA, ··· 3811 3810 grh->traffic_class = traffic_class; 3812 3811 } 3813 3812 3814 - /*Get AH type */ 3813 + /** 3814 + * rdma_ah_find_type - Return address handle type. 3815 + * 3816 + * @dev: Device to be checked 3817 + * @port_num: Port number 3818 + */ 3815 3819 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev, 3816 - u32 port_num) 3820 + u8 port_num) 3817 3821 { 3818 3822 if (rdma_protocol_roce(dev, port_num)) 3819 3823 return RDMA_AH_ATTR_TYPE_ROCE; 3820 - else if ((rdma_protocol_ib(dev, port_num)) && 3821 - (rdma_cap_opa_ah(dev, port_num))) 3822 - return RDMA_AH_ATTR_TYPE_OPA; 3823 - else 3824 + if (rdma_protocol_ib(dev, port_num)) { 3825 + if (rdma_cap_opa_ah(dev, port_num)) 3826 + return RDMA_AH_ATTR_TYPE_OPA; 3824 3827 return RDMA_AH_ATTR_TYPE_IB; 3828 + } 3829 + 3830 + return RDMA_AH_ATTR_TYPE_UNDEFINED; 3825 3831 } 3826 3832 3827 3833 /**
+6 -8
include/uapi/rdma/rdma_netlink.h
··· 227 227 RDMA_NLDEV_CMD_UNSPEC, 228 228 229 229 RDMA_NLDEV_CMD_GET, /* can dump */ 230 - RDMA_NLDEV_CMD_SET, 231 - RDMA_NLDEV_CMD_NEW, 232 - RDMA_NLDEV_CMD_DEL, 233 230 234 - RDMA_NLDEV_CMD_PORT_GET, /* can dump */ 235 - RDMA_NLDEV_CMD_PORT_SET, 236 - RDMA_NLDEV_CMD_PORT_NEW, 237 - RDMA_NLDEV_CMD_PORT_DEL, 231 + /* 2 - 4 are free to use */ 238 232 239 - RDMA_NLDEV_CMD_RES_GET, /* can dump */ 233 + RDMA_NLDEV_CMD_PORT_GET = 5, /* can dump */ 234 + 235 + /* 6 - 8 are free to use */ 236 + 237 + RDMA_NLDEV_CMD_RES_GET = 9, /* can dump */ 240 238 241 239 RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */ 242 240