Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/bnxt_re: Share a page to expose per SRQ info with userspace

Gen P7 adapters need to share toggle bit information, received
in the kernel driver, with user space. User space needs this
info to arm the SRQ.

A user space application can get this page using the
UAPI routines. The library will mmap this page and read the
toggle bits to be used in the next ARM doorbell.

A hash list is used to map an SRQ ID to its SRQ structure.
The SRQ structure is retrieved from the hash list when the
library calls the UAPI routine to get the toggle page
mapping. Currently a full page is mapped per SRQ. This
can be optimized to let multiple SRQs from the same
application share the same page at different offsets
within the page.

Signed-off-by: Chandramohan Akula <chandramohan.akula@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Link: https://patch.msgid.link/1724945645-14989-4-git-send-email-selvin.xavier@broadcom.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>

authored by

Chandramohan Akula and committed by
Leon Romanovsky
181028a0 b4207630

+47 -2
+2
drivers/infiniband/hw/bnxt_re/bnxt_re.h
··· 141 141 #define BNXT_RE_GRC_FIFO_REG_BASE 0x2000 142 142 143 143 #define MAX_CQ_HASH_BITS (16) 144 + #define MAX_SRQ_HASH_BITS (16) 144 145 struct bnxt_re_dev { 145 146 struct ib_device ibdev; 146 147 struct list_head list; ··· 197 196 struct work_struct dbq_fifo_check_work; 198 197 struct delayed_work dbq_pacing_work; 199 198 DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS); 199 + DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS); 200 200 }; 201 201 202 202 #define to_bnxt_re_dev(ptr, member) \
+33 -1
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 1707 1707 1708 1708 if (qplib_srq->cq) 1709 1709 nq = qplib_srq->cq->nq; 1710 + if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) { 1711 + free_page((unsigned long)srq->uctx_srq_page); 1712 + hash_del(&srq->hash_entry); 1713 + } 1710 1714 bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq); 1711 1715 ib_umem_release(srq->umem); 1712 1716 atomic_dec(&rdev->stats.res.srq_count); ··· 1815 1811 } 1816 1812 1817 1813 if (udata) { 1818 - struct bnxt_re_srq_resp resp; 1814 + struct bnxt_re_srq_resp resp = {}; 1819 1815 1820 1816 resp.srqid = srq->qplib_srq.id; 1817 + if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) { 1818 + hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id); 1819 + srq->uctx_srq_page = (void *)get_zeroed_page(GFP_KERNEL); 1820 + if (!srq->uctx_srq_page) { 1821 + rc = -ENOMEM; 1822 + goto fail; 1823 + } 1824 + resp.comp_mask |= BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT; 1825 + } 1821 1826 rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); 1822 1827 if (rc) { 1823 1828 ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!"); ··· 4304 4291 return cq; 4305 4292 } 4306 4293 4294 + static struct bnxt_re_srq *bnxt_re_search_for_srq(struct bnxt_re_dev *rdev, u32 srq_id) 4295 + { 4296 + struct bnxt_re_srq *srq = NULL, *tmp_srq; 4297 + 4298 + hash_for_each_possible(rdev->srq_hash, tmp_srq, hash_entry, srq_id) { 4299 + if (tmp_srq->qplib_srq.id == srq_id) { 4300 + srq = tmp_srq; 4301 + break; 4302 + } 4303 + } 4304 + return srq; 4305 + } 4306 + 4307 4307 /* Helper function to mmap the virtual memory from user app */ 4308 4308 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma) 4309 4309 { ··· 4545 4519 struct bnxt_re_ucontext *uctx; 4546 4520 struct ib_ucontext *ib_uctx; 4547 4521 struct bnxt_re_dev *rdev; 4522 + struct bnxt_re_srq *srq; 4548 4523 u32 length = PAGE_SIZE; 4549 4524 struct bnxt_re_cq *cq; 4550 4525 u64 mem_offset; ··· 4577 4550 addr = (u64)cq->uctx_cq_page; 4578 4551 break; 4579 4552 case 
BNXT_RE_SRQ_TOGGLE_MEM: 4553 + srq = bnxt_re_search_for_srq(rdev, res_id); 4554 + if (!srq) 4555 + return -EINVAL; 4556 + 4557 + addr = (u64)srq->uctx_srq_page; 4580 4558 break; 4581 4559 4582 4560 default:
+1
drivers/infiniband/hw/bnxt_re/ib_verbs.h
··· 78 78 struct ib_umem *umem; 79 79 spinlock_t lock; /* protect srq */ 80 80 void *uctx_srq_page; 81 + struct hlist_node hash_entry; 81 82 }; 82 83 83 84 struct bnxt_re_qp {
+5 -1
drivers/infiniband/hw/bnxt_re/main.c
··· 139 139 if (bnxt_re_hwrm_qcaps(rdev)) 140 140 dev_err(rdev_to_dev(rdev), 141 141 "Failed to query hwrm qcaps\n"); 142 - if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx)) 142 + if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx)) { 143 143 cctx->modes.toggle_bits |= BNXT_QPLIB_CQ_TOGGLE_BIT; 144 + cctx->modes.toggle_bits |= BNXT_QPLIB_SRQ_TOGGLE_BIT; 145 + } 144 146 } 145 147 146 148 static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev) ··· 1773 1771 bnxt_re_vf_res_config(rdev); 1774 1772 } 1775 1773 hash_init(rdev->cq_hash); 1774 + if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) 1775 + hash_init(rdev->srq_hash); 1776 1776 1777 1777 return 0; 1778 1778 free_sctx:
+6
include/uapi/rdma/bnxt_re-abi.h
··· 141 141 __aligned_u64 srq_handle; 142 142 }; 143 143 144 + enum bnxt_re_srq_mask { 145 + BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT = 0x1, 146 + }; 147 + 144 148 struct bnxt_re_srq_resp { 145 149 __u32 srqid; 150 + __u32 rsvd; /* padding */ 151 + __aligned_u64 comp_mask; 146 152 }; 147 153 148 154 enum bnxt_re_shpg_offt {