Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rdma/siw: completion queue methods

Broken up commit to add the Soft iWarp RDMA driver.

Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

Authored by Bernard Metzler; committed by Jason Gunthorpe.
b0fff731 8b6a361b

+101
+101
drivers/infiniband/sw/siw/siw_cq.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause 2 + 3 + /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ 4 + /* Copyright (c) 2008-2019, IBM Corporation */ 5 + 6 + #include <linux/errno.h> 7 + #include <linux/types.h> 8 + 9 + #include <rdma/ib_verbs.h> 10 + 11 + #include "siw.h" 12 + 13 + static int map_wc_opcode[SIW_NUM_OPCODES] = { 14 + [SIW_OP_WRITE] = IB_WC_RDMA_WRITE, 15 + [SIW_OP_SEND] = IB_WC_SEND, 16 + [SIW_OP_SEND_WITH_IMM] = IB_WC_SEND, 17 + [SIW_OP_READ] = IB_WC_RDMA_READ, 18 + [SIW_OP_READ_LOCAL_INV] = IB_WC_RDMA_READ, 19 + [SIW_OP_COMP_AND_SWAP] = IB_WC_COMP_SWAP, 20 + [SIW_OP_FETCH_AND_ADD] = IB_WC_FETCH_ADD, 21 + [SIW_OP_INVAL_STAG] = IB_WC_LOCAL_INV, 22 + [SIW_OP_REG_MR] = IB_WC_REG_MR, 23 + [SIW_OP_RECEIVE] = IB_WC_RECV, 24 + [SIW_OP_READ_RESPONSE] = -1 /* not used */ 25 + }; 26 + 27 + static struct { 28 + enum siw_opcode siw; 29 + enum ib_wc_status ib; 30 + } map_cqe_status[SIW_NUM_WC_STATUS] = { 31 + { SIW_WC_SUCCESS, IB_WC_SUCCESS }, 32 + { SIW_WC_LOC_LEN_ERR, IB_WC_LOC_LEN_ERR }, 33 + { SIW_WC_LOC_PROT_ERR, IB_WC_LOC_PROT_ERR }, 34 + { SIW_WC_LOC_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR }, 35 + { SIW_WC_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR }, 36 + { SIW_WC_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR }, 37 + { SIW_WC_LOC_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR }, 38 + { SIW_WC_REM_ACCESS_ERR, IB_WC_REM_ACCESS_ERR }, 39 + { SIW_WC_REM_INV_REQ_ERR, IB_WC_REM_INV_REQ_ERR }, 40 + { SIW_WC_GENERAL_ERR, IB_WC_GENERAL_ERR } 41 + }; 42 + 43 + /* 44 + * Reap one CQE from the CQ. Only used by kernel clients 45 + * during CQ normal operation. Might be called during CQ 46 + * flush for user mapped CQE array as well. 
47 + */ 48 + int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) 49 + { 50 + struct siw_cqe *cqe; 51 + unsigned long flags; 52 + 53 + spin_lock_irqsave(&cq->lock, flags); 54 + 55 + cqe = &cq->queue[cq->cq_get % cq->num_cqe]; 56 + if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) { 57 + memset(wc, 0, sizeof(*wc)); 58 + wc->wr_id = cqe->id; 59 + wc->status = map_cqe_status[cqe->status].ib; 60 + wc->opcode = map_wc_opcode[cqe->opcode]; 61 + wc->byte_len = cqe->bytes; 62 + 63 + /* 64 + * During CQ flush, also user land CQE's may get 65 + * reaped here, which do not hold a QP reference 66 + * and do not qualify for memory extension verbs. 67 + */ 68 + if (likely(cq->kernel_verbs)) { 69 + if (cqe->flags & SIW_WQE_REM_INVAL) { 70 + wc->ex.invalidate_rkey = cqe->inval_stag; 71 + wc->wc_flags = IB_WC_WITH_INVALIDATE; 72 + } 73 + wc->qp = cqe->base_qp; 74 + siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%p\n", 75 + cq->cq_get % cq->num_cqe, cqe->opcode, 76 + cqe->flags, (void *)cqe->id); 77 + } 78 + WRITE_ONCE(cqe->flags, 0); 79 + cq->cq_get++; 80 + 81 + spin_unlock_irqrestore(&cq->lock, flags); 82 + 83 + return 1; 84 + } 85 + spin_unlock_irqrestore(&cq->lock, flags); 86 + 87 + return 0; 88 + } 89 + 90 + /* 91 + * siw_cq_flush() 92 + * 93 + * Flush all CQ elements. 94 + */ 95 + void siw_cq_flush(struct siw_cq *cq) 96 + { 97 + struct ib_wc wc; 98 + 99 + while (siw_reap_cqe(cq, &wc)) 100 + ; 101 + }