// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */

#include <linux/module.h>
#include <linux/printk.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>

#include "ionic_fw.h"
#include "ionic_ibdev.h"

#define IONIC_OP(version, opname) \
	((version) < 2 ? IONIC_V1_OP_##opname : IONIC_V2_OP_##opname)

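/*
 * Return the next CQE owned by software, if any.  Ownership is tracked by
 * the CQE color bit; dma_rmb() orders the color check before any other
 * reads of the CQE contents.
 */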
static bool ionic_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
			   struct ionic_v1_cqe **cqe)
{
	struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);

	if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
		return false;

	/* Prevent out-of-order reads of the CQE */
	dma_rmb();

	*cqe = qcqe;

	return true;
}

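/*
 * Generate one IB_WC_WR_FLUSH_ERR completion for the oldest posted recv
 * WQE of a flushing QP.  Returns 1 if a work completion was produced,
 * 0 if the recv queue is drained, or -EIO on a malformed WQE.
 */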
static int ionic_flush_recv(struct ionic_qp *qp, struct ib_wc *wc)
{
	struct ionic_rq_meta *meta;
	struct ionic_v1_wqe *wqe;

	if (!qp->rq_flush)
		return 0;

	if (ionic_queue_empty(&qp->rq))
		return 0;

	wqe = ionic_queue_at_cons(&qp->rq);

	/* wqe_id must be a valid queue index */
	if (unlikely(wqe->base.wqe_id >> qp->rq.depth_log2)) {
		ibdev_warn(qp->ibqp.device,
			   "flush qp %u recv index %llu invalid\n",
			   qp->qpid, (unsigned long long)wqe->base.wqe_id);
		return -EIO;
	}

	/* wqe_id must indicate a request that is outstanding */
	meta = &qp->rq_meta[wqe->base.wqe_id];
	if (unlikely(meta->next != IONIC_META_POSTED)) {
		ibdev_warn(qp->ibqp.device,
			   "flush qp %u recv index %llu not posted\n",
			   qp->qpid, (unsigned long long)wqe->base.wqe_id);
		return -EIO;
	}

	ionic_queue_consume(&qp->rq);

	memset(wc, 0, sizeof(*wc));

	wc->status = IB_WC_WR_FLUSH_ERR;
	wc->wr_id = meta->wrid;
	wc->qp = &qp->ibqp;

	meta->next = qp->rq_meta_head;
	qp->rq_meta_head = meta;

	return 1;
}

static int ionic_flush_recv_many(struct ionic_qp *qp,
				 struct ib_wc *wc, int nwc)
{
	int rc = 0, npolled = 0;

	while (npolled < nwc) {
		rc = ionic_flush_recv(qp, wc + npolled);
		if (rc <= 0)
			break;

		npolled += rc;
	}

	return npolled ?: rc;
}

static int ionic_flush_send(struct ionic_qp *qp, struct ib_wc *wc)
{
	struct ionic_sq_meta *meta;

	if (!qp->sq_flush)
		return 0;

	if (ionic_queue_empty(&qp->sq))
		return 0;

	meta = &qp->sq_meta[qp->sq.cons];

	ionic_queue_consume(&qp->sq);

	memset(wc, 0, sizeof(*wc));

	wc->status = IB_WC_WR_FLUSH_ERR;
	wc->wr_id = meta->wrid;
	wc->qp = &qp->ibqp;

	return 1;
}

static int ionic_flush_send_many(struct ionic_qp *qp,
				 struct ib_wc *wc, int nwc)
{
	int rc = 0, npolled = 0;

	while (npolled < nwc) {
		rc = ionic_flush_send(qp, wc + npolled);
		if (rc <= 0)
			break;

		npolled += rc;
	}

	return npolled ?: rc;
}

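/*
 * Translate one receive CQE into an ib_wc.  A flush-status CQE only marks
 * the QP for flushing; error CQEs are reported and also start the flush.
 * Returns 1 if a work completion was produced, 0 if not, negative on error.
 */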
static int ionic_poll_recv(struct ionic_ibdev *dev, struct ionic_cq *cq,
			   struct ionic_qp *cqe_qp, struct ionic_v1_cqe *cqe,
			   struct ib_wc *wc)
{
	struct ionic_qp *qp = NULL;
	struct ionic_rq_meta *meta;
	u32 src_qpn, st_len;
	u16 vlan_tag;
	u8 op;

	if (cqe_qp->rq_flush)
		return 0;

	qp = cqe_qp;

	st_len = be32_to_cpu(cqe->status_length);

	/* ignore wqe_id in case of flush error */
	if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) {
		cqe_qp->rq_flush = true;
		cq->flush = true;
		list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);

		/* posted recvs (if any) flushed by ionic_flush_recv */
		return 0;
	}

	/* there had better be something in the recv queue to complete */
	if (ionic_queue_empty(&qp->rq)) {
		ibdev_warn(&dev->ibdev, "qp %u is empty\n", qp->qpid);
		return -EIO;
	}

	/* wqe_id must be a valid queue index */
	if (unlikely(cqe->recv.wqe_id >> qp->rq.depth_log2)) {
		ibdev_warn(&dev->ibdev,
			   "qp %u recv index %llu invalid\n",
			   qp->qpid, (unsigned long long)cqe->recv.wqe_id);
		return -EIO;
	}

	/* wqe_id must indicate a request that is outstanding */
	meta = &qp->rq_meta[cqe->recv.wqe_id];
	if (unlikely(meta->next != IONIC_META_POSTED)) {
		ibdev_warn(&dev->ibdev,
			   "qp %u recv index %llu not posted\n",
			   qp->qpid, (unsigned long long)cqe->recv.wqe_id);
		return -EIO;
	}

	meta->next = qp->rq_meta_head;
	qp->rq_meta_head = meta;

	memset(wc, 0, sizeof(*wc));

	wc->wr_id = meta->wrid;

	wc->qp = &cqe_qp->ibqp;

	if (ionic_v1_cqe_error(cqe)) {
		wc->vendor_err = st_len;
		wc->status = ionic_to_ib_status(st_len);

		cqe_qp->rq_flush = true;
		cq->flush = true;
		list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);

		ibdev_warn(&dev->ibdev,
			   "qp %d recv cqe with error\n", qp->qpid);
		print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
			       cqe, BIT(cq->q.stride_log2), true);
		goto out;
	}

	wc->vendor_err = 0;
	wc->status = IB_WC_SUCCESS;

	src_qpn = be32_to_cpu(cqe->recv.src_qpn_op);
	op = src_qpn >> IONIC_V1_CQE_RECV_OP_SHIFT;

	src_qpn &= IONIC_V1_CQE_RECV_QPN_MASK;
	op &= IONIC_V1_CQE_RECV_OP_MASK;

	wc->opcode = IB_WC_RECV;
	switch (op) {
	case IONIC_V1_CQE_RECV_OP_RDMA_IMM:
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags |= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */
		break;
	case IONIC_V1_CQE_RECV_OP_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */
		break;
	case IONIC_V1_CQE_RECV_OP_SEND_INV:
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->recv.imm_data_rkey);
		break;
	}

	wc->byte_len = st_len;
	wc->src_qp = src_qpn;

	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_GSI) {
		wc->wc_flags |= IB_WC_GRH | IB_WC_WITH_SMAC;
		ether_addr_copy(wc->smac, cqe->recv.src_mac);

		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
		if (ionic_v1_cqe_recv_is_ipv4(cqe))
			wc->network_hdr_type = RDMA_NETWORK_IPV4;
		else
			wc->network_hdr_type = RDMA_NETWORK_IPV6;

		if (ionic_v1_cqe_recv_is_vlan(cqe))
			wc->wc_flags |= IB_WC_WITH_VLAN;

		/* vlan_tag in cqe will be valid from dpath even if no vlan */
		vlan_tag = be16_to_cpu(cqe->recv.vlan_tag);
		wc->vlan_id = vlan_tag & 0xfff; /* 802.1q VID */
		wc->sl = vlan_tag >> VLAN_PRIO_SHIFT; /* 802.1q PCP */
	}

	wc->pkey_index = 0;
	wc->port_num = 1;

out:
	ionic_queue_consume(&qp->rq);

	return 1;
}

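/*
 * Return true if the send queue has a completed WQE that has not yet been
 * reported, without consuming it.
 */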
static bool ionic_peek_send(struct ionic_qp *qp)
{
	struct ionic_sq_meta *meta;

	if (qp->sq_flush)
		return false;

	/* completed all send queue requests */
	if (ionic_queue_empty(&qp->sq))
		return false;

	meta = &qp->sq_meta[qp->sq.cons];

	/* waiting for remote completion */
	if (meta->remote && meta->seq == qp->sq_msn_cons)
		return false;

	/* waiting for local completion */
	if (!meta->remote && !meta->local_comp)
		return false;

	return true;
}

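/*
 * Consume completed send WQEs in order, producing a work completion only
 * for signaled requests or error status.  Returns 1 if a work completion
 * was produced, otherwise 0.
 */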
static int ionic_poll_send(struct ionic_ibdev *dev, struct ionic_cq *cq,
			   struct ionic_qp *qp, struct ib_wc *wc)
{
	struct ionic_sq_meta *meta;

	if (qp->sq_flush)
		return 0;

	do {
		/* completed all send queue requests */
		if (ionic_queue_empty(&qp->sq))
			goto out_empty;

		meta = &qp->sq_meta[qp->sq.cons];

		/* waiting for remote completion */
		if (meta->remote && meta->seq == qp->sq_msn_cons)
			goto out_empty;

		/* waiting for local completion */
		if (!meta->remote && !meta->local_comp)
			goto out_empty;

		ionic_queue_consume(&qp->sq);

		/* produce wc only if signaled or error status */
	} while (!meta->signal && meta->ibsts == IB_WC_SUCCESS);

	memset(wc, 0, sizeof(*wc));

	wc->status = meta->ibsts;
	wc->wr_id = meta->wrid;
	wc->qp = &qp->ibqp;

	if (meta->ibsts == IB_WC_SUCCESS) {
		wc->byte_len = meta->len;
		wc->opcode = meta->ibop;
	} else {
		wc->vendor_err = meta->len;

		qp->sq_flush = true;
		cq->flush = true;
		list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
	}

	return 1;

out_empty:
	if (qp->sq_flush_rcvd) {
		qp->sq_flush = true;
		cq->flush = true;
		list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
	}
	return 0;
}

static int ionic_poll_send_many(struct ionic_ibdev *dev, struct ionic_cq *cq,
				struct ionic_qp *qp, struct ib_wc *wc, int nwc)
{
	int rc = 0, npolled = 0;

	while (npolled < nwc) {
		rc = ionic_poll_send(dev, cq, qp, wc + npolled);
		if (rc <= 0)
			break;

		npolled += rc;
	}

	return npolled ?: rc;
}

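/*
 * Check that the completed index falls between the consumer and producer
 * indices of the queue (all arithmetic is modulo the queue size).
 */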
static int ionic_validate_cons(u16 prod, u16 cons,
			       u16 comp, u16 mask)
{
	if (((prod - cons) & mask) <= ((comp - cons) & mask))
		return -EIO;

	return 0;
}

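/*
 * Handle a send completion delivered by message sequence number (MSN):
 * advance the MSN consumer index and, on error, record the status in the
 * metadata of the WQE that the MSN maps to.
 */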
static int ionic_comp_msn(struct ionic_qp *qp, struct ionic_v1_cqe *cqe)
{
	struct ionic_sq_meta *meta;
	u16 cqe_seq, cqe_idx;
	int rc;

	if (qp->sq_flush)
		return 0;

	cqe_seq = be32_to_cpu(cqe->send.msg_msn) & qp->sq.mask;

	rc = ionic_validate_cons(qp->sq_msn_prod,
				 qp->sq_msn_cons,
				 cqe_seq - 1,
				 qp->sq.mask);
	if (rc) {
		ibdev_warn(qp->ibqp.device,
			   "qp %u bad msn %#x seq %u for prod %u cons %u\n",
			   qp->qpid, be32_to_cpu(cqe->send.msg_msn),
			   cqe_seq, qp->sq_msn_prod, qp->sq_msn_cons);
		return rc;
	}

	qp->sq_msn_cons = cqe_seq;

	if (ionic_v1_cqe_error(cqe)) {
		cqe_idx = qp->sq_msn_idx[(cqe_seq - 1) & qp->sq.mask];

		meta = &qp->sq_meta[cqe_idx];
		meta->len = be32_to_cpu(cqe->status_length);
		meta->ibsts = ionic_to_ib_status(meta->len);

		ibdev_warn(qp->ibqp.device,
			   "qp %d msn cqe with error\n", qp->qpid);
		print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
			       cqe, sizeof(*cqe), true);
	}

	return 0;
}

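/*
 * Handle a send completion indexed directly by WQE id (NPG): mark the work
 * request locally complete, or note that a flush was indicated so that
 * poll_send can begin flushing once nothing more can be polled normally.
 */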
static int ionic_comp_npg(struct ionic_qp *qp, struct ionic_v1_cqe *cqe)
{
	struct ionic_sq_meta *meta;
	u16 cqe_idx;
	u32 st_len;

	if (qp->sq_flush)
		return 0;

	st_len = be32_to_cpu(cqe->status_length);

	if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) {
		/*
		 * Flush cqe does not consume a wqe on the device, and maybe
		 * no such work request is posted.
		 *
		 * The driver should begin flushing after the last indicated
		 * normal or error completion. Here, only set a hint that the
		 * flush request was indicated. In poll_send, if nothing more
		 * can be polled normally, then begin flushing.
		 */
		qp->sq_flush_rcvd = true;
		return 0;
	}

	cqe_idx = cqe->send.npg_wqe_id & qp->sq.mask;
	meta = &qp->sq_meta[cqe_idx];
	meta->local_comp = true;

	if (ionic_v1_cqe_error(cqe)) {
		meta->len = st_len;
		meta->ibsts = ionic_to_ib_status(st_len);
		meta->remote = false;
		ibdev_warn(qp->ibqp.device,
			   "qp %d npg cqe with error\n", qp->qpid);
		print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
			       cqe, sizeof(*cqe), true);
	}

	return 0;
}

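/*
 * Credit accounting for the CQ: once enough completions have been consumed,
 * return the credits to the device by ringing the CQ doorbell.
 */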
static void ionic_reserve_sync_cq(struct ionic_ibdev *dev, struct ionic_cq *cq)
{
	if (!ionic_queue_empty(&cq->q)) {
		cq->credit += ionic_queue_length(&cq->q);
		cq->q.cons = cq->q.prod;

		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
				 ionic_queue_dbell_val(&cq->q));
	}
}

static void ionic_reserve_cq(struct ionic_ibdev *dev, struct ionic_cq *cq,
			     int spend)
{
	cq->credit -= spend;

	if (cq->credit <= 0)
		ionic_reserve_sync_cq(dev, cq);
}

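/*
 * Poll one hardware CQ under cq->lock: first drain queue pairs with already
 * indicated send completions, then process new CQEs, and finally generate
 * flush completions for queue pairs in the error state.
 */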
static int ionic_poll_vcq_cq(struct ionic_ibdev *dev,
			     struct ionic_cq *cq,
			     int nwc, struct ib_wc *wc)
{
	struct ionic_qp *qp, *qp_next;
	struct ionic_v1_cqe *cqe;
	int rc = 0, npolled = 0;
	unsigned long irqflags;
	u32 qtf, qid;
	bool peek;
	u8 type;

	if (nwc < 1)
		return 0;

	spin_lock_irqsave(&cq->lock, irqflags);

	/* poll already indicated work completions for send queue */
	list_for_each_entry_safe(qp, qp_next, &cq->poll_sq, cq_poll_sq) {
		if (npolled == nwc)
			goto out;

		spin_lock(&qp->sq_lock);
		rc = ionic_poll_send_many(dev, cq, qp, wc + npolled,
					  nwc - npolled);
		spin_unlock(&qp->sq_lock);

		if (rc > 0)
			npolled += rc;

		if (npolled < nwc)
			list_del_init(&qp->cq_poll_sq);
	}

	/* poll for more work completions */
	while (likely(ionic_next_cqe(dev, cq, &cqe))) {
		if (npolled == nwc)
			goto out;

		qtf = ionic_v1_cqe_qtf(cqe);
		qid = ionic_v1_cqe_qtf_qid(qtf);
		type = ionic_v1_cqe_qtf_type(qtf);

		/*
		 * Safe to access QP without additional reference here as,
		 * 1. We hold cq->lock throughout
		 * 2. ionic_destroy_qp() acquires the same cq->lock before cleanup
		 * 3. QP is removed from qp_tbl before any cleanup begins
		 * This ensures no concurrent access between polling and destruction.
		 */
		qp = xa_load(&dev->qp_tbl, qid);
		if (unlikely(!qp)) {
			ibdev_dbg(&dev->ibdev, "missing qp for qid %u\n", qid);
			goto cq_next;
		}

		switch (type) {
		case IONIC_V1_CQE_TYPE_RECV:
			spin_lock(&qp->rq_lock);
			rc = ionic_poll_recv(dev, cq, qp, cqe, wc + npolled);
			spin_unlock(&qp->rq_lock);

			if (rc < 0)
				goto out;

			npolled += rc;

			break;

		case IONIC_V1_CQE_TYPE_SEND_MSN:
			spin_lock(&qp->sq_lock);
			rc = ionic_comp_msn(qp, cqe);
			if (!rc) {
				rc = ionic_poll_send_many(dev, cq, qp,
							  wc + npolled,
							  nwc - npolled);
				peek = ionic_peek_send(qp);
			}
			spin_unlock(&qp->sq_lock);

			if (rc < 0)
				goto out;

			npolled += rc;

			if (peek)
				list_move_tail(&qp->cq_poll_sq, &cq->poll_sq);
			break;

		case IONIC_V1_CQE_TYPE_SEND_NPG:
			spin_lock(&qp->sq_lock);
			rc = ionic_comp_npg(qp, cqe);
			if (!rc) {
				rc = ionic_poll_send_many(dev, cq, qp,
							  wc + npolled,
							  nwc - npolled);
				peek = ionic_peek_send(qp);
			}
			spin_unlock(&qp->sq_lock);

			if (rc < 0)
				goto out;

			npolled += rc;

			if (peek)
				list_move_tail(&qp->cq_poll_sq, &cq->poll_sq);
			break;

		default:
			ibdev_warn(&dev->ibdev,
				   "unexpected cqe type %u\n", type);
			rc = -EIO;
			goto out;
		}

cq_next:
		ionic_queue_produce(&cq->q);
		cq->color = ionic_color_wrap(cq->q.prod, cq->color);
	}

	/* lastly, flush send and recv queues */
	if (likely(!cq->flush))
		goto out;

	cq->flush = false;

	list_for_each_entry_safe(qp, qp_next, &cq->flush_sq, cq_flush_sq) {
		if (npolled == nwc)
			goto out;

		spin_lock(&qp->sq_lock);
		rc = ionic_flush_send_many(qp, wc + npolled, nwc - npolled);
		spin_unlock(&qp->sq_lock);

		if (rc > 0)
			npolled += rc;

		if (npolled < nwc)
			list_del_init(&qp->cq_flush_sq);
		else
			cq->flush = true;
	}

	list_for_each_entry_safe(qp, qp_next, &cq->flush_rq, cq_flush_rq) {
		if (npolled == nwc)
			goto out;

		spin_lock(&qp->rq_lock);
		rc = ionic_flush_recv_many(qp, wc + npolled, nwc - npolled);
		spin_unlock(&qp->rq_lock);

		if (rc > 0)
			npolled += rc;

		if (npolled < nwc)
			list_del_init(&qp->cq_flush_rq);
		else
			cq->flush = true;
	}

out:
	/* in case credit was depleted (more work posted than cq depth) */
	if (cq->credit <= 0)
		ionic_reserve_sync_cq(dev, cq);

	spin_unlock_irqrestore(&cq->lock, irqflags);

	return npolled ?: rc;
}

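/*
 * Poll the virtual CQ: iterate over the per-uDMA hardware CQs, alternating
 * the starting CQ between calls so each one gets a chance to be polled first.
 */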
int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
	struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
	int rc_tmp, rc = 0, npolled = 0;
	int cq_i, cq_x, cq_ix;

	cq_x = vcq->poll_idx;
	vcq->poll_idx ^= dev->lif_cfg.udma_count - 1;

	for (cq_i = 0; npolled < nwc && cq_i < dev->lif_cfg.udma_count; ++cq_i) {
		cq_ix = cq_i ^ cq_x;

		if (!(vcq->udma_mask & BIT(cq_ix)))
			continue;

		rc_tmp = ionic_poll_vcq_cq(dev, &vcq->cq[cq_ix],
					   nwc - npolled,
					   wc + npolled);

		if (rc_tmp >= 0)
			npolled += rc_tmp;
		else if (!rc)
			rc = rc_tmp;
	}

	return npolled ?: rc;
}

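/*
 * Arm one hardware CQ for solicited-only or any completion events, and
 * report whether the next CQE is already owned by software when the caller
 * asked for IB_CQ_REPORT_MISSED_EVENTS.
 */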
static int ionic_req_notify_vcq_cq(struct ionic_ibdev *dev, struct ionic_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	u64 dbell_val = cq->q.dbell;

	if (flags & IB_CQ_SOLICITED) {
		cq->arm_sol_prod = ionic_queue_next(&cq->q, cq->arm_sol_prod);
		dbell_val |= cq->arm_sol_prod | IONIC_CQ_RING_SOL;
	} else {
		cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
		dbell_val |= cq->arm_any_prod | IONIC_CQ_RING_ARM;
	}

	ionic_reserve_sync_cq(dev, cq);

	ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype, dbell_val);

	/*
	 * IB_CQ_REPORT_MISSED_EVENTS:
	 *
	 * The queue index in ring zero guarantees no missed events.
	 *
	 * Here, we check if the color bit in the next cqe is flipped. If it
	 * is flipped, then progress can be made by immediately polling the cq.
	 * Still, the cq will be armed, and an event will be generated. The cq
	 * may be empty when polled after the event, because the next poll
	 * after arming the cq can empty it.
	 */
	return (flags & IB_CQ_REPORT_MISSED_EVENTS) &&
		cq->color == ionic_v1_cqe_color(ionic_queue_at_prod(&cq->q));
}

int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
	struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
	int rc = 0, cq_i;

	for (cq_i = 0; cq_i < dev->lif_cfg.udma_count; ++cq_i) {
		if (!(vcq->udma_mask & BIT(cq_i)))
			continue;

		if (ionic_req_notify_vcq_cq(dev, &vcq->cq[cq_i], flags))
			rc = 1;
	}

	return rc;
}

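/*
 * Copy the scatter/gather list payload directly into the WQE for inline
 * sends.  Returns the total length, or -EINVAL if it exceeds max_data.
 */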
static s64 ionic_prep_inline(void *data, u32 max_data,
			     const struct ib_sge *ib_sgl, int num_sge)
{
	static const s64 bit_31 = 1u << 31;
	s64 len = 0, sg_len;
	int sg_i;

	for (sg_i = 0; sg_i < num_sge; ++sg_i) {
		sg_len = ib_sgl[sg_i].length;

		/* sge length zero means 2GB */
		if (unlikely(sg_len == 0))
			sg_len = bit_31;

		/* greater than max inline data is invalid */
		if (unlikely(len + sg_len > max_data))
			return -EINVAL;

		memcpy(data + len, (void *)ib_sgl[sg_i].addr, sg_len);

		len += sg_len;
	}

	return len;
}

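/*
 * Build the WQE scatter/gather list from the ib_sge array, optionally
 * mirroring the SGE lengths into the 16- or 32-bit spec arrays.  Returns
 * the total payload length, or -EINVAL on overflow or too many SGEs.
 */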
static s64 ionic_prep_pld(struct ionic_v1_wqe *wqe,
			  union ionic_v1_pld *pld,
			  int spec, u32 max_sge,
			  const struct ib_sge *ib_sgl,
			  int num_sge)
{
	static const s64 bit_31 = 1l << 31;
	struct ionic_sge *sgl;
	__be32 *spec32 = NULL;
	__be16 *spec16 = NULL;
	s64 len = 0, sg_len;
	int sg_i = 0;

	if (unlikely(num_sge < 0 || (u32)num_sge > max_sge))
		return -EINVAL;

	if (spec && num_sge > IONIC_V1_SPEC_FIRST_SGE) {
		sg_i = IONIC_V1_SPEC_FIRST_SGE;

		if (num_sge > 8) {
			wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC16);
			spec16 = pld->spec16;
		} else {
			wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC32);
			spec32 = pld->spec32;
		}
	}

	sgl = &pld->sgl[sg_i];

	for (sg_i = 0; sg_i < num_sge; ++sg_i) {
		sg_len = ib_sgl[sg_i].length;

		/* sge length zero means 2GB */
		if (unlikely(sg_len == 0))
			sg_len = bit_31;

		/* greater than 2GB data is invalid */
		if (unlikely(len + sg_len > bit_31))
			return -EINVAL;

		sgl[sg_i].va = cpu_to_be64(ib_sgl[sg_i].addr);
		sgl[sg_i].len = cpu_to_be32(sg_len);
		sgl[sg_i].lkey = cpu_to_be32(ib_sgl[sg_i].lkey);

		if (spec32) {
			spec32[sg_i] = sgl[sg_i].len;
		} else if (spec16) {
			if (unlikely(sg_len > U16_MAX))
				return -EINVAL;
			spec16[sg_i] = cpu_to_be16(sg_len);
		}

		len += sg_len;
	}

	return len;
}

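/*
 * Fill in the fields common to all send WQEs (wrid, flags, signaling, MSN
 * bookkeeping) and advance the send queue producer index.
 */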
static void ionic_prep_base(struct ionic_qp *qp,
			    const struct ib_send_wr *wr,
			    struct ionic_sq_meta *meta,
			    struct ionic_v1_wqe *wqe)
{
	meta->wrid = wr->wr_id;
	meta->ibsts = IB_WC_SUCCESS;
	meta->signal = false;
	meta->local_comp = false;

	wqe->base.wqe_id = qp->sq.prod;

	if (wr->send_flags & IB_SEND_FENCE)
		wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_FENCE);

	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SOL);

	if (qp->sig_all || wr->send_flags & IB_SEND_SIGNALED) {
		wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SIG);
		meta->signal = true;
	}

	meta->seq = qp->sq_msn_prod;
	meta->remote =
		qp->ibqp.qp_type != IB_QPT_UD &&
		qp->ibqp.qp_type != IB_QPT_GSI &&
		!ionic_ibop_is_local(wr->opcode);

	if (meta->remote) {
		qp->sq_msn_idx[meta->seq] = qp->sq.prod;
		qp->sq_msn_prod = ionic_queue_next(&qp->sq, qp->sq_msn_prod);
	}

	ionic_queue_produce(&qp->sq);
}

static int ionic_prep_common(struct ionic_qp *qp,
			     const struct ib_send_wr *wr,
			     struct ionic_sq_meta *meta,
			     struct ionic_v1_wqe *wqe)
{
	s64 signed_len;
	u32 mval;

	if (wr->send_flags & IB_SEND_INLINE) {
		wqe->base.num_sge_key = 0;
		wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_INL);
		mval = ionic_v1_send_wqe_max_data(qp->sq.stride_log2, false);
		signed_len = ionic_prep_inline(wqe->common.pld.data, mval,
					       wr->sg_list, wr->num_sge);
	} else {
		wqe->base.num_sge_key = wr->num_sge;
		mval = ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
						 qp->sq_spec,
						 false);
		signed_len = ionic_prep_pld(wqe, &wqe->common.pld,
					    qp->sq_spec, mval,
					    wr->sg_list, wr->num_sge);
	}

	if (unlikely(signed_len < 0))
		return signed_len;

	meta->len = signed_len;
	wqe->common.length = cpu_to_be32(signed_len);

	ionic_prep_base(qp, wr, meta, wqe);

	return 0;
}

static void ionic_prep_sq_wqe(struct ionic_qp *qp, void *wqe)
{
	memset(wqe, 0, 1u << qp->sq.stride_log2);
}

static void ionic_prep_rq_wqe(struct ionic_qp *qp, void *wqe)
{
	memset(wqe, 0, 1u << qp->rq.stride_log2);
}

static int ionic_prep_send(struct ionic_qp *qp,
			   const struct ib_send_wr *wr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
	struct ionic_sq_meta *meta;
	struct ionic_v1_wqe *wqe;

	meta = &qp->sq_meta[qp->sq.prod];
	wqe = ionic_queue_at_prod(&qp->sq);

	ionic_prep_sq_wqe(qp, wqe);

	meta->ibop = IB_WC_SEND;

	switch (wr->opcode) {
	case IB_WR_SEND:
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND);
		break;
	case IB_WR_SEND_WITH_IMM:
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM);
		wqe->base.imm_data_key = wr->ex.imm_data;
		break;
	case IB_WR_SEND_WITH_INV:
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_INV);
		wqe->base.imm_data_key =
			cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}

	return ionic_prep_common(qp, wr, meta, wqe);
}

static int ionic_prep_send_ud(struct ionic_qp *qp,
			      const struct ib_ud_wr *wr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
	struct ionic_sq_meta *meta;
	struct ionic_v1_wqe *wqe;
	struct ionic_ah *ah;

	if (unlikely(!wr->ah))
		return -EINVAL;

	ah = to_ionic_ah(wr->ah);

	meta = &qp->sq_meta[qp->sq.prod];
	wqe = ionic_queue_at_prod(&qp->sq);

	ionic_prep_sq_wqe(qp, wqe);

	wqe->common.send.ah_id = cpu_to_be32(ah->ahid);
	wqe->common.send.dest_qpn = cpu_to_be32(wr->remote_qpn);
	wqe->common.send.dest_qkey = cpu_to_be32(wr->remote_qkey);

	meta->ibop = IB_WC_SEND;

	switch (wr->wr.opcode) {
	case IB_WR_SEND:
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND);
		break;
	case IB_WR_SEND_WITH_IMM:
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM);
		wqe->base.imm_data_key = wr->wr.ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	return ionic_prep_common(qp, &wr->wr, meta, wqe);
}

static int ionic_prep_rdma(struct ionic_qp *qp,
			   const struct ib_rdma_wr *wr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
	struct ionic_sq_meta *meta;
	struct ionic_v1_wqe *wqe;

	meta = &qp->sq_meta[qp->sq.prod];
	wqe = ionic_queue_at_prod(&qp->sq);

	ionic_prep_sq_wqe(qp, wqe);

	meta->ibop = IB_WC_RDMA_WRITE;

	switch (wr->wr.opcode) {
	case IB_WR_RDMA_READ:
		if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
			return -EINVAL;
		meta->ibop = IB_WC_RDMA_READ;
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_READ);
		break;
	case IB_WR_RDMA_WRITE:
		if (wr->wr.send_flags & IB_SEND_SOLICITED)
			return -EINVAL;
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE);
		break;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE_IMM);
		wqe->base.imm_data_key = wr->wr.ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	wqe->common.rdma.remote_va_high = cpu_to_be32(wr->remote_addr >> 32);
	wqe->common.rdma.remote_va_low = cpu_to_be32(wr->remote_addr);
	wqe->common.rdma.remote_rkey = cpu_to_be32(wr->rkey);

	return ionic_prep_common(qp, &wr->wr, meta, wqe);
}

static int ionic_prep_atomic(struct ionic_qp *qp,
			     const struct ib_atomic_wr *wr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
	struct ionic_sq_meta *meta;
	struct ionic_v1_wqe *wqe;

	if (wr->wr.num_sge != 1 || wr->wr.sg_list[0].length != 8)
		return -EINVAL;

	if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
		return -EINVAL;

	meta = &qp->sq_meta[qp->sq.prod];
	wqe = ionic_queue_at_prod(&qp->sq);

	ionic_prep_sq_wqe(qp, wqe);

	meta->ibop = IB_WC_RDMA_WRITE;

	switch (wr->wr.opcode) {
	case IB_WR_ATOMIC_CMP_AND_SWP:
		meta->ibop = IB_WC_COMP_SWAP;
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_CS);
		wqe->atomic.swap_add_high = cpu_to_be32(wr->swap >> 32);
		wqe->atomic.swap_add_low = cpu_to_be32(wr->swap);
		wqe->atomic.compare_high = cpu_to_be32(wr->compare_add >> 32);
		wqe->atomic.compare_low = cpu_to_be32(wr->compare_add);
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		meta->ibop = IB_WC_FETCH_ADD;
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_FA);
		wqe->atomic.swap_add_high = cpu_to_be32(wr->compare_add >> 32);
		wqe->atomic.swap_add_low = cpu_to_be32(wr->compare_add);
		break;
	default:
		return -EINVAL;
	}

	wqe->atomic.remote_va_high = cpu_to_be32(wr->remote_addr >> 32);
	wqe->atomic.remote_va_low = cpu_to_be32(wr->remote_addr);
	wqe->atomic.remote_rkey = cpu_to_be32(wr->rkey);

	wqe->base.num_sge_key = 1;
	wqe->atomic.sge.va = cpu_to_be64(wr->wr.sg_list[0].addr);
	wqe->atomic.sge.len = cpu_to_be32(8);
	wqe->atomic.sge.lkey = cpu_to_be32(wr->wr.sg_list[0].lkey);

	return ionic_prep_common(qp, &wr->wr, meta, wqe);
}

static int ionic_prep_inv(struct ionic_qp *qp,
			  const struct ib_send_wr *wr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
	struct ionic_sq_meta *meta;
	struct ionic_v1_wqe *wqe;

	if (wr->send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
		return -EINVAL;

	meta = &qp->sq_meta[qp->sq.prod];
	wqe = ionic_queue_at_prod(&qp->sq);

	ionic_prep_sq_wqe(qp, wqe);

	wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, LOCAL_INV);
	wqe->base.imm_data_key = cpu_to_be32(wr->ex.invalidate_rkey);

	meta->len = 0;
	meta->ibop = IB_WC_LOCAL_INV;

	ionic_prep_base(qp, wr, meta, wqe);

	return 0;
}

static int ionic_prep_reg(struct ionic_qp *qp,
			  const struct ib_reg_wr *wr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
	struct ionic_mr *mr = to_ionic_mr(wr->mr);
	struct ionic_sq_meta *meta;
	struct ionic_v1_wqe *wqe;
	__le64 dma_addr;
	int flags;

	if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
		return -EINVAL;

	/* must call ib_map_mr_sg before posting reg wr */
	if (!mr->buf.tbl_pages)
		return -EINVAL;

	meta = &qp->sq_meta[qp->sq.prod];
	wqe = ionic_queue_at_prod(&qp->sq);

	ionic_prep_sq_wqe(qp, wqe);

	flags = to_ionic_mr_flags(wr->access);

	wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, REG_MR);
	wqe->base.num_sge_key = wr->key;
	wqe->base.imm_data_key = cpu_to_be32(mr->ibmr.lkey);
	wqe->reg_mr.va = cpu_to_be64(mr->ibmr.iova);
	wqe->reg_mr.length = cpu_to_be64(mr->ibmr.length);
	wqe->reg_mr.offset = ionic_pgtbl_off(&mr->buf, mr->ibmr.iova);
	dma_addr = ionic_pgtbl_dma(&mr->buf, mr->ibmr.iova);
	wqe->reg_mr.dma_addr = cpu_to_be64(le64_to_cpu(dma_addr));

	wqe->reg_mr.map_count = cpu_to_be32(mr->buf.tbl_pages);
	wqe->reg_mr.flags = cpu_to_be16(flags);
	wqe->reg_mr.dir_size_log2 = 0;
	wqe->reg_mr.page_size_log2 = order_base_2(mr->ibmr.page_size);

	meta->len = 0;
	meta->ibop = IB_WC_REG_MR;

	ionic_prep_base(qp, &wr->wr, meta, wqe);

	return 0;
}

static int ionic_prep_one_rc(struct ionic_qp *qp,
			     const struct ib_send_wr *wr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
	int rc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND_WITH_INV:
		rc = ionic_prep_send(qp, wr);
		break;
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		rc = ionic_prep_rdma(qp, rdma_wr(wr));
		break;
	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		rc = ionic_prep_atomic(qp, atomic_wr(wr));
		break;
	case IB_WR_LOCAL_INV:
		rc = ionic_prep_inv(qp, wr);
		break;
	case IB_WR_REG_MR:
		rc = ionic_prep_reg(qp, reg_wr(wr));
		break;
	default:
		ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode);
		rc = -EINVAL;
	}

	return rc;
}

static int ionic_prep_one_ud(struct ionic_qp *qp,
			     const struct ib_send_wr *wr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
	int rc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		rc = ionic_prep_send_ud(qp, ud_wr(wr));
		break;
	default:
		ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode);
		rc = -EINVAL;
	}

	return rc;
}

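/*
 * Build one receive WQE from the posted work request and reserve a metadata
 * slot so that the completion (or flush) can be matched back to its wr_id.
 */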
static int ionic_prep_recv(struct ionic_qp *qp,
			   const struct ib_recv_wr *wr)
{
	struct ionic_rq_meta *meta;
	struct ionic_v1_wqe *wqe;
	s64 signed_len;
	u32 mval;

	wqe = ionic_queue_at_prod(&qp->rq);

	/* if wqe is owned by device, caller can try posting again soon */
	if (wqe->base.flags & cpu_to_be16(IONIC_V1_FLAG_FENCE))
		return -EAGAIN;

	meta = qp->rq_meta_head;
	if (unlikely(meta == IONIC_META_LAST) ||
	    unlikely(meta == IONIC_META_POSTED))
		return -EIO;

	ionic_prep_rq_wqe(qp, wqe);

	mval = ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2, qp->rq_spec,
					 false);
	signed_len = ionic_prep_pld(wqe, &wqe->recv.pld,
				    qp->rq_spec, mval,
				    wr->sg_list, wr->num_sge);
	if (signed_len < 0)
		return signed_len;

	meta->wrid = wr->wr_id;

	wqe->base.wqe_id = meta - qp->rq_meta;
	wqe->base.num_sge_key = wr->num_sge;

	/* total length for recv goes in base imm_data_key */
	wqe->base.imm_data_key = cpu_to_be32(signed_len);

	ionic_queue_produce(&qp->rq);

	qp->rq_meta_head = meta->next;
	meta->next = IONIC_META_POSTED;

	return 0;
}

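/*
 * Post a chain of send work requests.  WQEs are built under sq_lock, then
 * the send and CQ doorbells are rung under cq->lock; if the QP is already
 * flushing, the CQ completion handler is invoked so the consumer polls the
 * flush completions.
 */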
static int ionic_post_send_common(struct ionic_ibdev *dev,
				  struct ionic_vcq *vcq,
				  struct ionic_cq *cq,
				  struct ionic_qp *qp,
				  const struct ib_send_wr *wr,
				  const struct ib_send_wr **bad)
{
	unsigned long irqflags;
	bool notify = false;
	int spend, rc = 0;

	if (!bad)
		return -EINVAL;

	if (!qp->has_sq) {
		*bad = wr;
		return -EINVAL;
	}

	if (qp->state < IB_QPS_RTS) {
		*bad = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->sq_lock, irqflags);

	while (wr) {
		if (ionic_queue_full(&qp->sq)) {
			ibdev_dbg(&dev->ibdev, "queue full");
			rc = -ENOMEM;
			goto out;
		}

		if (qp->ibqp.qp_type == IB_QPT_UD ||
		    qp->ibqp.qp_type == IB_QPT_GSI)
			rc = ionic_prep_one_ud(qp, wr);
		else
			rc = ionic_prep_one_rc(qp, wr);
		if (rc)
			goto out;

		wr = wr->next;
	}

out:
	spin_unlock_irqrestore(&qp->sq_lock, irqflags);

	spin_lock_irqsave(&cq->lock, irqflags);
	spin_lock(&qp->sq_lock);

	if (likely(qp->sq.prod != qp->sq_old_prod)) {
		/* ring cq doorbell just in time */
		spend = (qp->sq.prod - qp->sq_old_prod) & qp->sq.mask;
		ionic_reserve_cq(dev, cq, spend);

		qp->sq_old_prod = qp->sq.prod;

		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.sq_qtype,
				 ionic_queue_dbell_val(&qp->sq));
	}

	if (qp->sq_flush) {
		notify = true;
		cq->flush = true;
		list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
	}

	spin_unlock(&qp->sq_lock);
	spin_unlock_irqrestore(&cq->lock, irqflags);

	if (notify && vcq->ibcq.comp_handler)
		vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context);

	*bad = wr;
	return rc;
}

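/*
 * Post a chain of receive work requests.  Mirrors ionic_post_send_common,
 * except that a QP without a completion queue skips the doorbell and
 * flush-notification step.
 */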
static int ionic_post_recv_common(struct ionic_ibdev *dev,
				  struct ionic_vcq *vcq,
				  struct ionic_cq *cq,
				  struct ionic_qp *qp,
				  const struct ib_recv_wr *wr,
				  const struct ib_recv_wr **bad)
{
	unsigned long irqflags;
	bool notify = false;
	int spend, rc = 0;

	if (!bad)
		return -EINVAL;

	if (!qp->has_rq) {
		*bad = wr;
		return -EINVAL;
	}

	if (qp->state < IB_QPS_INIT) {
		*bad = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->rq_lock, irqflags);

	while (wr) {
		if (ionic_queue_full(&qp->rq)) {
			ibdev_dbg(&dev->ibdev, "queue full");
			rc = -ENOMEM;
			goto out;
		}

		rc = ionic_prep_recv(qp, wr);
		if (rc)
			goto out;

		wr = wr->next;
	}

out:
	if (!cq) {
		spin_unlock_irqrestore(&qp->rq_lock, irqflags);
		goto out_unlocked;
	}
	spin_unlock_irqrestore(&qp->rq_lock, irqflags);

	spin_lock_irqsave(&cq->lock, irqflags);
	spin_lock(&qp->rq_lock);

	if (likely(qp->rq.prod != qp->rq_old_prod)) {
		/* ring cq doorbell just in time */
		spend = (qp->rq.prod - qp->rq_old_prod) & qp->rq.mask;
		ionic_reserve_cq(dev, cq, spend);

		qp->rq_old_prod = qp->rq.prod;

		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.rq_qtype,
				 ionic_queue_dbell_val(&qp->rq));
	}

	if (qp->rq_flush) {
		notify = true;
		cq->flush = true;
		list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
	}

	spin_unlock(&qp->rq_lock);
	spin_unlock_irqrestore(&cq->lock, irqflags);

	if (notify && vcq->ibcq.comp_handler)
		vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context);

out_unlocked:
	*bad = wr;
	return rc;
}

int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		    const struct ib_send_wr **bad)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
	struct ionic_vcq *vcq = to_ionic_vcq(ibqp->send_cq);
	struct ionic_qp *qp = to_ionic_qp(ibqp);
	struct ionic_cq *cq =
		to_ionic_vcq_cq(ibqp->send_cq, qp->udma_idx);

	return ionic_post_send_common(dev, vcq, cq, qp, wr, bad);
}

int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		    const struct ib_recv_wr **bad)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
	struct ionic_vcq *vcq = to_ionic_vcq(ibqp->recv_cq);
	struct ionic_qp *qp = to_ionic_qp(ibqp);
	struct ionic_cq *cq =
		to_ionic_vcq_cq(ibqp->recv_cq, qp->udma_idx);

	return ionic_post_recv_common(dev, vcq, cq, qp, wr, bad);
}
1399}