#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rvt-abi.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10

/*
 * If a packet's QP[23:16] bits match this value, then it is
 * a PSM packet and the hardware will expect a KDETH header
 * following the BTH.
 */
#define RVT_KDETH_QP_PREFIX       0x80
#define RVT_KDETH_QP_SUFFIX       0xffff
#define RVT_KDETH_QP_PREFIX_MASK  0x00ff0000
#define RVT_KDETH_QP_PREFIX_SHIFT 16
#define RVT_KDETH_QP_BASE         (u32)(RVT_KDETH_QP_PREFIX << \
                                        RVT_KDETH_QP_PREFIX_SHIFT)
#define RVT_KDETH_QP_MAX          (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX)
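
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * a driver could classify a QPN against the KDETH range with the prefix
 * mask and shift, e.g.:
 *
 *      static inline bool example_is_kdeth_qpn(u32 qpn)
 *      {
 *              return ((qpn & RVT_KDETH_QP_PREFIX_MASK) >>
 *                      RVT_KDETH_QP_PREFIX_SHIFT) == RVT_KDETH_QP_PREFIX;
 *      }
 *
 * With the values above, RVT_KDETH_QP_BASE works out to 0x00800000 and
 * RVT_KDETH_QP_MAX to 0x0080ffff.
 */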

/*
 * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this
 * prefix value, then it is an AIP packet with a DETH containing the entropy
 * value in byte 4 following the BTH.
 */
#define RVT_AIP_QP_PREFIX       0x81
#define RVT_AIP_QP_SUFFIX       0xffff
#define RVT_AIP_QP_PREFIX_MASK  0x00ff0000
#define RVT_AIP_QP_PREFIX_SHIFT 16
#define RVT_AIP_QP_BASE         (u32)(RVT_AIP_QP_PREFIX << \
                                      RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QPN_MAX         BIT(RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QP_MAX          (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - The max bit that can be used by rdmavt
 */
#define RVT_S_SIGNAL_REQ_WR     0x0001
#define RVT_S_BUSY              0x0002
#define RVT_S_TIMER             0x0004
#define RVT_S_RESP_PENDING      0x0008
#define RVT_S_ACK_PENDING       0x0010
#define RVT_S_WAIT_FENCE        0x0020
#define RVT_S_WAIT_RDMAR        0x0040
#define RVT_S_WAIT_RNR          0x0080
#define RVT_S_WAIT_SSN_CREDIT   0x0100
#define RVT_S_WAIT_DMA          0x0200
#define RVT_S_WAIT_PIO          0x0400
#define RVT_S_WAIT_TX           0x0800
#define RVT_S_WAIT_DMA_DESC     0x1000
#define RVT_S_WAIT_KMEM         0x2000
#define RVT_S_WAIT_PSN          0x4000
#define RVT_S_WAIT_ACK          0x8000
#define RVT_S_SEND_ONE          0x10000
#define RVT_S_UNLIMITED_CREDIT  0x20000
#define RVT_S_ECN               0x40000
#define RVT_S_MAX_BIT_MASK      0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit next to
 * RVT_S_MAX_BIT_MASK
 */

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
        (RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
         RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
        RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
        RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
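
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * progress logic typically tests these masks under s_lock, e.g.:
 *
 *      spin_lock_irqsave(&qp->s_lock, flags);
 *      if (!(qp->s_flags & RVT_S_ANY_WAIT_SEND)) {
 *              // nothing is blocking send work requests; safe to
 *              // advance the send work queue here
 *      }
 *      spin_unlock_irqrestore(&qp->s_lock, flags);
 */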

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK      0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND                  0x20
#define RVT_FLUSH_RECV                  0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
        (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
        (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY        (IB_SEND_RESERVED_START << 1)

/**
 * rvt_ud_wr - IB UD work plus AH cache
 * @wr: valid IB work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr, i.e. the
 * attr MUST come after wr. The ib_ud_wr is sized and copied in
 * rvt_post_one_wr. The copy assumes that wr is first.
 */
struct rvt_ud_wr {
        struct ib_ud_wr wr;
        struct rdma_ah_attr *attr;
};
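
/*
 * Illustrative sketch (an assumption, not in the original header): the
 * layout contract above could be asserted at compile time, e.g.:
 *
 *      BUILD_BUG_ON(offsetof(struct rvt_ud_wr, wr) != 0);
 *      BUILD_BUG_ON(offsetof(struct rvt_ud_wr, attr) <
 *                   sizeof(struct ib_ud_wr));
 *
 * so a reordering of the members breaks the build rather than the copy
 * in rvt_post_one_wr.
 */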

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
        union {
                struct ib_send_wr wr;   /* don't use wr.sg_list */
                struct rvt_ud_wr ud_wr;
                struct ib_reg_wr reg_wr;
                struct ib_rdma_wr rdma_wr;
                struct ib_atomic_wr atomic_wr;
        };
        u32 psn;                /* first packet sequence number */
        u32 lpsn;               /* last packet sequence number */
        u32 ssn;                /* send sequence number */
        u32 length;             /* total length of data in sg_list */
        void *priv;             /* driver dependent field */
        struct rvt_sge sg_list[];
};

/**
 * struct rvt_krwq - kernel struct receive work request
 * @p_lock: lock to protect producer of the kernel buffer
 * @head: index of next entry to fill
 * @c_lock: lock to protect consumer of the kernel buffer
 * @tail: index of next entry to pull
 * @count: approximate count of total receive entries posted
 * @wq: receive work request queue entries
 *
 * This structure is used to contain the head pointer,
 * tail pointer and receive work queue entries for a kernel
 * mode user.
 */
struct rvt_krwq {
        spinlock_t p_lock;      /* protect producer */
        u32 head;               /* new work requests posted to the head */

        /* protect consumer */
        spinlock_t c_lock ____cacheline_aligned_in_smp;
        u32 tail;               /* receives pull requests from here. */
        u32 count;              /* approx count of receive entries posted */
        struct rvt_rwqe *curr_wq;
        struct rvt_rwqe wq[];
};
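
/*
 * Illustrative sketch (an assumption, not in the original header): the
 * split p_lock/c_lock design lets a poster and a consumer run
 * concurrently. Posting touches only the producer side, e.g.:
 *
 *      spin_lock_irq(&rq->kwq->p_lock);
 *      wqe = rvt_get_rwqe_ptr(rq, rq->kwq->head);
 *      // ...fill in wqe->wr_id, wqe->num_sge, wqe->sg_list[]...
 *      if (++rq->kwq->head >= rq->size)
 *              rq->kwq->head = 0;
 *      spin_unlock_irq(&rq->kwq->p_lock);
 */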

/**
 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
 * @swqe: valid Send WQE
 *
 */
static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
{
        return ibah_to_rvtah(swqe->ud_wr.wr.ah);
}

/**
 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
 * @swqe: valid Send WQE
 *
 */
static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
{
        return swqe->ud_wr.attr;
}

/**
 * rvt_get_swqe_remote_qpn - Access the remote QPN value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
{
        return swqe->ud_wr.wr.remote_qpn;
}

/**
 * rvt_get_swqe_remote_qkey - Access the remote qkey value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
{
        return swqe->ud_wr.wr.remote_qkey;
}

/**
 * rvt_get_swqe_pkey_index - Access the pkey index
 * @swqe: valid Send WQE
 *
 */
static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
{
        return swqe->ud_wr.wr.pkey_index;
}

struct rvt_rq {
        struct rvt_rwq *wq;
        struct rvt_krwq *kwq;
        u32 size;               /* size of RWQE array */
        u8 max_sge;
        /* protect changes in this struct */
        spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
        struct rvt_sge rdma_sge;
        u64 atomic_data;
        u32 psn;
        u32 lpsn;
        u8 opcode;
        u8 sent;
        void *priv;
};

#define RC_QP_SCALING_INTERVAL  5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table driven post send so that
 * drivers can support differing and potentially
 * driver-specific sets of operations.
 */

struct rvt_operation_params {
        size_t length;
        u32 qpt_support;
        u32 flags;
};
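
/*
 * Illustrative sketch (hypothetical values, not taken from this header):
 * a driver builds a table indexed by opcode and hands it to rdmavt, e.g.:
 *
 *      static const struct rvt_operation_params example_ops[RVT_OPERATION_MAX] = {
 *      [IB_WR_SEND] = {
 *              .length = sizeof(struct ib_send_wr),
 *              .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *      },
 *      [IB_WR_RDMA_WRITE] = {
 *              .length = sizeof(struct ib_rdma_wr),
 *              .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *      },
 *      };
 *
 * The .length tells post send how many bytes of the WR to copy into the
 * swqe; .flags selects RVT_OPERATION_* behaviors defined above.
 */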

/*
 * Common variables are protected by both r_rq.lock and s_lock, in that
 * order; taking both locks only happens in modify_qp() or when changing
 * the QP 'state'.
 */
struct rvt_qp {
        struct ib_qp ibqp;
        void *priv;             /* Driver private data */
        /* read mostly fields above and below */
        struct rdma_ah_attr remote_ah_attr;
        struct rdma_ah_attr alt_ah_attr;
        struct rvt_qp __rcu *next;      /* link list for QPN hash table */
        struct rvt_swqe *s_wq;          /* send work queue */
        struct rvt_mmap_info *ip;

        unsigned long timeout_jiffies;  /* computed from timeout */

        int srate_mbps;         /* s_srate (below) converted to Mbit/s */
        pid_t pid;              /* pid for user mode QPs */
        u32 remote_qpn;
        u32 qkey;               /* QKEY for this QP (for UD or RD) */
        u32 s_size;             /* send work queue size */

        u16 pmtu;               /* decoded from path_mtu */
        u8 log_pmtu;            /* shift for pmtu */
        u8 state;               /* QP state */
        u8 allowed_ops;         /* high order bits of allowed opcodes */
        u8 qp_access_flags;
        u8 alt_timeout;         /* Alternate path timeout for this QP */
        u8 timeout;             /* Timeout for this QP */
        u8 s_srate;
        u8 s_mig_state;
        u8 port_num;
        u8 s_pkey_index;        /* PKEY index to use */
        u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
        u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
        u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
        u8 s_retry_cnt;         /* number of times to retry */
        u8 s_rnr_retry_cnt;
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_draining;

        /* start of read/write fields */
        atomic_t refcount ____cacheline_aligned_in_smp;
        wait_queue_head_t wait;

        struct rvt_ack_entry *s_ack_queue;
        struct rvt_sge_state s_rdma_read_sge;

        spinlock_t r_lock ____cacheline_aligned_in_smp;  /* used for APM */
        u32 r_psn;              /* expected rcv packet sequence number */
        unsigned long r_aflags;
        u64 r_wr_id;            /* ID for current receive WQE */
        u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
        u32 r_len;              /* total length of r_sge */
        u32 r_rcv_len;          /* receive data len processed */
        u32 r_msn;              /* message sequence number */

        u8 r_state;             /* opcode of last packet received */
        u8 r_flags;
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */
        u8 r_adefered;          /* deferred ack count */

        struct list_head rspwait;       /* link for waiting to respond */

        struct rvt_sge_state r_sge;     /* current receive data */
        struct rvt_rq r_rq;             /* receive work queue */

        /* post send line */
        spinlock_t s_hlock ____cacheline_aligned_in_smp;
        u32 s_head;             /* new entries added here */
        u32 s_next_psn;         /* PSN for next request */
        u32 s_avail;            /* number of entries avail */
        u32 s_ssn;              /* SSN of tail entry */
        atomic_t s_reserved_used; /* reserved entries in use */

        spinlock_t s_lock ____cacheline_aligned_in_smp;
        u32 s_flags;
        struct rvt_sge_state *s_cur_sge;
        struct rvt_swqe *s_wqe;
        struct rvt_sge_state s_sge;     /* current send request data */
        struct rvt_mregion *s_rdma_mr;
        u32 s_len;              /* total length of s_sge */
        u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
        u32 s_last_psn;         /* last response PSN processed */
        u32 s_sending_psn;      /* lowest PSN that is being sent */
        u32 s_sending_hpsn;     /* highest PSN that is being sent */
        u32 s_psn;              /* current packet sequence number */
        u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
        u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
        u32 s_tail;             /* next entry to process */
        u32 s_cur;              /* current work queue entry */
        u32 s_acked;            /* last un-ACK'ed entry */
        u32 s_last;             /* last completed entry */
        u32 s_lsn;              /* limit sequence number (credit) */
        u32 s_ahgpsn;           /* set to the psn in the copy of the header */
        u16 s_cur_size;         /* size of send packet in bytes */
        u16 s_rdma_ack_cnt;
        u8 s_hdrwords;          /* size of s_hdr in 32 bit words */
        s8 s_ahgidx;
        u8 s_state;             /* opcode of last packet sent */
        u8 s_ack_state;         /* opcode of packet to ACK */
        u8 s_nak_state;         /* non-zero if NAK is pending */
        u8 r_nak_state;         /* non-zero if NAK is pending */
        u8 s_retry;             /* requester retry counter */
        u8 s_rnr_retry;         /* requester RNR retry counter */
        u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
        u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
        u8 s_acked_ack_queue;   /* index into s_ack_queue[] */

        struct rvt_sge_state s_ack_rdma_sge;
        struct timer_list s_timer;
        struct hrtimer s_rnr_timer;

        atomic_t local_ops_pending;     /* number of fast_reg/local_inv reqs */

        /*
         * This sge list MUST be last. Do not add anything below here.
         */
        struct rvt_sge r_sg_list[] /* verified SGEs */
                ____cacheline_aligned_in_smp;
};

struct rvt_srq {
        struct ib_srq ibsrq;
        struct rvt_rq rq;
        struct rvt_mmap_info *ip;
        /* send signal when number of RWQEs < limit */
        u32 limit;
};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct rvt_qp, ibqp);
}

#define RVT_QPN_MAX             BIT(24)
#define RVT_QPNMAP_ENTRIES      (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE       (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK  (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK            IB_QPN_MASK

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
        void *page;
};

struct rvt_qpn_table {
        spinlock_t lock;        /* protect changes to the qp table */
        unsigned flags;         /* flags for QP0/1 allocated for each port */
        u32 last;               /* last QP number allocated */
        u32 nmaps;              /* size of the map table */
        u16 limit;
        u8 incr;
        /* bit map of free QP numbers other than 0/1 */
        struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
        u32 qp_table_size;
        u32 qp_table_bits;
        struct rvt_qp __rcu **qp_table;
        spinlock_t qpt_lock;    /* qptable lock */
        struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
        struct list_head list;
        struct rvt_qp *qp;
};

struct rvt_mcast_addr {
        union ib_gid mgid;
        u16 lid;
};

struct rvt_mcast {
        struct rb_node rb_node;
        struct rvt_mcast_addr mcast_addr;
        struct list_head qp_list;
        wait_queue_head_t wait;
        atomic_t refcount;
        int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq. This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
                                                unsigned n)
{
        return (struct rvt_swqe *)((char *)qp->s_wq +
                                   (sizeof(struct rvt_swqe) +
                                    qp->s_max_sge *
                                    sizeof(struct rvt_sge)) * n);
}

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq. This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
        return (struct rvt_rwqe *)
                ((char *)rq->kwq->curr_wq +
                 (sizeof(struct rvt_rwqe) +
                  rq->max_sge * sizeof(struct ib_sge)) * n);
}
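
/*
 * Illustrative worked example (an assumption, not in the original
 * header): with s_max_sge == 2, each send queue slot occupies
 * sizeof(struct rvt_swqe) + 2 * sizeof(struct rvt_sge) bytes, so entry n
 * starts at s_wq plus n times that stride. A scan of posted entries
 * might look like:
 *
 *      u32 i;
 *
 *      for (i = qp->s_acked; i != qp->s_head; ) {
 *              struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
 *              // ...inspect wqe->psn, wqe->length, etc...
 *              if (++i >= qp->s_size)
 *                      i = 0;
 *      }
 */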

/**
 * rvt_is_user_qp - return true if this is a user mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
        return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
        atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
        if (qp && atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
        int i;

        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct rvt_sge *sge = &wqe->sg_list[i];

                rvt_put_mr(sge->mr);
        }
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * a wqe relative reserved operation use.
 */
static inline void rvt_qp_wqe_reserve(
        struct rvt_qp *qp,
        struct rvt_swqe *wqe)
{
        atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @flags - send wqe flags
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
        if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
                atomic_dec(&qp->s_reserved_used);
                /* ensure no compiler re-order up to s_last change */
                smp_mb__after_atomic();
        }
}
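
/*
 * Illustrative pairing sketch (an assumption, not in the original
 * header): a driver posting a reserved operation marks the wqe and
 * bumps the counter, then releases it on completion before s_last
 * moves:
 *
 *      // post side
 *      wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
 *      rvt_qp_wqe_reserve(qp, wqe);
 *
 *      // completion side
 *      rvt_qp_wqe_unreserve(qp, wqe->wr.send_flags);
 *      // ...then advance qp->s_last...
 */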

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/*
 * Compare the lower 24 bits of the msn values.
 * The shift left by 8 moves bit 23 of the difference into the sign bit,
 * so 24-bit wraparound is handled correctly.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
        return (((int)a) - ((int)b)) << 8;
}
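
/*
 * Illustrative worked example (an assumption, not in the original
 * header): with a == 0x000001 and b == 0xffffff, the difference is 2
 * modulo 2^24; after the shift the result is positive, so a is
 * correctly treated as "after" b across the 24-bit wrap:
 *
 *      rvt_cmp_msn(0x000001, 0xffffff) > 0
 *      rvt_cmp_msn(0x7fffff, 0x000000) > 0
 *      rvt_cmp_msn(0x800000, 0x000000) < 0
 */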

__be32 rvt_compute_aeth(struct rvt_qp *qp);

void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - round up divide
 * @qp - the queue pair
 * @len - the length
 *
 * Perform a shift based mtu round up divide
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
        return (len + qp->pmtu - 1) >> qp->log_pmtu;
}

/**
 * rvt_div_mtu - mtu divide
 * @qp - the queue pair
 * @len - the length
 *
 * Perform a shift based mtu divide
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
        return len >> qp->log_pmtu;
}
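
/*
 * Illustrative worked example (an assumption, not in the original
 * header): with pmtu == 4096 and log_pmtu == 12, a 5000 byte request
 * gives:
 *
 *      rvt_div_round_up_mtu(qp, 5000) == 2   // (5000 + 4095) >> 12
 *      rvt_div_mtu(qp, 5000)          == 1   // 5000 >> 12
 *
 * i.e. two packets are needed, and one full MTU has been consumed.
 */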

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
        if (timeout > 31)
                timeout = 31;

        return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
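
/*
 * Illustrative worked example (an assumption, not in the original
 * header): the IB timeout field encodes 4.096 usec * 2^timeout, so
 * timeout == 14 gives 4.096 usec * 16384, roughly 67 ms; the helper
 * approximates this as usecs_to_jiffies(1 << 14) * 4096 / 1000.
 */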

/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
                                            struct rvt_ibport *rvp,
                                            u32 qpn) __must_hold(RCU)
{
        struct rvt_qp *qp = NULL;

        if (unlikely(qpn <= 1)) {
                qp = rcu_dereference(rvp->qp[qpn]);
        } else {
                u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

                for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
                        qp = rcu_dereference(qp->next))
                        if (qp->ibqp.qp_num == qpn)
                                break;
        }
        return qp;
}
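
/*
 * Illustrative usage sketch (an assumption, not in the original
 * header): callers bracket the lookup and all use of the returned qp
 * with the RCU read lock, taking a reference if the qp must outlive it:
 *
 *      rcu_read_lock();
 *      qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *      if (qp) {
 *              rvt_get_qp(qp);
 *              // ...use qp...
 *      }
 *      rcu_read_unlock();
 *      // ...later: rvt_put_qp(qp);
 */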

/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 * Modify a potentially already running retry timer
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        lockdep_assert_held(&qp->s_lock);
        qp->s_flags |= RVT_S_TIMER;
        /* 4.096 usec. * (1 << qp->timeout) */
        mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
                  (qp->timeout_jiffies << shift));
}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
        rvt_mod_retry_timer_ext(qp, 0);
}

/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * This drops any references held by the swqe
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
        rvt_put_swqe(wqe);
        if (qp->allowed_ops == IB_OPCODE_UD)
                rdma_destroy_ah_attr(wqe->ud_wr.attr);
}

/**
 * rvt_qp_swqe_incr - increment ring index
 * @qp: the qp
 * @val: the starting value
 *
 * Return: the new value wrapping as appropriate
 */
static inline u32
rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
{
        if (++val >= qp->s_size)
                val = 0;
        return val;
}

int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

/**
 * rvt_recv_cq - add a new entry to completion queue
 *      by receive queue
 * @qp: receive queue
 * @wc: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This is a wrapper around rvt_cq_enter() for the receive queue. If
 * rvt_cq_enter() returns false, the cq is full and the qp is put into
 * the error state.
 */
static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
                               bool solicited)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);

        if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
                rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_send_cq - add a new entry to completion queue
 *      by send queue
 * @qp: send queue
 * @wc: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This is a wrapper around rvt_cq_enter() for the send queue. If
 * rvt_cq_enter() returns false, the cq is full and the qp is put into
 * the error state.
 */
static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
                               bool solicited)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);

        if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
                rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_qp_complete_swqe - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - wc operation (driver dependent)
 * @status - completion status
 *
 * Update the s_last information, and then insert a send completion into
 * the completion queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 *
 * Return: new last
 */
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
                     struct rvt_swqe *wqe,
                     enum ib_wc_opcode opcode,
                     enum ib_wc_status status)
{
        bool need_completion;
        u64 wr_id;
        u32 byte_len, last;
        int flags = wqe->wr.send_flags;

        rvt_qp_wqe_unreserve(qp, flags);
        rvt_put_qp_swqe(qp, wqe);

        need_completion =
                !(flags & RVT_SEND_RESERVE_USED) &&
                (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
                (flags & IB_SEND_SIGNALED) ||
                status != IB_WC_SUCCESS);
        if (need_completion) {
                wr_id = wqe->wr.wr_id;
                byte_len = wqe->length;
                /* above fields required before writing s_last */
        }
        last = rvt_qp_swqe_incr(qp, qp->s_last);
        /* see rvt_qp_is_avail() */
        smp_store_release(&qp->s_last, last);
        if (need_completion) {
                struct ib_wc w = {
                        .wr_id = wr_id,
                        .status = status,
                        .opcode = opcode,
                        .qp = &qp->ibqp,
                        .byte_len = byte_len,
                };
                rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
        }
        return last;
}

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
        rvt_add_retry_timer_ext(qp, 0);
}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
                  void *data, u32 length,
                  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
                       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
        struct rvt_qp *qp;
        /* private: backpointer */
        struct rvt_dev_info *rdi;
        /* private: callback routine */
        void (*cb)(struct rvt_qp *qp, u64 v);
        /* private: for arg to callback routine */
        u64 v;
        /* private: number of SMI,GSI QPs for device */
        int specials;
        /* private: current iterator index */
        int n;
};

/**
 * ib_cq_tail - Return tail index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get tail
 * of cq buffer.
 */
static inline u32 ib_cq_tail(struct ib_cq *send_cq)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

        return cq->ip ?
               RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
               cq->kqueue->tail;
}

/**
 * ib_cq_head - Return head index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get head
 * of cq buffer.
 */
static inline u32 ib_cq_head(struct ib_cq *send_cq)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

        return cq->ip ?
               RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
               cq->kqueue->head;
}

/**
 * rvt_free_rq - free memory allocated for rvt_rq struct
 * @rq: request queue data structure
 *
 * This function should only be called if the rvt_mmap_info()
 * has not succeeded.
 */
static inline void rvt_free_rq(struct rvt_rq *rq)
{
        kvfree(rq->kwq);
        rq->kwq = NULL;
        vfree(rq->wq);
        rq->wq = NULL;
}

/**
 * rvt_to_iport - Get the ibport pointer
 * @qp: the qp pointer
 *
 * This function returns the ibport pointer from the qp pointer.
 */
static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
{
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        return rdi->ports[qp->port_num - 1];
}

/**
 * rvt_rc_credit_avail - Check if there are enough RC credits for the request
 * @qp: the qp
 * @wqe: the request
 *
 * This function returns false when there are not enough credits for the
 * given request and true otherwise.
 */
static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
        lockdep_assert_held(&qp->s_lock);
        if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
            rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
                struct rvt_ibport *rvp = rvt_to_iport(qp);

                qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
                rvp->n_rc_crwaits++;
                return false;
        }
        return true;
}

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
                                     u64 v,
                                     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
                 u64 v,
                 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
#endif /* DEF_RDMAVT_INCQP_H */