#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016, 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>

/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE         0x01
#define RVT_R_RDMAR_SEQ         0x02
#define RVT_R_RSP_NAK           0x04
#define RVT_R_RSP_SEND          0x08
#define RVT_R_COMM_EST          0x10

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs are individually signaled
 *                       (sq_sig_type == IB_SIGNAL_REQ_WR)
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_RESP_PENDING - a RDMA read or atomic response is pending
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  the next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_PIO_DRAIN - waiting for a qp to drain pio packets
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 */
#define RVT_S_SIGNAL_REQ_WR     0x0001
#define RVT_S_BUSY              0x0002
#define RVT_S_TIMER             0x0004
#define RVT_S_RESP_PENDING      0x0008
#define RVT_S_ACK_PENDING       0x0010
#define RVT_S_WAIT_FENCE        0x0020
#define RVT_S_WAIT_RDMAR        0x0040
#define RVT_S_WAIT_RNR          0x0080
#define RVT_S_WAIT_SSN_CREDIT   0x0100
#define RVT_S_WAIT_DMA          0x0200
#define RVT_S_WAIT_PIO          0x0400
#define RVT_S_WAIT_PIO_DRAIN    0x0800
#define RVT_S_WAIT_TX           0x1000
#define RVT_S_WAIT_DMA_DESC     0x2000
#define RVT_S_WAIT_KMEM         0x4000
#define RVT_S_WAIT_PSN          0x8000
#define RVT_S_WAIT_ACK          0x10000
#define RVT_S_SEND_ONE          0x20000
#define RVT_S_UNLIMITED_CREDIT  0x40000
#define RVT_S_AHG_VALID         0x80000
#define RVT_S_AHG_CLEAR         0x100000
#define RVT_S_ECN               0x200000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
        (RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN | RVT_S_WAIT_TX | \
         RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
        RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
        RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
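
/*
 * Illustrative sketch (not part of this header): a driver send engine
 * would typically test these aggregate masks under s_lock before making
 * progress, along the lines of:
 *
 *      spin_lock_irqsave(&qp->s_lock, flags);
 *      if (!(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_SEND)))
 *              ;       /* safe to advance the send state machine */
 *      spin_unlock_irqrestore(&qp->s_lock, flags);
 */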

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK      0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK        0x01
#define RVT_POST_RECV_OK        0x02
#define RVT_PROCESS_RECV_OK     0x04
#define RVT_PROCESS_SEND_OK     0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND          0x20
#define RVT_FLUSH_RECV          0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
        (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
        (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY        (IB_SEND_RESERVED_START << 1)

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
        union {
                struct ib_send_wr wr;   /* don't use wr.sg_list */
                struct ib_ud_wr ud_wr;
                struct ib_reg_wr reg_wr;
                struct ib_rdma_wr rdma_wr;
                struct ib_atomic_wr atomic_wr;
        };
        u32 psn;                /* first packet sequence number */
        u32 lpsn;               /* last packet sequence number */
        u32 ssn;                /* send sequence number */
        u32 length;             /* total length of data in sg_list */
        struct rvt_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
        u64 wr_id;
        u8 num_sge;
        struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use rvt_get_rwqe_ptr() instead.
 */
struct rvt_rwq {
        u32 head;               /* new work requests posted to the head */
        u32 tail;               /* receives pull requests from here. */
        struct rvt_rwqe wq[0];
};

struct rvt_rq {
        struct rvt_rwq *wq;
        u32 size;               /* size of RWQE array */
        u8 max_sge;
        /* protect changes in this struct */
        spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made. The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
        struct list_head pending_mmaps;
        struct ib_ucontext *context;
        void *obj;
        __u64 offset;
        struct kref ref;
        unsigned size;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
        struct rvt_sge rdma_sge;
        u64 atomic_data;
        u32 psn;
        u32 lpsn;
        u8 opcode;
        u8 sent;
};

#define RC_QP_SCALING_INTERVAL  5

#define RVT_OPERATION_PRIV              0x00000001
#define RVT_OPERATION_ATOMIC            0x00000002
#define RVT_OPERATION_ATOMIC_SGE        0x00000004
#define RVT_OPERATION_LOCAL             0x00000008
#define RVT_OPERATION_USE_RESERVE       0x00000010

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table driven post send so that
 * each driver can support a different, potentially
 * driver-specific, set of operations.
 */
struct rvt_operation_params {
        size_t length;
        u32 qpt_support;
        u32 flags;
};
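
/*
 * Illustrative sketch (not part of this header): a driver's post send
 * table might describe its RDMA write support like this, where the
 * hypo_post_parms name is hypothetical:
 *
 *      static const struct rvt_operation_params
 *                      hypo_post_parms[RVT_OPERATION_MAX] = {
 *              [IB_WR_RDMA_WRITE] = {
 *                      .length = sizeof(struct ib_rdma_wr),
 *                      .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *              },
 *      };
 */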

/*
 * Common variables are protected by both r_rq.lock and s_lock in that
 * order, which only happens in modify_qp() or when changing the QP
 * 'state'.
 */
struct rvt_qp {
        struct ib_qp ibqp;
        void *priv;             /* Driver private data */
        /* read mostly fields above and below */
        struct rdma_ah_attr remote_ah_attr;
        struct rdma_ah_attr alt_ah_attr;
        struct rvt_qp __rcu *next;      /* link list for QPN hash table */
        struct rvt_swqe *s_wq;  /* send work queue */
        struct rvt_mmap_info *ip;

        unsigned long timeout_jiffies;  /* computed from timeout */

        enum ib_mtu path_mtu;
        int srate_mbps;         /* s_srate (below) converted to Mbit/s */
        pid_t pid;              /* pid for user mode QPs */
        u32 remote_qpn;
        u32 qkey;               /* QKEY for this QP (for UD or RD) */
        u32 s_size;             /* send work queue size */
        u32 s_ahgpsn;           /* set to the psn in the copy of the header */

        u16 pmtu;               /* decoded from path_mtu */
        u8 log_pmtu;            /* shift for pmtu */
        u8 state;               /* QP state */
        u8 allowed_ops;         /* high order bits of allowed opcodes */
        u8 qp_access_flags;
        u8 alt_timeout;         /* Alternate path timeout for this QP */
        u8 timeout;             /* Timeout for this QP */
        u8 s_srate;
        u8 s_mig_state;
        u8 port_num;
        u8 s_pkey_index;        /* PKEY index to use */
        u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
        u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
        u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
        u8 s_retry_cnt;         /* number of times to retry */
        u8 s_rnr_retry_cnt;
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_draining;

        /* start of read/write fields */
        atomic_t refcount ____cacheline_aligned_in_smp;
        wait_queue_head_t wait;

        struct rvt_ack_entry *s_ack_queue;
        struct rvt_sge_state s_rdma_read_sge;

        spinlock_t r_lock ____cacheline_aligned_in_smp;  /* used for APM */
        u32 r_psn;              /* expected rcv packet sequence number */
        unsigned long r_aflags;
        u64 r_wr_id;            /* ID for current receive WQE */
        u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
        u32 r_len;              /* total length of r_sge */
        u32 r_rcv_len;          /* receive data len processed */
        u32 r_msn;              /* message sequence number */

        u8 r_state;             /* opcode of last packet received */
        u8 r_flags;
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */
        u8 r_adefered;          /* deferred ack count */

        struct list_head rspwait;       /* link for waiting to respond */

        struct rvt_sge_state r_sge;     /* current receive data */
        struct rvt_rq r_rq;             /* receive work queue */

        /* post send line */
        spinlock_t s_hlock ____cacheline_aligned_in_smp;
        u32 s_head;             /* new entries added here */
        u32 s_next_psn;         /* PSN for next request */
        u32 s_avail;            /* number of entries avail */
        u32 s_ssn;              /* SSN of tail entry */
        atomic_t s_reserved_used;       /* reserved entries in use */

        spinlock_t s_lock ____cacheline_aligned_in_smp;
        u32 s_flags;
        struct rvt_sge_state *s_cur_sge;
        struct rvt_swqe *s_wqe;
        struct rvt_sge_state s_sge;     /* current send request data */
        struct rvt_mregion *s_rdma_mr;
        u32 s_cur_size;         /* size of send packet in bytes */
        u32 s_len;              /* total length of s_sge */
        u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
        u32 s_last_psn;         /* last response PSN processed */
        u32 s_sending_psn;      /* lowest PSN that is being sent */
        u32 s_sending_hpsn;     /* highest PSN that is being sent */
        u32 s_psn;              /* current packet sequence number */
        u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
        u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
        u32 s_tail;             /* next entry to process */
        u32 s_cur;              /* current work queue entry */
        u32 s_acked;            /* last un-ACK'ed entry */
        u32 s_last;             /* last completed entry */
        u32 s_lsn;              /* limit sequence number (credit) */
        u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
        u16 s_rdma_ack_cnt;
        s8 s_ahgidx;
        u8 s_state;             /* opcode of last packet sent */
        u8 s_ack_state;         /* opcode of packet to ACK */
        u8 s_nak_state;         /* non-zero if NAK is pending */
        u8 r_nak_state;         /* non-zero if NAK is pending */
        u8 s_retry;             /* requester retry counter */
        u8 s_rnr_retry;         /* requester RNR retry counter */
        u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
        u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

        struct rvt_sge_state s_ack_rdma_sge;
        struct timer_list s_timer;
        struct hrtimer s_rnr_timer;

        atomic_t local_ops_pending;     /* number of fast_reg/local_inv reqs */

        /*
         * This sge list MUST be last. Do not add anything below here.
         */
        struct rvt_sge r_sg_list[0]     /* verified SGEs */
                ____cacheline_aligned_in_smp;
};

struct rvt_srq {
        struct ib_srq ibsrq;
        struct rvt_rq rq;
        struct rvt_mmap_info *ip;
        /* send signal when number of RWQEs < limit */
        u32 limit;
};

#define RVT_QPN_MAX             BIT(24)
#define RVT_QPNMAP_ENTRIES      (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE       (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK  (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK            0xFFFFFF

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
        void *page;
};

struct rvt_qpn_table {
        spinlock_t lock;        /* protect changes to the qp table */
        unsigned flags;         /* flags for QP0/1 allocated for each port */
        u32 last;               /* last QP number allocated */
        u32 nmaps;              /* size of the map table */
        u16 limit;
        u8 incr;
        /* bit map of free QP numbers other than 0/1 */
        struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
        u32 qp_table_size;
        u32 qp_table_bits;
        struct rvt_qp __rcu **qp_table;
        spinlock_t qpt_lock;    /* qptable lock */
        struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
        struct list_head list;
        struct rvt_qp *qp;
};

struct rvt_mcast_addr {
        union ib_gid mgid;
        u16 lid;
};

struct rvt_mcast {
        struct rb_node rb_node;
        struct rvt_mcast_addr mcast_addr;
        struct list_head qp_list;
        wait_queue_head_t wait;
        atomic_t refcount;
        int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq. This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
                                                unsigned n)
{
        return (struct rvt_swqe *)((char *)qp->s_wq +
                                   (sizeof(struct rvt_swqe) +
                                    qp->s_max_sge *
                                    sizeof(struct rvt_sge)) * n);
}
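
/*
 * Each element is sizeof(struct rvt_swqe) plus room for s_max_sge SGEs,
 * so the stride is computed at run time; e.g. (illustrative only):
 *
 *      struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 */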

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq. This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
        return (struct rvt_rwqe *)
                ((char *)rq->wq->wq +
                 (sizeof(struct rvt_rwqe) +
                  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/**
 * rvt_is_user_qp - return true if this is a user mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
        return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
        atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
        if (qp && atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}
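
/*
 * Every rvt_get_qp() must be balanced by an rvt_put_qp(); the final
 * put wakes up anyone sleeping on qp->wait for the last reference
 * (e.g. the QP destroy path).
 */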

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
        int i;

        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct rvt_sge *sge = &wqe->sg_list[i];

                rvt_put_mr(sge->mr);
        }
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * the use of a reserved operation for the given wqe.
 */
static inline void rvt_qp_wqe_reserve(
        struct rvt_qp *qp,
        struct rvt_swqe *wqe)
{
        atomic_inc(&qp->s_reserved_used);
}
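
/*
 * A reserved wqe is expected to carry RVT_SEND_RESERVE_USED in
 * wr.send_flags so that completion and unreserve handling can
 * recognize it later (see rvt_qp_wqe_unreserve() below).
 */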

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not reorder the s_last
 * ring index update and the decrement of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(
        struct rvt_qp *qp,
        struct rvt_swqe *wqe)
{
        if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
                atomic_dec(&qp->s_reserved_used);
                /* ensure no compiler re-order up to s_last change */
                smp_mb__after_atomic();
        }
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/**
 * rvt_qp_swqe_complete() - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - the wc opcode
 * @status - completion status
 *
 * Insert a send completion into the completion
 * queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 */
static inline void rvt_qp_swqe_complete(
        struct rvt_qp *qp,
        struct rvt_swqe *wqe,
        enum ib_wc_opcode opcode,
        enum ib_wc_status status)
{
        if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
                return;
        if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
             status != IB_WC_SUCCESS) {
                struct ib_wc wc;

                memset(&wc, 0, sizeof(wc));
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
                wc.opcode = opcode;
                wc.qp = &qp->ibqp;
                wc.byte_len = wqe->length;
                rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
                             status != IB_WC_SUCCESS);
        }
}
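
/*
 * For example, on a qp created with IB_SIGNAL_REQ_WR, a successful wqe
 * without IB_SEND_SIGNALED produces no CQE, while error completions
 * are always queued (and entered as solicited).
 */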

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
        return (((int)a) - ((int)b)) << 8;
}
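
/*
 * The shift discards the upper 8 bits, so the sign comes from bit 23 of
 * (a - b); e.g. rvt_cmp_msn(0, 0xFFFFFF) > 0, correctly treating MSN 0
 * as one past 0xFFFFFF across the 24-bit wrap.
 */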

/**
 * rvt_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 rvt_compute_aeth(struct rvt_qp *qp);

/**
 * rvt_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

/**
 * rvt_div_round_up_mtu - MTU round-up divide
 * @qp - the queue pair
 * @len - the length
 *
 * Perform a shift based mtu round up divide
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
        return (len + qp->pmtu - 1) >> qp->log_pmtu;
}
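
/*
 * E.g. with a 4096 byte PMTU (log_pmtu == 12), a 10000 byte message
 * divides into (10000 + 4095) >> 12 == 3 packets.
 */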

/**
 * rvt_div_mtu - MTU divide
 * @qp - the queue pair
 * @len - the length
 *
 * Perform a shift based mtu divide
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
        return len >> qp->log_pmtu;
}

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
        if (timeout > 31)
                timeout = 31;

        return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
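
/*
 * This implements the IBTA local ACK timeout formula of
 * 4.096 usec * 2^timeout; e.g. a timeout input of 14 works out to
 * roughly 67 msec.
 */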

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
void rvt_comm_est(struct rvt_qp *qp);
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer(struct rvt_qp *qp);

#endif          /* DEF_RDMAVT_INCQP_H */