#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>

/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID	0
#define RVT_R_REWIND_SGE	1

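/*
 * These bits are manipulated with the atomic bitops; a minimal sketch of
 * the intended pattern (illustrative only; complete_recv_wqe() is a
 * hypothetical helper, not part of this API):
 *
 *	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
 *		complete_recv_wqe(qp, qp->r_wr_id);
 */
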
/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE	0x01
#define RVT_R_RDMAR_SEQ	0x02
#define RVT_R_RSP_NAK	0x04
#define RVT_R_RSP_SEND	0x08
#define RVT_R_COMM_EST	0x10

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs are individually signaled
 *                       (sq_sig_type == IB_SIGNAL_REQ_WR)
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_RESP_PENDING - a response to an RDMA read or atomic is pending
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  the next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_PIO_DRAIN - waiting for a QP to drain PIO packets
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_UNLIMITED_CREDIT - set to ignore the SSN credit limit
 * RVT_S_AHG_VALID - the cached AHG header for this QP is valid
 * RVT_S_AHG_CLEAR - the cached AHG state should be cleared
 * RVT_S_ECN - a BECN was queued to the send engine
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_PIO_DRAIN	0x0800
#define RVT_S_WAIT_TX		0x1000
#define RVT_S_WAIT_DMA_DESC	0x2000
#define RVT_S_WAIT_KMEM		0x4000
#define RVT_S_WAIT_PSN		0x8000
#define RVT_S_WAIT_ACK		0x10000
#define RVT_S_SEND_ONE		0x20000
#define RVT_S_UNLIMITED_CREDIT	0x40000
#define RVT_S_AHG_VALID		0x80000
#define RVT_S_AHG_CLEAR		0x100000
#define RVT_S_ECN		0x200000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
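
/*
 * A send engine would typically test these composites under s_lock before
 * making progress; a minimal sketch (illustrative only, not a required
 * calling sequence):
 *
 *	if (qp->s_flags & RVT_S_ANY_WAIT)
 *		return;		(some resource or ordering wait is pending)
 */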

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK	0xE0

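/*
 * Receive-side sanity checking would typically compare the masked wire
 * opcode against the QP's allowed_ops (the high order bits of the opcodes
 * the QP type allows; see struct rvt_qp below).  A minimal sketch
 * (illustrative only):
 *
 *	if ((opcode & RVT_OPCODE_QP_MASK) != qp->allowed_ops)
 *		goto drop;	(opcode class does not match the QP type)
 */
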
/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK		0x01
#define RVT_POST_RECV_OK		0x02
#define RVT_PROCESS_RECV_OK		0x04
#define RVT_PROCESS_SEND_OK		0x08
#define RVT_PROCESS_NEXT_SEND_OK	0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)

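/*
 * These flags are consumed by indexing ib_rvt_state_ops[] (declared at
 * the end of this header) with the current QP state; a minimal sketch
 * (illustrative only):
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
 *		goto drop;	(QP state does not allow receive processing)
 */
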
/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED		IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;	/* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct rvt_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use rvt_get_rwqe_ptr() instead.
 */
struct rvt_rwq {
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receive processing pulls requests from here */
	struct rvt_rwqe wq[0];
};

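/*
 * head and tail form a circular ring of "size" entries (the RWQE array
 * size from the owning struct rvt_rq); a sketch of the usual occupancy
 * computation (illustrative only):
 *
 *	count = (head >= tail) ? head - tail : size - (tail - head);
 */
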
struct rvt_rq {
	struct rvt_rwq *wq;
	u32 size;		/* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
};

#define RC_QP_SCALING_INTERVAL	5

#define RVT_OPERATION_PRIV		0x00000001
#define RVT_OPERATION_ATOMIC		0x00000002
#define RVT_OPERATION_ATOMIC_SGE	0x00000004
#define RVT_OPERATION_LOCAL		0x00000008
#define RVT_OPERATION_USE_RESERVE	0x00000010

#define RVT_OPERATION_MAX		(IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table driven post send so that
 * individual drivers can support potentially
 * different sets of operations.
 */
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};

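/*
 * A driver's post send table might be populated like this sketch
 * (illustrative only; the exact entries and sizes are driver specific):
 *
 *	static const struct rvt_operation_params post_parms
 *			[RVT_OPERATION_MAX] = {
 *		[IB_WR_RDMA_WRITE] = {
 *			.length = sizeof(struct ib_rdma_wr),
 *			.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *		},
 *		[IB_WR_SEND] = {
 *			.length = sizeof(struct ib_send_wr),
 *			.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_UC) |
 *				       BIT(IB_QPT_RC),
 *		},
 *	};
 */
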
/*
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv;		/* Driver private data */
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;	/* link list for QPN hash table */
	struct rvt_swqe *s_wq;	/* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;	/* computed from timeout */

	enum ib_mtu path_mtu;
	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_ahgpsn;		/* set to the psn in the copy of the header */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;		/* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;		/* Alternate path timeout for this QP */
	u8 timeout;		/* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_alt_pkey_index;	/* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;	/* used for APM */
	u32 r_psn;		/* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;		/* ID for current receive WQE */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_msn;		/* message sequence number */

	u8 r_state;		/* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */

	struct list_head rspwait;	/* link for waiting to respond */

	struct rvt_sge_state r_sge;	/* current receive data */
	struct rvt_rq r_rq;		/* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;		/* new entries added here */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_avail;		/* number of entries avail */
	u32 s_ssn;		/* SSN of tail entry */
	atomic_t s_reserved_used;	/* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;	/* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_sending_psn;	/* lowest PSN that is being sent */
	u32 s_sending_hpsn;	/* highest PSN that is being sent */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_acked;		/* last un-ACK'ed entry */
	u32 s_last;		/* last completed entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	s8 s_ahgidx;
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;

	atomic_t local_ops_pending;	/* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last.  Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[0]	/* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

#define RVT_QPN_MAX		BIT(24)
#define RVT_QPNMAP_ENTRIES	(RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE	(PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK	(RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		0xFFFFFF

/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock;	/* protect changes to the qp table */
	unsigned flags;		/* flags for QP0/1 allocated for each port */
	u32 last;		/* last QP number allocated */
	u32 nmaps;		/* size of the map table */
	u16 limit;
	u8 incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock;	/* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				   (sizeof(struct rvt_swqe) +
				    qp->s_max_sge *
				    sizeof(struct rvt_sge)) * n);
}

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->wq->wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

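/*
 * A typical consumer walks the ring with the accessor above; a minimal
 * sketch (illustrative only; r_rq.lock handling omitted):
 *
 *	struct rvt_rwq *wq = rq->wq;
 *	struct rvt_rwqe *wqe = rvt_get_rwqe_ptr(rq, wq->tail);
 *
 *	(consume wqe, then advance the tail index)
 *	if (++wq->tail >= rq->size)
 *		wq->tail = 0;
 */
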
/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

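/*
 * The reference count pairs with the wait queue above: a destroy path
 * typically waits for all transient holders to drop their references.
 * A minimal sketch (illustrative only):
 *
 *	rvt_get_qp(qp);		(e.g. while a packet references the QP)
 *	...
 *	rvt_put_qp(qp);
 *
 *	(in the destroy path)
 *	wait_event(qp->wait, !atomic_read(&qp->refcount));
 */
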
/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * that a wqe used a reserved operation.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-order up to s_last change */
		smp_mb__after_atomic();
	}
}

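/*
 * A completion path is expected to unreserve before publishing the new
 * ring position; a minimal sketch (illustrative only):
 *
 *	rvt_qp_wqe_unreserve(qp, wqe);
 *	rvt_put_swqe(wqe);
 *	qp->s_last = next;	(must follow the unreserve above)
 */
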
extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/**
 * rvt_qp_swqe_complete() - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @status - completion status
 *
 * Insert a send completion into the completion
 * queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 */
static inline void rvt_qp_swqe_complete(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe,
	enum ib_wc_status status)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
		return;
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		wc.byte_len = wqe->length;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}
}

/**
 * rvt_div_round_up_mtu - perform a shift based mtu round up divide
 * @qp - the queue pair
 * @len - the length
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}

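/*
 * Example: with a 4096 byte pmtu (log_pmtu == 12), a 9000 byte request
 * takes rvt_div_round_up_mtu(qp, 9000) == (9000 + 4095) >> 12 == 3
 * packets.
 */
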
/**
 * rvt_div_mtu - perform a shift based mtu divide
 * @qp - the queue pair
 * @len - the length
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

#endif /* DEF_RDMAVT_INCQP_H */