/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/io.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/* cut down ridiculously long IB macro names */
#define OP(x) RC_OP(x)
/**
 * hfi1_add_retry_timer - add/start a retry timer
 * @qp - the QP
 *
 * add a retry timer on the QP
 */
static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
			      rdi->busy_jiffies;
	add_timer(&qp->s_timer);
}
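/*
 * Example (informational, not from the original source): with a typical
 * IB timeout exponent of 14, qp->timeout_jiffies corresponds to about
 * 4.096 usec * (1 << 14), i.e. roughly 67 msec before the retry fires.
 */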
/**
 * hfi1_add_rnr_timer - add/start an rnr timer
 * @qp - the QP
 * @to - timeout in usecs
 *
 * add an rnr timer on the QP
 */
void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
	add_timer(&priv->s_rnr_timer);
}
/**
 * hfi1_mod_retry_timer - mod a retry timer
 * @qp - the QP
 *
 * Modify a potentially already running retry
 * timer
 */
static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
		  rdi->busy_jiffies);
}
/**
 * hfi1_stop_retry_timer - stop a retry timer
 * @qp - the QP
 *
 * stop a retry timer and return if the timer
 * stopped or not
 */
static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
{
	int rval = 0;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry */
	if (qp->s_flags & RVT_S_TIMER) {
		qp->s_flags &= ~RVT_S_TIMER;
		rval = del_timer(&qp->s_timer);
	}
	return rval;
}
/**
 * hfi1_stop_rc_timers - stop all timers
 * @qp - the QP
 *
 * stop any pending timers
 */
void hfi1_stop_rc_timers(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		del_timer(&priv->s_rnr_timer);
	}
}
/**
 * hfi1_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop an rnr timer and return if the timer
 * stopped or not
 */
static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
{
	int rval = 0;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		rval = del_timer(&priv->s_rnr_timer);
	}
	return rval;
}
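/*
 * Note: unlike the helpers above, hfi1_del_timers_sync() below must be
 * called *without* the QP s_lock held: del_timer_sync() waits for a
 * running timeout handler to finish, and the handlers themselves take
 * s_lock (see hfi1_rc_timeout()).
 */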
/**
 * hfi1_del_timers_sync - wait for any timeout routines to exit
 * @qp - the QP
 */
void hfi1_del_timers_sync(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	del_timer_sync(&qp->s_timer);
	del_timer_sync(&priv->s_rnr_timer);
}
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	hfi1_skip_sge(ss, len, 0);
	return wqe->length - len;
}
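/*
 * Example (informational): restarting at psn == wqe->psn + 2 with a
 * 4096-byte path MTU skips 2 * 4096 bytes of already-sent payload and
 * returns the number of bytes that remain to be (re)sent.
 */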
/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct ib_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			ps->s_txreq->ss = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			ps->s_txreq->ss = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = hfi1_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		ps->s_txreq->ss = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					    (qp->s_nak_state <<
					     HFI1_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = hfi1_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| RVT_S_AHG_VALID);
	return 0;
}
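/*
 * Note: the switch in make_rc_ack() acts as the responder-side state
 * machine: it either continues streaming RDMA read response data,
 * acknowledges an atomic, or sends a bare ACK/NAK, advancing
 * s_tail_ack_queue only once a queued response has been fully sent.
 */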
/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	u32 hwords = 5;
	u32 len;
	u32 bth0 = 0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;

	lockdep_assert_held(&qp->s_lock);
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	ohdr = &ps->s_txreq->phdr.hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;

	/* Sending responses has higher priority over sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
				   IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately
			 * after all prior requests have completed
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				hfi1_send_complete(qp, wqe,
						   err ? IB_WC_LOC_PROT_ERR
						       : IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				qp->s_hdrwords = 0;
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
						wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		} else {
			qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
			/* invalidate data comes after the BTH */
			ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		put_ib_reth_vaddr(
			wqe->rdma_wr.remote_addr + len,
			&ohdr->u.rc.reth);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = ss;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(
		qp,
		ohdr,
		bth0 | (qp->s_state << 24),
		bth2,
		middle,
		ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}
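/*
 * Note: on the failure paths above the txreq is returned to its pool
 * and RVT_S_BUSY is cleared so the send engine can be rescheduled
 * later; returning 0 tells the caller that no packet was built.
 */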
/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and send engine.
 */
void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
		      int is_fecn)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc, pbc_flags = 0;
	u16 lrh0;
	u16 sc5;
	u32 bth0;
	u32 hwords;
	u32 vl, plen;
	struct send_context *sc;
	struct pio_buf *pbuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	unsigned long flags;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING)
		goto queue_ack;

	/* Ensure s_rdma_ack_cnt changes are committed */
	smp_read_barrier_depends();
	if (qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header */
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
					&qp->remote_ah_attr.grh, hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}
	/* read pkey_index w/o lock (its atomic) */
	bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					   (qp->r_nak_state <<
					    HFI1_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = hfi1_compute_aeth(qp);
	sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
	pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
	lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (driver_lstate(ppd) != IB_PORT_ACTIVE)
		return;

	sc = rcd->sc;
	plen = 2 /* PBC */ + hwords;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);

	pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
	if (!pbuf) {
		/*
		 * We have no room to send at the moment.  Pass
		 * responsibility for sending the ACK to the send engine
		 * so that when enough buffer space becomes available,
		 * the ACK is sent ahead of other outgoing packets.
		 */
		goto queue_ack;
	}

	trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);

	return;

queue_ack:
	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;
	this_cpu_inc(*ibp->rvp.rc_qacks);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send engine. */
	hfi1_schedule_send(qp);
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
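/*
 * Note: the fast path above writes the ACK directly into a PIO buffer;
 * queue_ack is the fallback that hands the ACK to the send engine
 * whenever a response is already pending, the link has no buffer
 * space, or RDMA read/atomic responses are outstanding.
 */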
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	lockdep_assert_held(&qp->s_lock);
	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since its only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send engine can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~RVT_S_AHG_VALID;
}
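/*
 * Note: the RDMA_READ_RESPONSE_* values assigned above are used purely
 * as sentinels; hfi1_make_rc_req() decodes them to restart a SEND,
 * RDMA write, or RDMA read in mid-request without disturbing the
 * state used for new requests.
 */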
/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}
/*
 * This is called from s_timer for missing responses.
 */
void hfi1_rc_timeout(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		ibp = to_iport(qp->ibqp.device, qp->port_num);
		ibp->rvp.n_rc_timeouts++;
		qp->s_flags &= ~RVT_S_TIMER;
		del_timer(&qp->s_timer);
		trace_hfi1_timeout(qp, qp->s_last_psn + 1);
		restart_rc(qp, qp->s_last_psn + 1, 1);
		hfi1_schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
 * This is called from s_timer for RNR timeouts.
 */
void hfi1_rc_rnr_retry(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	hfi1_stop_rnr_timer(qp);
	hfi1_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	lockdep_assert_held(&qp->s_lock);
	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}
/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 opcode;
	u32 psn;

	lockdep_assert_held(&qp->s_lock);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags &
	      (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		hfi1_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_sendcomplete(qp, psn);
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}
static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}
/*
 * Generate a SWQE completion.
 * This is similar to hfi1_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct hfi1_ibport *ibp)
{
	lockdep_assert_held(&qp->s_lock);
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress not running attempt to progress
		 * sdma queue.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_cur = qp->s_acked;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}
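/*
 * Note: when the SWQE is still being (re)sent, the completion above is
 * counted as delayed (rc_delayed_comp) and deferred so the SGEs remain
 * valid until the send engine has finished with them.
 */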
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * May be called at interrupt level, with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;
	unsigned long to;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include an ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	switch (aeth >> 29) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * mod the retry timer.
			 */
			hfi1_mod_retry_timer(qp);
			/*
			 * We can stop re-sending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (cmp_psn(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			hfi1_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		hfi1_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		hfi1_stop_rc_timers(qp);
		to =
			ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
					  HFI1_AETH_CREDIT_MASK];
		hfi1_add_rnr_timer(qp, to);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
			HFI1_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				hfi1_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached  */
bail_stop:
	hfi1_stop_rc_timers(qp);
	return ret;
}
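/*
 * Note: in do_rc_ack() above, aeth >> 29 extracts the top three bits
 * of the 8-bit AETH syndrome, which classify the response:
 * 0 = ACK, 1 = RNR NAK, 2 = reserved, 3 = NAK (per the IB spec).
 */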
/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry timer */
	hfi1_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
/**
 * rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_ibport *ibp,
			struct ib_other_headers *ohdr,
			void *data, u32 tlen, struct rvt_qp *qp,
			u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
			struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u64 val = 0;
	u32 aeth;
	u32 pad;

	spin_lock_irqsave(&qp->s_lock, flags);

	trace_hfi1_ack(qp, psn);

	/* Ignore invalid responses. */
	smp_read_barrier_depends(); /* see post_one_send */
	if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> 29) == 0)
				hfi1_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		qp->s_flags |= RVT_S_TIMER;
		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0, 0);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 4)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 4;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		hfi1_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	rvt_put_qp(qp);
}
/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	trace_hfi1_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = HFI1_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send engine is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}
void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > HFI1_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}
static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	/* keep timestamp in units of 1.024 usec */
	cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;

	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	ccti = cca_timer->ccti;

	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}
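/*
 * Note: process_becn() implements the local congestion-control
 * reaction to a BECN: bump the per-SL CCTI (capped at ccti_limit),
 * tighten the inter-packet gap via set_link_ipg(), and arm the
 * hrtimer that later decays CCTI; events at or above the trigger
 * threshold are recorded through log_cca_event().
 */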
2078 * hfi1_rc_rcv - process an incoming RC packet
2079 * @rcd: the context pointer
2080 * @hdr: the header of this packet
2081 * @rcv_flags: flags relevant to rcv processing
2082 * @data: the packet data
2083 * @tlen: the packet length
2084 * @qp: the QP for this packet
2086 * This is called from qp_rcv() to process an incoming RC packet
2088 * May be called at interrupt level.
2090 void hfi1_rc_rcv(struct hfi1_packet
*packet
)
2092 struct hfi1_ctxtdata
*rcd
= packet
->rcd
;
2093 struct ib_header
*hdr
= packet
->hdr
;
2094 u32 rcv_flags
= packet
->rcv_flags
;
2095 void *data
= packet
->ebuf
;
2096 u32 tlen
= packet
->tlen
;
2097 struct rvt_qp
*qp
= packet
->qp
;
2098 struct hfi1_ibport
*ibp
= to_iport(qp
->ibqp
.device
, qp
->port_num
);
2099 struct ib_other_headers
*ohdr
= packet
->ohdr
;
2101 u32 hdrsize
= packet
->hlen
;
2105 u32 pmtu
= qp
->pmtu
;
2107 struct ib_reth
*reth
;
2108 unsigned long flags
;
2109 int ret
, is_fecn
= 0;
2113 lockdep_assert_held(&qp
->r_lock
);
2114 bth0
= be32_to_cpu(ohdr
->bth
[0]);
2115 if (hfi1_ruc_check_hdr(ibp
, hdr
, rcv_flags
& HFI1_HAS_GRH
, qp
, bth0
))
2118 is_fecn
= process_ecn(qp
, packet
, false);
2120 psn
= be32_to_cpu(ohdr
->bth
[2]);
2121 opcode
= (bth0
>> 24) & 0xff;
2124 * Process responses (ACKs) before anything else. Note that the
2125 * packet sequence number will be for something in the send work
2126 * queue rather than the expected receive packet sequence number.
2127 * In other words, this QP is the requester.
2129 if (opcode
>= OP(RDMA_READ_RESPONSE_FIRST
) &&
2130 opcode
<= OP(ATOMIC_ACKNOWLEDGE
)) {
2131 rc_rcv_resp(ibp
, ohdr
, data
, tlen
, qp
, opcode
, psn
,
2132 hdrsize
, pmtu
, rcd
);
2138 /* Compute 24 bits worth of difference. */
2139 diff
= delta_psn(psn
, qp
->r_psn
);
2140 if (unlikely(diff
)) {
2141 if (rc_rcv_error(ohdr
, data
, qp
, opcode
, psn
, diff
, rcd
))
2146 /* Check for opcode sequence errors. */
2147 switch (qp
->r_state
) {
2148 case OP(SEND_FIRST
):
2149 case OP(SEND_MIDDLE
):
2150 if (opcode
== OP(SEND_MIDDLE
) ||
2151 opcode
== OP(SEND_LAST
) ||
2152 opcode
== OP(SEND_LAST_WITH_IMMEDIATE
) ||
2153 opcode
== OP(SEND_LAST_WITH_INVALIDATE
))
2157 case OP(RDMA_WRITE_FIRST
):
2158 case OP(RDMA_WRITE_MIDDLE
):
2159 if (opcode
== OP(RDMA_WRITE_MIDDLE
) ||
2160 opcode
== OP(RDMA_WRITE_LAST
) ||
2161 opcode
== OP(RDMA_WRITE_LAST_WITH_IMMEDIATE
))
2166 if (opcode
== OP(SEND_MIDDLE
) ||
2167 opcode
== OP(SEND_LAST
) ||
2168 opcode
== OP(SEND_LAST_WITH_IMMEDIATE
) ||
2169 opcode
== OP(SEND_LAST_WITH_INVALIDATE
) ||
2170 opcode
== OP(RDMA_WRITE_MIDDLE
) ||
2171 opcode
== OP(RDMA_WRITE_LAST
) ||
2172 opcode
== OP(RDMA_WRITE_LAST_WITH_IMMEDIATE
))
2175 * Note that it is up to the requester to not send a new
2176 * RDMA read or atomic operation before receiving an ACK
2177 * for the previous operation.
2182 if (qp
->state
== IB_QPS_RTR
&& !(qp
->r_flags
& RVT_R_COMM_EST
))
2185 /* OK, process the packet. */
2187 case OP(SEND_FIRST
):
2188 ret
= hfi1_rvt_get_rwqe(qp
, 0);
2195 case OP(SEND_MIDDLE
):
2196 case OP(RDMA_WRITE_MIDDLE
):
2198 /* Check for invalid length PMTU or posted rwqe len. */
2199 if (unlikely(tlen
!= (hdrsize
+ pmtu
+ 4)))
2201 qp
->r_rcv_len
+= pmtu
;
2202 if (unlikely(qp
->r_rcv_len
> qp
->r_len
))
2204 hfi1_copy_sge(&qp
->r_sge
, data
, pmtu
, 1, 0);
2207 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE
):
2209 ret
= hfi1_rvt_get_rwqe(qp
, 1);
2217 case OP(SEND_ONLY_WITH_IMMEDIATE
):
2218 case OP(SEND_ONLY_WITH_INVALIDATE
):
2219 ret
= hfi1_rvt_get_rwqe(qp
, 0);
2225 if (opcode
== OP(SEND_ONLY
))
2226 goto no_immediate_data
;
2227 if (opcode
== OP(SEND_ONLY_WITH_INVALIDATE
))
2229 /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
2230 case OP(SEND_LAST_WITH_IMMEDIATE
):
2232 wc
.ex
.imm_data
= ohdr
->u
.imm_data
;
2233 wc
.wc_flags
= IB_WC_WITH_IMM
;
2235 case OP(SEND_LAST_WITH_INVALIDATE
):
2237 rkey
= be32_to_cpu(ohdr
->u
.ieth
);
2238 if (rvt_invalidate_rkey(qp
, rkey
))
2239 goto no_immediate_data
;
2240 wc
.ex
.invalidate_rkey
= rkey
;
2241 wc
.wc_flags
= IB_WC_WITH_INVALIDATE
;
2243 case OP(RDMA_WRITE_LAST
):
2244 copy_last
= ibpd_to_rvtpd(qp
->ibqp
.pd
)->user
;
2251 /* Get the number of bytes the message was padded by. */
2252 pad
= (bth0
>> 20) & 3;
2253 /* Check for invalid length. */
2254 /* LAST len should be >= 1 */
2255 if (unlikely(tlen
< (hdrsize
+ pad
+ 4)))
2257 /* Don't count the CRC. */
2258 tlen
-= (hdrsize
+ pad
+ 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (bth0 & IB_BTH_SOLICITED) != 0);
		break;
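	/*
	 * The last argument to rvt_cq_enter() tells rdmavt whether this
	 * completion may also trigger the completion-channel event: that
	 * only happens when the sender set the solicited bit in the BTH
	 * and the receiver armed the CQ for solicited events.
	 */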
	case OP(RDMA_WRITE_ONLY):
		copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
		/* fall through */
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
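		/*
		 * A zero-length RDMA write carries no data, so no rkey
		 * check is needed; the SGE is cleared to a harmless empty
		 * state in case later code walks it.
		 */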
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += rvt_div_mtu(qp, len - 1);
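			/*
			 * Example: a 9000-byte read with a 4096-byte MTU
			 * spans three response packets; rvt_div_mtu(qp,
			 * 8999) adds 2 here and the common "qp->r_psn++"
			 * below accounts for the third.
			 */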
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}
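	/*
	 * Read responses are not generated here: RVT_S_RESP_PENDING makes
	 * the scheduled send engine walk s_ack_queue and stream the RDMA
	 * READ response packets from the saved rdma_sge.
	 */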
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     get_ib_ateth_compare(ateth),
				     sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
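		/*
		 * Both atomics operate on exactly 8 bytes, which is why the
		 * target must be 8-byte aligned: FETCH_ADD returns the
		 * pre-add value (atomic64_add_return() minus the addend),
		 * and COMPARE_SWAP returns whatever cmpxchg() read.  The
		 * result is stashed in e->atomic_data so the ACK can carry
		 * it back, including for duplicate requests.
		 */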
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}
	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK) {
		struct hfi1_qp_priv *priv = qp->priv;

		if (packet->numpkt == 0) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (priv->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (unlikely(is_fecn)) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		priv->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;
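	/*
	 * ACK coalescing: rather than acking every packet, acknowledgments
	 * are deferred (r_adefered) and piggybacked later.  An explicit
	 * ACK is forced when the HFI1_PSN_CREDIT budget is exhausted, when
	 * a FECN mark must be echoed, or when no further packets remain in
	 * this receive burst to defer behind.
	 */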
rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;
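	/*
	 * In the AETH syndrome, IB_RNR_NAK marks the NAK as RNR and the
	 * low five bits carry the encoded minimum retry delay
	 * (qp->r_min_rnr_timer), telling the requester how long to back
	 * off before resending.
	 */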
nack_op_err:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(rcd, qp, is_fecn);
}
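/**
 * hfi1_rc_hdrerr - handle a header error on an RC packet
 * @rcd - the receive context the packet arrived on
 * @hdr - the packet header
 * @rcv_flags - receive flags (GRH presence, etc.)
 * @qp - the destination QP
 *
 * Called when a header error is detected so an out-of-sequence NAK
 * can still be generated for requester packets.
 */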
void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct ib_header *hdr,
	u32 rcv_flags,
	struct rvt_qp *qp)
{
	int has_grh = rcv_flags & HFI1_HAS_GRH;
	struct ib_other_headers *ohdr;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	int diff;
	u32 opcode;
	u32 psn, bth0;
	/* Check for GRH */
	ohdr = &hdr->u.oth;
	if (has_grh)
		ohdr = &hdr->u.l.oth;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}