/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"
/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 * (Each entry's comment gives the equivalent delay in milliseconds.)
 */
const u32 ib_hfi1_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01:    .01 */
	20,	/* 02:    .02 */
	30,	/* 03:    .03 */
	40,	/* 04:    .04 */
	60,	/* 05:    .06 */
	80,	/* 06:    .08 */
	120,	/* 07:    .12 */
	160,	/* 08:    .16 */
	240,	/* 09:    .24 */
	320,	/* 0A:    .32 */
	480,	/* 0B:    .48 */
	640,	/* 0C:    .64 */
	960,	/* 0D:    .96 */
	1280,	/* 0E:   1.28 */
	1920,	/* 0F:   1.92 */
	2560,	/* 10:   2.56 */
	3840,	/* 11:   3.84 */
	5120,	/* 12:   5.12 */
	7680,	/* 13:   7.68 */
	10240,	/* 14:  10.24 */
	15360,	/* 15:  15.36 */
	20480,	/* 16:  20.48 */
	30720,	/* 17:  30.72 */
	40960,	/* 18:  40.96 */
	61440,	/* 19:  61.44 */
	81920,	/* 1A:  81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};
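
/*
 * Illustrative sketch (not part of the driver): the 5-bit RNR timeout
 * code carried in bits 28:24 of a received AETH indexes the table above
 * directly.  The helper name below is hypothetical.
 */
static inline u32 example_rnr_delay_us(u32 aeth)
{
	/* mask off the credit/syndrome bits; keep the 5-bit timer code */
	return ib_hfi1_rnr_table[(aeth >> 24) & 0x1f];
}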
/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
/**
 * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
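
/*
 * Usage sketch (illustrative only): a responder path typically maps the
 * three return values of hfi1_rvt_get_rwqe() as shown.  The helper and
 * the errno choices here are hypothetical stand-ins for the real RC/UC
 * receive code.
 */
static inline int example_consume_rwqe(struct rvt_qp *qp)
{
	int ret = hfi1_rvt_get_rwqe(qp, 0);

	if (ret < 0)
		return -EIO;	/* local error: move the QP to error state */
	if (!ret)
		return -EAGAIN;	/* no RWQE available: send an RNR NAK */
	return 0;		/* got one: qp->r_wr_id and qp->r_sge are valid */
}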
static __be64 get_sguid(struct hfi1_ibport *ibp, unsigned index)
{
	if (!index) {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		return cpu_to_be64(ppd->guid);
	}
	return ibp->guids[index - 1];
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}
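
/*
 * Illustration (hypothetical helper, not driver code): gid_ok() accepts
 * the well-known default subnet prefix in addition to the caller-supplied
 * one, so a check against the port's configured prefix also passes for
 * GIDs built with the default prefix.
 */
static inline int example_dgid_ok(struct hfi1_ibport *ibp,
				  union ib_gid *dgid, __be64 expected_id)
{
	return gid_ok(dgid, ibp->rvp.gid_prefix, expected_id);
}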
/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
		       int has_grh, struct rvt_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;
	u8 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
				    guid))
				goto err;
			if (!gid_ok(
				&hdr->u.l.grh.sgid,
				qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
				qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
					    sc5, be16_to_cpu(hdr->lrh[3])))) {
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
				       (u16)bth0,
				       (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				       0, qp->ibqp.qp_num,
				       be16_to_cpu(hdr->lrh[3]),
				       be16_to_cpu(hdr->lrh[1]));
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
				    guid))
				goto err;
			if (!gid_ok(
			     &hdr->u.l.grh.sgid,
			     qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			     qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
					    sc5, be16_to_cpu(hdr->lrh[3])))) {
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
				       (u16)bth0,
				       (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				       0, qp->ibqp.qp_num,
				       be16_to_cpu(hdr->lrh[3]),
				       be16_to_cpu(hdr->lrh[1]));
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}
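
/*
 * Caller sketch (illustrative only): the receive path is expected to
 * hold the QP r_lock around this check and drop the packet when it
 * fails.  The helper below is a hypothetical wrapper, not driver code.
 */
static inline bool example_hdr_ok(struct hfi1_ibport *ibp,
				  struct hfi1_ib_header *hdr, int has_grh,
				  struct rvt_qp *qp, u32 bth0)
{
	bool ok;

	spin_lock(&qp->r_lock);
	ok = !hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0);
	spin_unlock(&qp->r_lock);
	return ok;
}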
/**
 * ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from hfi1_do_send() to
 * forward a WQE addressed to the same HFI.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ruc_loopback(struct rvt_qp *sqp)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;
	u32 to;

	rcu_read_lock();

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	smp_read_barrier_depends(); /* see post_one_send() */
	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
		if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
			wc.wc_flags = IB_WC_WITH_INVALIDATE;
			wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
		}
		goto send;

	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
send:
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	hfi1_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	to = ib_hfi1_rnr_table[qp->r_min_rnr_timer];
	hfi1_add_rnr_timer(sqp, to);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	hfi1_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	hfi1_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}
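
/*
 * Illustrative sketch: hfi1_do_send() below routes a request through
 * ruc_loopback() when the destination LID, with the low LMC bits masked
 * off, is this port's own LID.  In isolation the test looks like this
 * (hypothetical helper):
 */
static inline bool example_dlid_is_local(struct rvt_qp *qp,
					 struct hfi1_pportdata *ppd)
{
	u32 lid_mask = ~((1 << ppd->lmc) - 1);

	return (qp->remote_ah_attr.dlid & lid_mask) == ppd->lid;
}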
/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id =
		grh->sgid_index && grh->sgid_index < ARRAY_SIZE(ibp->guids) ?
		ibp->guids[grh->sgid_index - 1] :
		cpu_to_be64(ppd_from_ibp(ibp)->guid);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
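
/*
 * Worked example (illustrative): hdr->paylen counts the bytes that
 * follow the GRH, so the two LRH dwords included in @hwords are
 * subtracted back out.  For hwords = 5 (LRH + BTH), nwords = 256 data
 * dwords and SIZE_OF_CRC = 1, paylen = (5 - 2 + 256 + 1) * 4 = 1040
 * bytes.
 */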
#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, hdr.u.oth.bth[2]) / 4)
/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * copy of the first middle.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & RVT_S_AHG_VALID)) {
		/* first middle that needs copy */
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		if (qp->s_ahgidx >= 0) {
			qp->s_ahgpsn = npsn;
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
			/* save to protect a change in another thread */
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			qp->s_flags |= RVT_S_AHG_VALID;
		}
	} else {
		/* subsequent middle after valid */
		if (qp->s_ahgidx >= 0) {
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			priv->s_ahg->ahgcount++;
			priv->s_ahg->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET, 16, 16);
			if ((npsn & 0xffff0000) !=
			    (qp->s_ahgpsn & 0xffff0000)) {
				priv->s_ahg->ahgcount++;
				priv->s_ahg->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
							(u16)(npsn >> 16)),
						BTH2_OFFSET, 0, 16);
			}
		}
	}
}
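
/*
 * Illustrative sketch of the PSN-edit decision above: the low 16 bits of
 * BTH2 change on every packet and always need one AHG descriptor; the
 * high 16 bits only change when the PSN crosses a 64K boundary, which is
 * when the second descriptor is added.  Hypothetical helper:
 */
static inline int example_ahg_edits_needed(u32 npsn, u32 prev_psn)
{
	return ((npsn & 0xffff0000) != (prev_psn & 0xffff0000)) ? 2 : 1;
}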
void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
			  u32 bth0, u32 bth2, int middle,
			  struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth1;

	/* Construct the header. */
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = HFI1_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += hfi1_make_grh(ibp,
						&ps->s_txreq->phdr.hdr.u.l.grh,
						&qp->remote_ah_attr.grh,
						qp->s_hdrwords, nwords);
		lrh0 = HFI1_LRH_GRH;
		middle = 0;
	}
	lrh0 |= (priv->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	/*
	 * Reset the s_ahg/AHG fields.
	 *
	 * This ensures that the ahgentry/ahgcount
	 * are at a non-AHG default to protect
	 * build_verbs_tx_desc() from using
	 * an uninitialized occurrence.
	 *
	 * build_ahg() will modify as appropriate
	 * to use the AHG feature.
	 */
	priv->s_ahg->tx_flags = 0;
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	else
		middle = 0;
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~RVT_S_AHG_VALID;
	ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
	ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	ps->s_txreq->phdr.hdr.lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	bth1 = qp->remote_qpn;
	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		bth1 |= (HFI1_BECN_MASK << HFI1_BECN_SHIFT);
	}
	ohdr->bth[1] = cpu_to_be32(bth1);
	ohdr->bth[2] = cpu_to_be32(bth2);
}
/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */
void _hfi1_do_send(struct work_struct *work)
{
	struct iowait *wait = container_of(work, struct iowait, iowork);
	struct rvt_qp *qp = iowait_to_qp(wait);

	hfi1_do_send(qp);
}
/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void hfi1_do_send(struct rvt_qp *qp)
{
	struct hfi1_pkt_state ps;
	struct hfi1_qp_priv *priv = qp->priv;
	int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
	unsigned long timeout;
	unsigned long timeout_int;
	int cpu;

	ps.dev = to_idev(qp->ibqp.device);
	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
	ps.ppd = ppd_from_ibp(ps.ibp);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!loopback &&
		    ((qp->remote_ah_attr.dlid &
		      ~((1 << ps.ppd->lmc) - 1)) == ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_rc_req;
		timeout_int = (qp->timeout_jiffies);
		break;
	case IB_QPT_UC:
		if (!loopback &&
		    ((qp->remote_ah_attr.dlid &
		      ~((1 << ps.ppd->lmc) - 1)) == ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_uc_req;
		timeout_int = SEND_RESCHED_TIMEOUT;
		break;
	default:
		make_req = hfi1_make_ud_req;
		timeout_int = SEND_RESCHED_TIMEOUT;
	}

	spin_lock_irqsave(&qp->s_lock, ps.flags);

	/* Return if we are already busy processing a work request. */
	if (!hfi1_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	timeout = jiffies + (timeout_int) / 8;
	cpu = priv->s_sde ? priv->s_sde->cpu :
			    cpumask_first(cpumask_of_node(ps.ppd->dd->node));
	/* insure a pre-built packet is handled */
	ps.s_txreq = get_waiting_verbs_txreq(qp);
	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, ps.flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (hfi1_verbs_send(qp, &ps))
				return;
			/* Record that s_ahg is empty. */
			qp->s_hdrwords = 0;
			/* allow other tasks to run */
			if (unlikely(time_after(jiffies, timeout))) {
				if (workqueue_congested(cpu,
							ps.ppd->hfi1_wq)) {
					spin_lock_irqsave(&qp->s_lock,
							  ps.flags);
					qp->s_flags &= ~RVT_S_BUSY;
					hfi1_schedule_send(qp);
					spin_unlock_irqrestore(&qp->s_lock,
							       ps.flags);
					this_cpu_inc(
						*ps.ppd->dd->send_schedule);
					return;
				}
				if (!irqs_disabled()) {
					cond_resched();
					this_cpu_inc(
						*ps.ppd->dd->send_schedule);
				}
				timeout = jiffies + (timeout_int) / 8;
			}
			spin_lock_irqsave(&qp->s_lock, ps.flags);
		}
	} while (make_req(qp, &ps));

	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}
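
/*
 * Contract sketch (illustrative only): each make_req callback used by
 * hfi1_do_send() builds at most one packet per call and returns nonzero
 * while more work remains, so the send loop reduces to the shape below.
 * example_make_req stands in for hfi1_make_{rc,uc,ud}_req.
 */
static inline int example_send_loop(struct rvt_qp *qp,
				    struct hfi1_pkt_state *ps,
				    int (*example_make_req)(struct rvt_qp *,
						struct hfi1_pkt_state *))
{
	int npkts = 0;

	/* build (and elsewhere send) packets until no more work remains */
	while (example_make_req(qp, ps))
		npkts++;
	return npkts;
}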
/*
 * This should be called with s_lock held.
 */
void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
			enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	/* See post_send() */
	barrier();
	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	/* See ch. 11.2.4.1 and 10.7.3.1 */
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}

	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
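
/*
 * Caller sketch (illustrative only): completions must be generated with
 * the s_lock held, as ruc_loopback() above does when it flushes or
 * completes a WQE.  The wrapper below is hypothetical.
 */
static inline void example_complete_wqe(struct rvt_qp *qp,
					struct rvt_swqe *wqe,
					enum ib_wc_status status)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	hfi1_send_complete(qp, wqe, status);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}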