/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/io.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
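
/* For example, OP(SEND_FIRST) expands to IB_OPCODE_RC_SEND_FIRST. */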
static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ipath_skip_sge(ss, len);
	return wqe->length - len;
}
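
/*
 * Note: each PSN past wqe->psn carries one pmtu of payload, so the PSN
 * delta (modulo the 24-bit PSN space) maps directly to a byte offset,
 * which is how restart_sge() resumes in the middle of a request.
 */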
/**
 * ipath_init_restart- initialize the qp->s_sge after a restart
 * @qp: the QP whose SGE we're restarting
 * @wqe: the work queue to initialize the QP's SGE from
 *
 * The QP s_lock should be held and interrupts disabled.
 */
static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
{
	struct ipath_ibdev *dev;

	qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
				ib_mtu_enum_to_int(qp->path_mtu));
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (list_empty(&qp->timerwait))
		list_add_tail(&qp->timerwait,
			      &dev->pending[dev->pending_index]);
	spin_unlock(&dev->pending_lock);
}
/**
 * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
			     struct ipath_other_headers *ohdr, u32 pmtu)
{
	struct ipath_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & IPATH_S_ACK_PENDING)
				goto normal;
			qp->s_ack_state = OP(ACKNOWLEDGE);
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/* Copy SGE state in case we need to resend */
			qp->s_ack_rdma_sge = e->rdma_sge;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			len = e->rdma_sge.sge.sge_length;
			if (len > pmtu)
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = ipath_compute_aeth(qp);
			ohdr->u.at.atomic_ack_eth[0] =
				cpu_to_be32(e->atomic_data >> 32);
			ohdr->u.at.atomic_ack_eth[1] =
				cpu_to_be32(e->atomic_data);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn;
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
		break;

	default:
	normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~IPATH_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
					    (qp->s_nak_state <<
					     IPATH_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = ipath_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
	}
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
	return 1;

bail:
	return 0;
}
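
/*
 * Note: s_ack_queue behaves as a ring of IPATH_MAX_RDMA_ATOMIC + 1
 * entries.  r_head_ack_queue is where the receive side queues new RDMA
 * read/atomic requests and s_tail_ack_queue is the entry currently
 * being (re)sent, so head == tail means there is nothing queued.
 */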
/**
 * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int ipath_make_rc_req(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_other_headers *ohdr;
	struct ipath_sge_state *ss;
	struct ipath_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	char newreq;
	unsigned long flags;
	int ret = 0;

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

	/*
	 * The lock is needed to synchronize between the sending tasklet,
	 * the receive interrupt handler, and timeout resends.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Sending responses has higher priority over sending requests. */
	if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
	     (qp->s_flags & IPATH_S_ACK_PENDING) ||
	     qp->s_ack_state != OP(ACKNOWLEDGE)) &&
	    ipath_make_rc_ack(dev, qp, ohdr, pmtu))
		goto done;

	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
	    qp->s_rnr_timeout || qp->s_wait_credit)
		goto bail;

	/* Limit the number of packets sent without an ACK. */
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
		qp->s_wait_credit = 1;
		dev->n_rc_stalls++;
		goto bail;
	}

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 1 << 22; /* Set M bit */
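
	/*
	 * Note: in BTH dword 0 the opcode is bits 31:24, bit 23 is the
	 * solicited-event bit (set below for IB_SEND_SOLICITED), and bit
	 * 22 is the MigReq "M" bit; this layout is assumed from the IB
	 * spec's BTH definition rather than stated in this file.
	 */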

	/* Send a request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head)
				goto bail;
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= IPATH_S_FENCE_PENDING;
				goto bail;
			}
			wqe->psn = qp->s_next_psn;
			newreq = 1;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = 0;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto bail;
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && qp->s_lsn != (u32) -1)
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto bail;
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= 1 << 23;
			}
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= IPATH_S_RDMAR_PENDING;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				/*
				 * Adjust s_next_psn to count the
				 * expected number of responses.
				 */
				if (len > pmtu)
					qp->s_next_psn += (len - 1) / pmtu;
				wqe->lpsn = qp->s_next_psn++;
			}
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
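
		/*
		 * Note: an RDMA read request occupies one PSN per expected
		 * response packet, which is why s_next_psn is advanced by
		 * (len - 1) / pmtu + 1 above; that keeps requester and
		 * responder PSNs in lock-step across the read.
		 */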

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= IPATH_S_RDMAR_PENDING;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				wqe->lpsn = wqe->psn;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->wr.wr.atomic.swap);
				ohdr->u.atomic_eth.compare_data = cpu_to_be64(
					wqe->wr.wr.atomic.compare_add);
			} else {
				qp->s_state = OP(FETCH_ADD);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->wr.wr.atomic.compare_add);
				ohdr->u.atomic_eth.compare_data = 0;
			}
			ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
				wqe->wr.wr.atomic.remote_addr >> 32);
			ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
				wqe->wr.wr.atomic.remote_addr);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->wr.wr.atomic.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		bth2 |= qp->s_psn & IPATH_PSN_MASK;
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else {
			qp->s_psn++;
			if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
				qp->s_next_psn = qp->s_psn;
		}
		/*
		 * Put the QP on the pending list so lost ACKs will cause
		 * a retry.  More than one request can be pending so the
		 * QP may already be on the dev->pending list.
		 */
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->timerwait))
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		break;
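
	/*
	 * Note on the SWQE ring pointers used above: s_head is where new
	 * work requests are posted, s_tail is the next request to start
	 * sending, s_cur is the request currently being sent, and s_last
	 * is the oldest request still awaiting an ACK; a retry backs
	 * s_cur up toward s_last.
	 */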

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * This case can only happen if a send is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= 1 << 23;
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * This case can only happen if a RDMA write is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
		}
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * This case can only happen if a RDMA read is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->wr.wr.rdma.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
		bth2 |= 1 << 31;	/* Request ACK. */
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
	ret = 1;
bail:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
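
/*
 * Note: qp->s_state records the opcode of the packet just built, so on
 * the next invocation the switch above continues a multi-packet request
 * (FIRST -> MIDDLE ... -> LAST) from where the previous packet left off.
 */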
/**
 * send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from ipath_rc_rcv() and only uses the receive
 * side QP state.
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
static void send_rc_ack(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_devdata *dd;
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	u32 __iomem *piobuf;
	struct ipath_ib_header hdr;
	struct ipath_other_headers *ohdr;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
	    (qp->s_flags & IPATH_S_ACK_PENDING) ||
	    qp->s_ack_state != OP(ACKNOWLEDGE))
		goto queue_ack;

	spin_unlock_irqrestore(&qp->s_lock, flags);

	dd = dev->dd;
	piobuf = ipath_getpiobuf(dd, 0, NULL);
	if (!piobuf) {
		/*
		 * We are out of PIO buffers at the moment.
		 * Pass responsibility for sending the ACK to the
		 * send tasklet so that when a PIO buffer becomes
		 * available, the ACK is sent ahead of other outgoing
		 * packets.
		 */
		spin_lock_irqsave(&qp->s_lock, flags);
		goto queue_ack;
	}

	/* Construct the header. */
	ohdr = &hdr.u.oth;
	lrh0 = IPATH_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += ipath_make_grh(dev, &hdr.u.l.grh,
					 &qp->remote_ah_attr.grh,
					 hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = IPATH_LRH_GRH;
	}
	/* read pkey_index w/o lock (its atomic) */
	bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
		(OP(ACKNOWLEDGE) << 24) | (1 << 22);
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
					   (qp->r_nak_state <<
					    IPATH_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = ipath_compute_aeth(qp);
	lrh0 |= qp->remote_ah_attr.sl << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);

	writeq(hwords + 1, piobuf);

	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
		u32 *hdrp = (u32 *) &hdr;

		ipath_flush_wc();
		__iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
		ipath_flush_wc();
		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
	} else
		__iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);

	ipath_flush_wc();

	dev->n_unicast_xmit++;
	goto done;

queue_ack:
	dev->n_rc_qacks++;
	qp->s_flags |= IPATH_S_ACK_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Call ipath_do_rc_send() in another thread. */
	tasklet_hi_schedule(&qp->s_task);

done:
	return;
}
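
/*
 * Note on the PIO path above: the length word is written first with
 * writeq(), then the header is copied into the chip's PIO buffer; on
 * hardware that needs write-combining flushes, the copy is flushed
 * before the final word is written, since (as assumed from the flush
 * calls here) the last word written is what triggers the send.
 */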
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from ipath_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct ipath_qp *qp, u32 psn)
{
	u32 n = qp->s_last;
	struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (ipath_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = get_swqe_ptr(qp, n);
		diff = ipath_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See ipath_do_rc_send().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since it's only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
}
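
/*
 * Note: the RDMA_READ_RESPONSE_* opcodes assigned above never appear on
 * the wire from this side; they are sentinel s_state values that steer
 * ipath_make_rc_req() into its "restarted request" cases.
 */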
/**
 * ipath_restart_rc - back up requester to resend the last un-ACKed request
 * @qp: the QP to restart
 * @psn: packet sequence number for the request
 * @wc: the work completion request
 *
 * The QP s_lock should be held and interrupts disabled.
 */
void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
{
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
	struct ipath_ibdev *dev;

	if (qp->s_retry == 0) {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_RETRY_EXC_ERR;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc->vendor_err = 0;
		wc->byte_len = 0;
		wc->qp = &qp->ibqp;
		wc->imm_data = 0;
		wc->src_qp = qp->remote_qpn;
		wc->wc_flags = 0;
		wc->pkey_index = 0;
		wc->slid = qp->remote_ah_attr.dlid;
		wc->sl = qp->remote_ah_attr.sl;
		wc->dlid_path_bits = 0;
		wc->port_num = 0;
		ipath_sqerror_qp(qp, wc);
		goto bail;
	}
	qp->s_retry--;

	/*
	 * Remove the QP from the timeout queue.
	 * Note: it may already have been removed by ipath_ib_timer().
	 */
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		dev->n_rc_resends++;
	else
		dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;

	reset_psn(qp, psn);
	tasklet_hi_schedule(&qp->s_task);

bail:
	return;
}
static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
{
	if (qp->s_last_psn != psn) {
		qp->s_last_psn = psn;
		if (qp->s_wait_credit) {
			qp->s_wait_credit = 0;
			tasklet_hi_schedule(&qp->s_task);
		}
	}
}
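
/*
 * Note: clearing s_wait_credit here releases the stall set in
 * ipath_make_rc_req() when more than IPATH_PSN_CREDIT packets were
 * outstanding, so acknowledged progress restarts the send tasklet.
 */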
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held and interrupts disabled.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	struct ipath_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;

	/*
	 * Remove the QP from the timeout queue (or RNR timeout queue).
	 * If ipath_ib_timer() has already removed it,
	 * it's OK since we hold the QP s_lock and ipath_restart_rc()
	 * just won't find anything to restart if we ACK everything.
	 */
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include an ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = get_swqe_ptr(qp, qp->s_last);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/*
			 * The last valid PSN seen is the previous
			 * request's.
			 */
			update_last_psn(qp, wqe->psn - 1);
			/* Retry this request. */
			ipath_restart_rc(qp, wqe->psn, &wc);
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			*(u64 *) wqe->sg_list[0].vaddr = val;
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & IPATH_S_FENCE_PENDING) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~IPATH_S_FENCE_PENDING;
				tasklet_hi_schedule(&qp->s_task);
			} else if (qp->s_flags & IPATH_S_RDMAR_PENDING) {
				qp->s_flags &= ~IPATH_S_RDMAR_PENDING;
				tasklet_hi_schedule(&qp->s_task);
			}
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = wqe->length;
			wc.imm_data = 0;
			wc.qp = &qp->ibqp;
			wc.src_qp = qp->remote_qpn;
			wc.wc_flags = 0;
			wc.pkey_index = 0;
			wc.slid = qp->remote_ah_attr.dlid;
			wc.sl = qp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
		}
		qp->s_retry = qp->s_retry_cnt;
		/*
		 * If we are completing a request which is in the process of
		 * being resent, we can stop resending it since we know the
		 * responder has already seen it.
		 */
		if (qp->s_last == qp->s_cur) {
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			qp->s_last = qp->s_cur;
			if (qp->s_last == qp->s_tail)
				break;
			wqe = get_swqe_ptr(qp, qp->s_cur);
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		} else {
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			if (qp->s_last == qp->s_tail)
				break;
			wqe = get_swqe_ptr(qp, qp->s_last);
		}
	}
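
	/*
	 * Note: the top three bits of the AETH encode the ACK type
	 * (0 = ACK, 1 = RNR NAK, 3 = NAK, 2 = reserved), and the five
	 * bits below them carry the credit count, RNR timer index, or
	 * NAK code respectively, which is what the
	 * (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK
	 * extractions below pick out.
	 */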
	switch (aeth >> 29) {
	case 0:		/* ACK */
		dev->n_rc_acks++;
		/* If this is a partial ACK, reset the retransmit timer. */
		if (qp->s_last != qp->s_tail) {
			spin_lock(&dev->pending_lock);
			if (list_empty(&qp->timerwait))
				list_add_tail(&qp->timerwait,
					      &dev->pending[dev->pending_index]);
			spin_unlock(&dev->pending_lock);
			/*
			 * If we get a partial ACK for a resent operation,
			 * we can stop resending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (ipath_cmp24(qp->s_psn, psn) <= 0) {
				reset_psn(qp, psn + 1);
				tasklet_hi_schedule(&qp->s_task);
			}
		} else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = psn + 1;
		}
		ipath_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		ret = 1;
		goto bail;

	case 1:		/* RNR NAK */
		dev->n_rnr_naks++;
		if (qp->s_last == qp->s_tail)
			goto bail;
		if (qp->s_rnr_retry == 0) {
			wc.status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			dev->n_rc_resends++;
		else
			dev->n_rc_resends +=
				(qp->s_psn - psn) & IPATH_PSN_MASK;

		reset_psn(qp, psn);

		qp->s_rnr_timeout =
			ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
					   IPATH_AETH_CREDIT_MASK];
		ipath_insert_rnr_queue(qp);
		goto bail;

	case 3:		/* NAK */
		if (qp->s_last == qp->s_tail)
			goto bail;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
			IPATH_AETH_CREDIT_MASK) {
		case 0:	/* PSN sequence error */
			dev->n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			ipath_restart_rc(qp, psn, &wc);
			break;

		case 1:	/* Invalid Request */
			wc.status = IB_WC_REM_INV_REQ_ERR;
			dev->n_other_naks++;
			goto class_b;

		case 2:	/* Remote Access Error */
			wc.status = IB_WC_REM_ACCESS_ERR;
			dev->n_other_naks++;
			goto class_b;

		case 3:	/* Remote Operation Error */
			wc.status = IB_WC_REM_OP_ERR;
			dev->n_other_naks++;
		class_b:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.imm_data = 0;
			wc.qp = &qp->ibqp;
			wc.src_qp = qp->remote_qpn;
			wc.wc_flags = 0;
			wc.pkey_index = 0;
			wc.slid = qp->remote_ah_attr.dlid;
			wc.sl = qp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_sqerror_qp(qp, &wc);
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail;

	default:		/* 2: reserved */
	reserved:
		/* Ignore reserved NAK codes. */
		goto bail;
	}

bail:
	return ret;
}
/**
 * ipath_rc_rcv_resp - process an incoming RC response packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data, u32 tlen,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn, u32 hdrsize, u32 pmtu,
				     int header_in_data)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	struct ib_wc wc;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Ignore invalid responses. */
	if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = ipath_cmp24(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			if (!header_in_data)
				aeth = be32_to_cpu(ohdr->u.aeth);
			else {
				aeth = be32_to_cpu(((__be32 *) data)[0]);
				data += sizeof(__be32);
			}
			if ((aeth >> 29) == 0)
				ipath_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	if (unlikely(qp->s_last == qp->s_tail))
		goto ack_done;
	wqe = get_swqe_ptr(qp, qp->s_last);

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
			if (!header_in_data) {
				__be32 *p = ohdr->u.at.atomic_ack_eth;

				val = ((u64) be32_to_cpu(p[0]) << 32) |
					be32_to_cpu(p[1]);
			} else
				val = be64_to_cpu(((__be64 *) data)[0]);
		} else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		hdrsize += 4;
		wqe = get_swqe_ptr(qp, qp->s_last);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
	read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/* We got a response so update the timeout. */
		spin_lock(&dev->pending_lock);
		if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
			list_move_tail(&qp->timerwait,
				       &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else
			aeth = be32_to_cpu(((__be32 *) data)[0]);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 8)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = get_swqe_ptr(qp, qp->s_last);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8)))
			goto ack_len_err;
	read_last:
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
		(void) do_rc_ack(qp, aeth, psn,
				 OP(RDMA_READ_RESPONSE_LAST), 0);
		goto ack_done;
	}

ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	goto bail;

ack_op_err:
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_len_err:
	wc.status = IB_WC_LOC_LEN_ERR;
ack_err:
	wc.wr_id = wqe->wr.wr_id;
	wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	ipath_sqerror_qp(qp, &wc);
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
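
/*
 * Note: the "ghost ACK" handling above still calls ipath_get_credit()
 * for duplicate ACKs so that flow-control credits carried in a
 * retransmitted AETH are not lost even though the PSN itself is stale.
 */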
/**
 * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn,
				     int diff,
				     int header_in_data)
{
	struct ipath_ack_entry *e;
	u8 i, prev;
	int old_req;
	unsigned long flags;

	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			goto send_ack;
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 */
	psn &= IPATH_PSN_MASK;
	e = NULL;
	old_req = 1;
	spin_lock_irqsave(&qp->s_lock, flags);
	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = IPATH_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (ipath_cmp24(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST) || old_req)
			goto unlock_done;
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = ((psn - e->psn) & IPATH_PSN_MASK) *
			ib_mtu_enum_to_int(qp->path_mtu);
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
			goto unlock_done;
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			ok = ipath_rkey_ok(qp, &e->rdma_sge,
					   len, vaddr, rkey,
					   IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.sg_list = NULL;
			e->rdma_sge.num_sge = 0;
			e->rdma_sge.sge.mr = NULL;
			e->rdma_sge.sge.vaddr = NULL;
			e->rdma_sge.sge.length = 0;
			e->rdma_sge.sge.sge_length = 0;
		}
		e->psn = psn;
		qp->s_ack_state = OP(ACKNOWLEDGE);
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8) opcode || old_req)
			goto unlock_done;
		qp->s_ack_state = OP(ACKNOWLEDGE);
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		if (old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}
		/*
		 * Try to send a simple ACK to work around a Mellanox bug
		 * which doesn't accept a RDMA read response or atomic
		 * response as an ACK for earlier SENDs or RDMA writes.
		 */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
		    !(qp->s_flags & IPATH_S_ACK_PENDING) &&
		    qp->s_ack_state == OP(ACKNOWLEDGE)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
			goto send_ack;
		}
		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_ack_state = OP(ACKNOWLEDGE);
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->r_nak_state = 0;
	tasklet_hi_schedule(&qp->s_task);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}
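
/*
 * Note: the search loop above walks backward from r_head_ack_queue
 * through the previously queued responses to find the oldest entry
 * whose PSN range covers the duplicate, so an RDMA read can be replayed
 * from exactly the PSN the requester asked for.
 */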
static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->state = IB_QPS_ERR;
	lastwqe = ipath_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
{
	unsigned long flags;
	unsigned next;

	next = n + 1;
	if (next > IPATH_MAX_RDMA_ATOMIC)
		next = 0;
	spin_lock_irqsave(&qp->s_lock, flags);
	if (n == qp->s_tail_ack_queue) {
		qp->s_tail_ack_queue = next;
		qp->s_ack_state = OP(ACKNOWLEDGE);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
/**
 * ipath_rc_rcv - process an incoming RC packet
 * @dev: the device this packet came in on
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from ipath_qp_rcv() to process an incoming RC packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	int diff;
	struct ib_reth *reth;
	int header_in_data;

	/* Validate the SLID. See Ch. 9.6.1.5 */
	if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
		goto done;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
		psn = be32_to_cpu(ohdr->bth[2]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
		/*
		 * The header with GRH is 60 bytes and the core driver sets
		 * the eager header buffer size to 56 bytes so the last 4
		 * bytes of the BTH header (PSN) is in the data buffer.
		 */
		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
		if (header_in_data) {
			psn = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		} else
			psn = be32_to_cpu(ohdr->bth[2]);
	}

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
				  hdrsize, pmtu, header_in_data);
		goto done;
	}

	/* Compute 24 bits worth of difference. */
	diff = ipath_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
				       psn, diff, header_in_data))
			goto done;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
	nack_inv:
		ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR);
		qp->r_nak_state = IB_NAK_INVALID_REQUEST;
		qp->r_ack_psn = qp->r_psn;
		goto send_ack;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	wc.imm_data = 0;
	wc.wc_flags = 0;

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
			qp->r_ack_psn = qp->r_psn;
			goto send_ack;
		}
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
	send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, pmtu);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		if (!ipath_get_rwqe(qp, 0))
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto send_last;
		/* FALLTHROUGH */
	case OP(SEND_LAST_WITH_IMMEDIATE):
	send_last_imm:
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else {
			/* Immediate data comes after BTH */
			wc.imm_data = ohdr->u.imm_data;
		}
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
	send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, tlen);
		qp->r_msn++;
		if (!qp->r_wrid_valid)
			break;
		qp->r_wrid_valid = 0;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = IB_WC_RECV;
		wc.vendor_err = 0;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.pkey_index = 0;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			       (ohdr->bth[0] &
				__constant_cpu_to_be32(1 << 23)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE)))
			goto nack_acc;
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(qp, &qp->r_sge,
					   qp->r_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
		} else {
			qp->r_sge.sg_list = NULL;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto send_last;
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;
): {
1808 struct ipath_ack_entry
*e
;
1812 if (unlikely(!(qp
->qp_access_flags
&
1813 IB_ACCESS_REMOTE_READ
)))
1815 next
= qp
->r_head_ack_queue
+ 1;
1816 if (next
> IPATH_MAX_RDMA_ATOMIC
)
1818 if (unlikely(next
== qp
->s_tail_ack_queue
)) {
1819 if (!qp
->s_ack_queue
[next
].sent
)
1821 ipath_update_ack_queue(qp
, next
);
1823 e
= &qp
->s_ack_queue
[qp
->r_head_ack_queue
];
1824 /* RETH comes after BTH */
1825 if (!header_in_data
)
1826 reth
= &ohdr
->u
.rc
.reth
;
1828 reth
= (struct ib_reth
*)data
;
1829 data
+= sizeof(*reth
);
1831 len
= be32_to_cpu(reth
->length
);
1833 u32 rkey
= be32_to_cpu(reth
->rkey
);
1834 u64 vaddr
= be64_to_cpu(reth
->vaddr
);
1837 /* Check rkey & NAK */
1838 ok
= ipath_rkey_ok(qp
, &e
->rdma_sge
, len
, vaddr
,
1839 rkey
, IB_ACCESS_REMOTE_READ
);
1843 * Update the next expected PSN. We add 1 later
1844 * below, so only add the remainder here.
1847 qp
->r_psn
+= (len
- 1) / pmtu
;
1849 e
->rdma_sge
.sg_list
= NULL
;
1850 e
->rdma_sge
.num_sge
= 0;
1851 e
->rdma_sge
.sge
.mr
= NULL
;
1852 e
->rdma_sge
.sge
.vaddr
= NULL
;
1853 e
->rdma_sge
.sge
.length
= 0;
1854 e
->rdma_sge
.sge
.sge_length
= 0;
1860 * We need to increment the MSN here instead of when we
1861 * finish sending the result since a duplicate request would
1862 * increment it more than once.
1866 qp
->r_state
= opcode
;
1867 qp
->r_nak_state
= 0;
1869 qp
->r_head_ack_queue
= next
;
1871 /* Call ipath_do_rc_send() in another thread. */
1872 tasklet_hi_schedule(&qp
->s_task
);
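
	/*
	 * Note: an RDMA read is not answered here; it is queued in
	 * s_ack_queue and the send tasklet builds the response packets
	 * from the responder's send-side state (see ipath_make_rc_ack()).
	 */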

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct ipath_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		next = qp->r_head_ack_queue + 1;
		if (next > IPATH_MAX_RDMA_ATOMIC)
			next = 0;
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv;
			ipath_update_ack_queue(qp, next);
		}
		if (!header_in_data)
			ateth = &ohdr->u.atomic_eth;
		else
			ateth = (struct ib_atomic_eth *)data;
		vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
			be32_to_cpu(ateth->vaddr[1]);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
					    sizeof(u64), vaddr, rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = be64_to_cpu(ateth->swap_data);
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      be64_to_cpu(ateth->compare_data),
				      sdata);
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn & IPATH_PSN_MASK;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		barrier();
		qp->r_head_ack_queue = next;

		/* Call ipath_do_rc_send() in another thread. */
		tasklet_hi_schedule(&qp->s_task);

		goto done;
	}
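
	/*
	 * Note: atomics are executed inline above with
	 * atomic64_add_return() or cmpxchg() on the mapped address, and
	 * the original value is saved in the ack queue entry so a lost
	 * ATOMIC_ACKNOWLEDGE can be resent without re-executing the op.
	 */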

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & (1 << 31))
		goto send_ack;
	goto done;

nack_acc:
	ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;