// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2018 Intel Corporation.
 *
 */

#include "hfi.h"
#include "qp.h"
#include "rc.h"
#include "verbs.h"
#include "tid_rdma.h"
#include "exp_rcv.h"
#include "trace.h"

/**
 * DOC: TID RDMA READ protocol
 *
 * This is an end-to-end protocol at the hfi1 level between two nodes that
 * improves performance by avoiding data copy on the requester side. It
 * converts a qualified RDMA READ request into a TID RDMA READ request on
 * the requester side and thereafter handles the request and response
 * differently. To be qualified, the RDMA READ request should meet the
 * following:
 * -- The total data length should be greater than 256K;
 * -- The total data length should be a multiple of 4K page size;
 * -- Each local scatter-gather entry should be 4K page aligned;
 * -- Each local scatter-gather entry should be a multiple of 4K page size;
 */
#define RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK BIT_ULL(32)
#define RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK BIT_ULL(33)
#define RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK BIT_ULL(34)
#define RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK BIT_ULL(35)
#define RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK BIT_ULL(37)
#define RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK BIT_ULL(38)

/* Maximum number of packets within a flow generation. */
#define MAX_TID_FLOW_PSN BIT(HFI1_KDETH_BTH_SEQ_SHIFT)

#define GENERATION_MASK 0xFFFFF
static u32 mask_generation(u32 a)
{
	return a & GENERATION_MASK;
}

/* Reserved generation value to set to unused flows for kernel contexts */
#define KERN_GENERATION_RESERVED mask_generation(U32_MAX)
/*
 * J_KEY for kernel contexts when TID RDMA is used.
 * See generate_jkey() in hfi.h for more information.
 */
#define TID_RDMA_JKEY                   32
#define HFI1_KERNEL_MIN_JKEY HFI1_ADMIN_JKEY_RANGE
#define HFI1_KERNEL_MAX_JKEY (2 * HFI1_ADMIN_JKEY_RANGE - 1)

/* Maximum number of segments in flight per QP request. */
#define TID_RDMA_MAX_READ_SEGS_PER_REQ  6
#define TID_RDMA_MAX_WRITE_SEGS_PER_REQ 4
#define MAX_REQ max_t(u16, TID_RDMA_MAX_READ_SEGS_PER_REQ, \
			TID_RDMA_MAX_WRITE_SEGS_PER_REQ)
#define MAX_FLOWS roundup_pow_of_two(MAX_REQ + 1)

#define MAX_EXPECTED_PAGES     (MAX_EXPECTED_BUFFER / PAGE_SIZE)

#define TID_RDMA_DESTQP_FLOW_SHIFT      11
#define TID_RDMA_DESTQP_FLOW_MASK       0x1f
#define TID_OPFN_QP_CTXT_MASK 0xff
#define TID_OPFN_QP_CTXT_SHIFT 56
#define TID_OPFN_QP_KDETH_MASK 0xff
#define TID_OPFN_QP_KDETH_SHIFT 48
#define TID_OPFN_MAX_LEN_MASK 0x7ff
#define TID_OPFN_MAX_LEN_SHIFT 37
#define TID_OPFN_TIMEOUT_MASK 0x1f
#define TID_OPFN_TIMEOUT_SHIFT 32
#define TID_OPFN_RESERVED_MASK 0x3f
#define TID_OPFN_RESERVED_SHIFT 26
#define TID_OPFN_URG_MASK 0x1
#define TID_OPFN_URG_SHIFT 25
#define TID_OPFN_VER_MASK 0x7
#define TID_OPFN_VER_SHIFT 22
#define TID_OPFN_JKEY_MASK 0x3f
#define TID_OPFN_JKEY_SHIFT 16
#define TID_OPFN_MAX_READ_MASK 0x3f
#define TID_OPFN_MAX_READ_SHIFT 10
#define TID_OPFN_MAX_WRITE_MASK 0x3f
#define TID_OPFN_MAX_WRITE_SHIFT 4

/*
 * OPFN TID layout
 *
 * 63               32               16                8               0
 * NNNNNNNNKKKKKKKK MMMMMMMMMMMTTTTT DDDDDDUVVVJJJJJJ RRRRRRWWWWWWCCCC
 * 3210987654321098 7654321098765432 1098765432109876 5432109876543210
 * N - the context Number
 * K - the Kdeth_qp
 * M - Max_len
 * T - Timeout
 * D - reserveD
 * U - Urg capable
 * V - Version
 * J - Jkey
 * R - max_Read
 * W - max_Write
 * C - Capcode
 */
static u32 tid_rdma_flow_wt;

static void tid_rdma_trigger_resume(struct work_struct *work);
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
					 gfp_t gfp);
static void hfi1_init_trdma_req(struct rvt_qp *qp,
				struct tid_rdma_request *req);
static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
static void hfi1_tid_timeout(struct timer_list *t);
static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
static void hfi1_tid_retry_timeout(struct timer_list *t);
static int make_tid_rdma_ack(struct rvt_qp *qp,
			     struct ib_other_headers *ohdr,
			     struct hfi1_pkt_state *ps);
static void hfi1_do_tid_send(struct rvt_qp *qp);
static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx);
static void tid_rdma_rcv_err(struct hfi1_packet *packet,
			     struct ib_other_headers *ohdr,
			     struct rvt_qp *qp, u32 psn, int diff, bool fecn);
static void update_r_next_psn_fecn(struct hfi1_packet *packet,
				   struct hfi1_qp_priv *priv,
				   struct hfi1_ctxtdata *rcd,
				   struct tid_rdma_flow *flow,
				   bool fecn);
static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
{
	return
		(((u64)p->qp & TID_OPFN_QP_CTXT_MASK) <<
			TID_OPFN_QP_CTXT_SHIFT) |
		((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) <<
			TID_OPFN_QP_KDETH_SHIFT) |
		(((u64)((p->max_len >> PAGE_SHIFT) - 1) &
			TID_OPFN_MAX_LEN_MASK) << TID_OPFN_MAX_LEN_SHIFT) |
		(((u64)p->timeout & TID_OPFN_TIMEOUT_MASK) <<
			TID_OPFN_TIMEOUT_SHIFT) |
		(((u64)p->urg & TID_OPFN_URG_MASK) << TID_OPFN_URG_SHIFT) |
		(((u64)p->jkey & TID_OPFN_JKEY_MASK) << TID_OPFN_JKEY_SHIFT) |
		(((u64)p->max_read & TID_OPFN_MAX_READ_MASK) <<
			TID_OPFN_MAX_READ_SHIFT) |
		(((u64)p->max_write & TID_OPFN_MAX_WRITE_MASK) <<
			TID_OPFN_MAX_WRITE_SHIFT);
}
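/*
 * Worked example (illustrative values, not taken from the original source):
 * for p->qp = (0x80 << 16) | 3 (KDETH prefix 0x80, receive context 3),
 * p->max_len = 256K, p->timeout = 17, p->urg = 0, p->jkey = 32,
 * p->max_read = 6 and p->max_write = 4, the packing above yields
 *
 *   (3ULL << 56) | (0x80ULL << 48) | (63ULL << 37) | (17ULL << 32) |
 *   (32ULL << 16) | (6ULL << 10) | (4ULL << 4)
 *
 * since (256K >> PAGE_SHIFT) - 1 = 63 with 4K pages. tid_rdma_opfn_decode()
 * below inverts this packing field by field.
 */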
static void tid_rdma_opfn_decode(struct tid_rdma_params *p, u64 data)
{
	p->max_len = (((data >> TID_OPFN_MAX_LEN_SHIFT) &
			TID_OPFN_MAX_LEN_MASK) + 1) << PAGE_SHIFT;
	p->jkey = (data >> TID_OPFN_JKEY_SHIFT) & TID_OPFN_JKEY_MASK;
	p->max_write = (data >> TID_OPFN_MAX_WRITE_SHIFT) &
			TID_OPFN_MAX_WRITE_MASK;
	p->max_read = (data >> TID_OPFN_MAX_READ_SHIFT) &
			TID_OPFN_MAX_READ_MASK;
	p->qp =
		((((data >> TID_OPFN_QP_KDETH_SHIFT) & TID_OPFN_QP_KDETH_MASK)
			<< 16) |
		 ((data >> TID_OPFN_QP_CTXT_SHIFT) & TID_OPFN_QP_CTXT_MASK));
	p->urg = (data >> TID_OPFN_URG_SHIFT) & TID_OPFN_URG_MASK;
	p->timeout = (data >> TID_OPFN_TIMEOUT_SHIFT) & TID_OPFN_TIMEOUT_MASK;
}
void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
{
	struct hfi1_qp_priv *priv = qp->priv;

	p->qp = (kdeth_qp << 16) | priv->rcd->ctxt;
	p->max_len = TID_RDMA_MAX_SEGMENT_SIZE;
	p->jkey = priv->rcd->jkey;
	p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ;
	p->max_write = TID_RDMA_MAX_WRITE_SEGS_PER_REQ;
	p->timeout = qp->timeout;
	p->urg = is_urg_masked(priv->rcd);
}
bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data)
{
	struct hfi1_qp_priv *priv = qp->priv;

	*data = tid_rdma_opfn_encode(&priv->tid_rdma.local);
	return true;
}
bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct tid_rdma_params *remote, *old;
	bool ret = true;

	old = rcu_dereference_protected(priv->tid_rdma.remote,
					lockdep_is_held(&priv->opfn.lock));
	/*
	 * If data passed in is zero, return true so as not to continue the
	 * negotiation process
	 */
	if (!data || !HFI1_CAP_IS_KSET(TID_RDMA))
		goto null;
	/*
	 * If kzalloc fails, return false. This will result in:
	 * * at the requester a new OPFN request being generated to retry
	 *   the negotiation
	 * * at the responder, 0 being returned to the requester so as to
	 *   disable TID RDMA at both the requester and the responder
	 */
	remote = kzalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote) {
		ret = false;
		goto null;
	}

	tid_rdma_opfn_decode(remote, data);
	priv->tid_timer_timeout_jiffies =
		usecs_to_jiffies((((4096UL * (1UL << remote->timeout)) /
				   1000UL) << 3) * 7);
	trace_hfi1_opfn_param(qp, 0, &priv->tid_rdma.local);
	trace_hfi1_opfn_param(qp, 1, remote);
	rcu_assign_pointer(priv->tid_rdma.remote, remote);
	/*
	 * A TID RDMA READ request's segment size is not equal to
	 * remote->max_len only when the request's data length is smaller
	 * than remote->max_len. In that case, there will be only one segment.
	 * Therefore, when priv->pkts_ps is used to calculate req->cur_seg
	 * during retry, it will lead to req->cur_seg = 0, which is exactly
	 * what is expected.
	 */
	priv->pkts_ps = (u16)rvt_div_mtu(qp, remote->max_len);
	priv->timeout_shift = ilog2(priv->pkts_ps - 1) + 1;
	goto free;
null:
	RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
	priv->timeout_shift = 0;
free:
	if (old)
		kfree_rcu(old, rcu_head);
	return ret;
}
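/*
 * Illustrative numbers (assumed, not taken from the original source): with
 * remote->max_len = 256K and a 4K MTU, rvt_div_mtu() gives pkts_ps = 64
 * packets per segment and timeout_shift = ilog2(64 - 1) + 1 = 6.
 */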
bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data)
{
	bool ret;

	ret = tid_rdma_conn_reply(qp, *data);
	*data = 0;
	/*
	 * If tid_rdma_conn_reply() returns error, set *data as 0 to indicate
	 * TID RDMA could not be enabled. This will result in TID RDMA being
	 * disabled at the requester too.
	 */
	if (ret)
		(void)tid_rdma_conn_req(qp, data);
	return ret;
}
void tid_rdma_conn_error(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct tid_rdma_params *old;

	old = rcu_dereference_protected(priv->tid_rdma.remote,
					lockdep_is_held(&priv->opfn.lock));
	RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
	if (old)
		kfree_rcu(old, rcu_head);
}
/* This is called at context initialization time */
int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
{
	if (reinit)
		return 0;

	BUILD_BUG_ON(TID_RDMA_JKEY < HFI1_KERNEL_MIN_JKEY);
	BUILD_BUG_ON(TID_RDMA_JKEY > HFI1_KERNEL_MAX_JKEY);
	rcd->jkey = TID_RDMA_JKEY;
	hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey);
	return hfi1_alloc_ctxt_rcv_groups(rcd);
}
/**
 * qp_to_rcd - determine the receive context used by a qp
 * @rdi: rvt dev struct
 * @qp: the qp
 *
 * This routine returns the receive context associated
 * with the qp's qpn.
 *
 * Returns the context.
 */
static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
				       struct rvt_qp *qp)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	unsigned int ctxt;

	if (qp->ibqp.qp_num == 0)
		ctxt = 0;
	else
		ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift);
	return dd->rcd[ctxt];
}
int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		      struct ib_qp_init_attr *init_attr)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	int i, ret;

	qpriv->rcd = qp_to_rcd(rdi, qp);

	spin_lock_init(&qpriv->opfn.lock);
	INIT_WORK(&qpriv->opfn.opfn_work, opfn_send_conn_request);
	INIT_WORK(&qpriv->tid_rdma.trigger_work, tid_rdma_trigger_resume);
	qpriv->flow_state.psn = 0;
	qpriv->flow_state.index = RXE_NUM_TID_FLOWS;
	qpriv->flow_state.last_index = RXE_NUM_TID_FLOWS;
	qpriv->flow_state.generation = KERN_GENERATION_RESERVED;
	qpriv->s_state = TID_OP(WRITE_RESP);
	qpriv->s_tid_cur = HFI1_QP_WQE_INVALID;
	qpriv->s_tid_head = HFI1_QP_WQE_INVALID;
	qpriv->s_tid_tail = HFI1_QP_WQE_INVALID;
	qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
	qpriv->r_tid_head = HFI1_QP_WQE_INVALID;
	qpriv->r_tid_tail = HFI1_QP_WQE_INVALID;
	qpriv->r_tid_ack = HFI1_QP_WQE_INVALID;
	qpriv->r_tid_alloc = HFI1_QP_WQE_INVALID;
	atomic_set(&qpriv->n_requests, 0);
	atomic_set(&qpriv->n_tid_requests, 0);
	timer_setup(&qpriv->s_tid_timer, hfi1_tid_timeout, 0);
	timer_setup(&qpriv->s_tid_retry_timer, hfi1_tid_retry_timeout, 0);
	INIT_LIST_HEAD(&qpriv->tid_wait);

	if (init_attr->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
		struct hfi1_devdata *dd = qpriv->rcd->dd;

		qpriv->pages = kzalloc_node(TID_RDMA_MAX_PAGES *
						sizeof(*qpriv->pages),
					    GFP_KERNEL, dd->node);
		if (!qpriv->pages)
			return -ENOMEM;
		for (i = 0; i < qp->s_size; i++) {
			struct hfi1_swqe_priv *priv;
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);

			priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
					    dd->node);
			if (!priv)
				return -ENOMEM;

			hfi1_init_trdma_req(qp, &priv->tid_req);
			priv->tid_req.e.swqe = wqe;
			wqe->priv = priv;
		}
		for (i = 0; i < rvt_max_atomic(rdi); i++) {
			struct hfi1_ack_priv *priv;

			priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
					    dd->node);
			if (!priv)
				return -ENOMEM;

			hfi1_init_trdma_req(qp, &priv->tid_req);
			priv->tid_req.e.ack = &qp->s_ack_queue[i];

			ret = hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req,
							    GFP_KERNEL);
			if (ret) {
				kfree(priv);
				return ret;
			}
			qp->s_ack_queue[i].priv = priv;
		}
	}

	return 0;
}
void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct rvt_swqe *wqe;
	u32 i;

	if (qp->ibqp.qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
		for (i = 0; i < qp->s_size; i++) {
			wqe = rvt_get_swqe_ptr(qp, i);
			kfree(wqe->priv);
			wqe->priv = NULL;
		}
		for (i = 0; i < rvt_max_atomic(rdi); i++) {
			struct hfi1_ack_priv *priv = qp->s_ack_queue[i].priv;

			if (priv)
				hfi1_kern_exp_rcv_free_flows(&priv->tid_req);
			kfree(priv);
			qp->s_ack_queue[i].priv = NULL;
		}
		cancel_work_sync(&qpriv->opfn.opfn_work);
		kfree(qpriv->pages);
		qpriv->pages = NULL;
	}
}
/* Flow and tid waiter functions */
/*
 * There are two locks involved with the queuing
 * routines: the qp s_lock and the exp_lock.
 *
 * Since the tid space allocation is called from
 * the send engine, the qp s_lock is already held.
 *
 * The allocation routines will get the exp_lock.
 *
 * The first_qp() call is provided to allow the head of
 * the rcd wait queue to be fetched under the exp_lock and
 * followed by a drop of the exp_lock.
 *
 * Any qp in the wait list will have the qp reference count held
 * to hold the qp in memory.
 */

/*
 * return head of rcd wait list
 *
 * Must hold the exp_lock.
 *
 * Get a reference to the QP to hold the QP in memory.
 *
 * The caller must release the reference when the local
 * qp is no longer being used.
 */
static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd,
			       struct tid_queue *queue)
	__must_hold(&rcd->exp_lock)
{
	struct hfi1_qp_priv *priv;

	lockdep_assert_held(&rcd->exp_lock);
	priv = list_first_entry_or_null(&queue->queue_head,
					struct hfi1_qp_priv,
					tid_wait);
	if (!priv)
		return NULL;
	rvt_get_qp(priv->owner);
	return priv->owner;
}
/**
 * kernel_tid_waiters - determine rcd wait
 * @rcd: the receive context
 * @queue: the queue to operate on
 * @qp: the head of the qp being processed
 *
 * This routine will return false IFF
 * the list is NULL or the head of the
 * list is the indicated qp.
 *
 * Must hold the qp s_lock and the exp_lock.
 *
 * Return:
 * false if either of the conditions below are satisfied:
 * 1. The list is empty or
 * 2. The indicated qp is at the head of the list and the
 *    HFI1_S_WAIT_TID_SPACE bit is set in qp->s_flags.
 * true is returned otherwise.
 */
static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd,
			       struct tid_queue *queue, struct rvt_qp *qp)
	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
{
	struct rvt_qp *fqp;
	bool ret = true;

	lockdep_assert_held(&qp->s_lock);
	lockdep_assert_held(&rcd->exp_lock);
	fqp = first_qp(rcd, queue);
	if (!fqp || (fqp == qp && (qp->s_flags & HFI1_S_WAIT_TID_SPACE)))
		ret = false;
	rvt_put_qp(fqp);
	return ret;
}
/**
 * dequeue_tid_waiter - dequeue the qp from the list
 * @rcd: the receive context
 * @queue: the queue to operate on
 * @qp: the qp to remove the wait list
 *
 * This routine removes the indicated qp from the
 * wait list if it is there.
 *
 * This should be done after the hardware flow and
 * tid array resources have been allocated.
 *
 * Must hold the qp s_lock and the rcd exp_lock.
 *
 * It assumes the s_lock to protect the s_flags
 * field and to reliably test the HFI1_S_WAIT_TID_SPACE flag.
 */
static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd,
			       struct tid_queue *queue, struct rvt_qp *qp)
	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	lockdep_assert_held(&rcd->exp_lock);
	if (list_empty(&priv->tid_wait))
		return;
	list_del_init(&priv->tid_wait);
	qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
	queue->dequeue++;
	rvt_put_qp(qp);
}
/**
 * queue_qp_for_tid_wait - suspend QP on tid space
 * @rcd: the receive context
 * @queue: the queue to operate on
 * @qp: the qp
 *
 * The qp is inserted at the tail of the rcd
 * wait queue and the HFI1_S_WAIT_TID_SPACE s_flag is set.
 *
 * Must hold the qp s_lock and the exp_lock.
 */
static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd,
				  struct tid_queue *queue, struct rvt_qp *qp)
	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	lockdep_assert_held(&rcd->exp_lock);
	if (list_empty(&priv->tid_wait)) {
		qp->s_flags |= HFI1_S_WAIT_TID_SPACE;
		list_add_tail(&priv->tid_wait, &queue->queue_head);
		priv->tid_enqueue = ++queue->enqueue;
		rcd->dd->verbs_dev.n_tidwait++;
		trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TID_SPACE);
		rvt_get_qp(qp);
	}
}
/**
 * __trigger_tid_waiter - trigger tid waiter
 * @qp: the qp
 *
 * This is a private entrance to schedule the qp
 * assuming the caller is holding the qp->s_lock.
 */
static void __trigger_tid_waiter(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{
	lockdep_assert_held(&qp->s_lock);
	if (!(qp->s_flags & HFI1_S_WAIT_TID_SPACE))
		return;
	trace_hfi1_qpwakeup(qp, HFI1_S_WAIT_TID_SPACE);
	hfi1_schedule_send(qp);
}
/**
 * tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp
 * @qp: the qp
 *
 * trigger a schedule or a waiting qp in a deadlock
 * safe manner.  The qp reference is held prior
 * to this call via first_qp().
 *
 * If the qp trigger was already scheduled (!rval)
 * the reference is dropped, otherwise the resume
 * or the destroy cancel will dispatch the reference.
 */
static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv;
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	bool rval;

	if (!qp)
		return;

	priv = qp->priv;
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	dd = dd_from_ibdev(qp->ibqp.device);

	rval = queue_work_on(priv->s_sde ?
			     priv->s_sde->cpu :
			     cpumask_first(cpumask_of_node(dd->node)),
			     ppd->hfi1_wq,
			     &priv->tid_rdma.trigger_work);
	if (!rval) {
		/* work already scheduled; drop the extra reference */
		rvt_put_qp(qp);
	}
}
/**
 * tid_rdma_trigger_resume - field a trigger work request
 * @work: the work item
 *
 * Complete the off qp trigger processing by directly
 * calling the progress routine.
 */
static void tid_rdma_trigger_resume(struct work_struct *work)
{
	struct tid_rdma_qp_params *tr;
	struct hfi1_qp_priv *priv;
	struct rvt_qp *qp;

	tr = container_of(work, struct tid_rdma_qp_params, trigger_work);
	priv = container_of(tr, struct hfi1_qp_priv, tid_rdma);
	qp = priv->owner;
	spin_lock_irq(&qp->s_lock);
	if (qp->s_flags & HFI1_S_WAIT_TID_SPACE) {
		spin_unlock_irq(&qp->s_lock);
		hfi1_do_send(priv->owner, true);
	} else {
		spin_unlock_irq(&qp->s_lock);
	}
	rvt_put_qp(qp);
}
/*
 * tid_rdma_flush_wait - unwind any tid space wait
 *
 * This is called when resetting a qp to
 * allow a destroy or reset to get rid
 * of any tid space linkage and reference counts.
 */
static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue)
	__must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv;

	if (!qp)
		return;
	lockdep_assert_held(&qp->s_lock);
	priv = qp->priv;
	qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
	spin_lock(&priv->rcd->exp_lock);
	if (!list_empty(&priv->tid_wait)) {
		list_del_init(&priv->tid_wait);
		qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
		queue->dequeue++;
		rvt_put_qp(qp);
	}
	spin_unlock(&priv->rcd->exp_lock);
}
void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;

	_tid_rdma_flush_wait(qp, &priv->rcd->flow_queue);
	_tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue);
}
/**
 * kern_reserve_flow - allocate a hardware flow
 * @rcd - the context to use for allocation
 * @last - the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
 *         signify "don't care".
 *
 * Use a bit mask based allocation to reserve a hardware
 * flow for use in receiving KDETH data packets. If a preferred flow is
 * specified the function will attempt to reserve that flow again, if
 * available.
 *
 * The exp_lock must be held.
 *
 * Return:
 * On success: a positive value between 0 and RXE_NUM_TID_FLOWS - 1
 * On failure: -EAGAIN
 */
static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
	__must_hold(&rcd->exp_lock)
{
	int nr;

	/* Attempt to reserve the preferred flow index */
	if (last >= 0 && last < RXE_NUM_TID_FLOWS &&
	    !test_and_set_bit(last, &rcd->flow_mask))
		return last;

	nr = ffz(rcd->flow_mask);
	BUILD_BUG_ON(RXE_NUM_TID_FLOWS >=
		     (sizeof(rcd->flow_mask) * BITS_PER_BYTE));
	if (nr > (RXE_NUM_TID_FLOWS - 1))
		return -EAGAIN;
	set_bit(nr, &rcd->flow_mask);
	return nr;
}
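/*
 * Example (hypothetical state): if flow_mask is 0x3 and no preferred index
 * can be re-used, ffz() above finds bit 2, so hardware flow 2 is reserved
 * and returned.
 */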
static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation,
			     u32 flow_idx)
{
	u64 reg;

	reg = ((u64)generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
		RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK |
		RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK |
		RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK |
		RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK |
		RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK;

	if (generation != KERN_GENERATION_RESERVED)
		reg |= RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK;

	write_uctxt_csr(rcd->dd, rcd->ctxt,
			RCV_TID_FLOW_TABLE + 8 * flow_idx, reg);
}
static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
	__must_hold(&rcd->exp_lock)
{
	u32 generation = rcd->flows[flow_idx].generation;

	kern_set_hw_flow(rcd, generation, flow_idx);
	return generation;
}
static u32 kern_flow_generation_next(u32 gen)
{
	u32 generation = mask_generation(gen + 1);

	if (generation == KERN_GENERATION_RESERVED)
		generation = mask_generation(generation + 1);
	return generation;
}
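/*
 * Note: generations wrap within GENERATION_MASK and skip over
 * KERN_GENERATION_RESERVED, so an active flow's generation never collides
 * with the value used to mark an unused kernel flow.
 */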
static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
	__must_hold(&rcd->exp_lock)
{
	rcd->flows[flow_idx].generation =
		kern_flow_generation_next(rcd->flows[flow_idx].generation);
	kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx);
}
int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
	struct tid_flow_state *fs = &qpriv->flow_state;
	struct rvt_qp *fqp;
	unsigned long flags;
	int ret = 0;

	/* The QP already has an allocated flow */
	if (fs->index != RXE_NUM_TID_FLOWS)
		return ret;

	spin_lock_irqsave(&rcd->exp_lock, flags);
	if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp))
		goto queue;

	ret = kern_reserve_flow(rcd, fs->last_index);
	if (ret < 0)
		goto queue;
	fs->index = ret;
	fs->last_index = fs->index;

	/* Generation received in a RESYNC overrides default flow generation */
	if (fs->generation != KERN_GENERATION_RESERVED)
		rcd->flows[fs->index].generation = fs->generation;
	fs->generation = kern_setup_hw_flow(rcd, fs->index);

	dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
	/* get head before dropping lock */
	fqp = first_qp(rcd, &rcd->flow_queue);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);

	tid_rdma_schedule_tid_wakeup(fqp);
	return 0;
queue:
	queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);
	return -EAGAIN;
}
void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
	struct tid_flow_state *fs = &qpriv->flow_state;
	struct rvt_qp *fqp;
	unsigned long flags;

	if (fs->index >= RXE_NUM_TID_FLOWS)
		return;
	spin_lock_irqsave(&rcd->exp_lock, flags);
	kern_clear_hw_flow(rcd, fs->index);
	clear_bit(fs->index, &rcd->flow_mask);
	fs->index = RXE_NUM_TID_FLOWS;
	fs->generation = KERN_GENERATION_RESERVED;

	/* get head before dropping lock */
	fqp = first_qp(rcd, &rcd->flow_queue);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);

	if (fqp == qp) {
		__trigger_tid_waiter(fqp);
		rvt_put_qp(fqp);
	} else {
		tid_rdma_schedule_tid_wakeup(fqp);
	}
}
void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
{
	int i;

	for (i = 0; i < RXE_NUM_TID_FLOWS; i++) {
		rcd->flows[i].generation = mask_generation(prandom_u32());
		kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
	}
}
/* TID allocation functions */
static u8 trdma_pset_order(struct tid_rdma_pageset *s)
{
	u8 count = s->count;

	return ilog2(count) + 1;
}
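/*
 * For example, a pageset of 16 contiguous pages yields an order of
 * ilog2(16) + 1 = 5. Pageset counts are kept to powers of two (capped at
 * MAX_EXPECTED_BUFFER) by the find_phys_blocks routines below, e.g. a
 * contiguous run of 12 pages is emitted as an 8-page set plus a 4-page set.
 */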
/**
 * tid_rdma_find_phys_blocks_4k - get groups base on mr info
 * @npages - number of pages
 * @pages - pointer to an array of page structs
 * @list - page set array to return
 *
 * This routine returns the number of groups associated with
 * the current sge information.  This implementation is based
 * on the expected receive find_phys_blocks() adjusted to
 * use the MR information vs. the pfn.
 *
 * Return:
 * the number of RcvArray entries
 */
static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow,
					struct page **pages,
					u32 npages,
					struct tid_rdma_pageset *list)
{
	u32 pagecount, pageidx, setcount = 0, i;
	void *vaddr, *this_vaddr;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	vaddr = page_address(pages[0]);
	trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_vaddr = i < npages ? page_address(pages[i]) : NULL;
		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
					 this_vaddr);
		/*
		 * If the vaddr's are not sequential, pages are not physically
		 * contiguous.
		 */
		if (this_vaddr != (vaddr + PAGE_SIZE)) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down it
			 * sizes supported by the HW.
			 * There are two main constraints:
			 *     1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *        If the total set size is bigger than that
			 *        program only a MAX_EXPECTED_BUFFER chunk.
			 *     2. The buffer size has to be a power of two. If
			 *        it is not, round down to the closest power of
			 *        2 and program that size.
			 */
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				trace_hfi1_tid_pageset(flow->req->qp, setcount,
						       list[setcount].idx,
						       list[setcount].count);
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			vaddr = this_vaddr;
		} else {
			vaddr += PAGE_SIZE;
			pagecount++;
		}
	}
	/* ensure we always return an even number of sets */
	if (setcount & 1)
		list[setcount++].count = 0;
	return setcount;
}
/**
 * tid_flush_pages - dump out pages into pagesets
 * @list - list of pagesets
 * @idx - pointer to current page index
 * @pages - number of pages to dump
 * @sets - current number of pagesets
 *
 * This routine flushes out accumulated pages.
 *
 * To ensure an even number of sets the
 * code may add a filler.
 *
 * This can happen when pages is not
 * a power of 2 or pages is a power of 2
 * less than the maximum pages.
 *
 * Return:
 * The new number of sets
 */
static u32 tid_flush_pages(struct tid_rdma_pageset *list,
			   u32 *idx, u32 pages, u32 sets)
{
	while (pages) {
		u32 maxpages = pages;

		if (maxpages > MAX_EXPECTED_PAGES)
			maxpages = MAX_EXPECTED_PAGES;
		else if (!is_power_of_2(maxpages))
			maxpages = rounddown_pow_of_two(maxpages);
		list[sets].idx = *idx;
		list[sets++].count = maxpages;
		*idx += maxpages;
		pages -= maxpages;
	}
	/* might need a filler */
	if (sets & 1)
		list[sets++].count = 0;
	return sets;
}
/**
 * tid_rdma_find_phys_blocks_8k - get groups base on mr info
 * @pages - pointer to an array of page structs
 * @npages - number of pages
 * @list - page set array to return
 *
 * This routine parses an array of pages to compute pagesets
 * in an 8k compatible way.
 *
 * pages are tested two at a time, i, i + 1 for contiguous
 * pages and i - 1 and i contiguous pages.
 *
 * If any condition is false, any accumulated pages are flushed and
 * v0,v1 are emitted as separate PAGE_SIZE pagesets
 *
 * Otherwise, the current 8k is totaled for a future flush.
 *
 * Return:
 * The number of pagesets
 * list set with the returned number of pagesets
 */
static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow,
					struct page **pages,
					u32 npages,
					struct tid_rdma_pageset *list)
{
	u32 idx, sets = 0, i;
	u32 pagecnt = 0;
	void *v0, *v1, *vm1;

	for (idx = 0, i = 0, vm1 = NULL; i < npages; i += 2) {
		/* get a new v0 */
		v0 = page_address(pages[i]);
		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
		v1 = i + 1 < npages ?
				page_address(pages[i + 1]) : NULL;
		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
		/* compare i, i + 1 vaddr */
		if (v1 != (v0 + PAGE_SIZE)) {
			/* flush out pages */
			sets = tid_flush_pages(list, &idx, pagecnt, sets);
			/* output v0,v1 as two pagesets */
			list[sets].idx = idx++;
			list[sets++].count = 1;
			if (v1) {
				list[sets].count = 1;
				list[sets++].idx = idx++;
			} else {
				list[sets++].count = 0;
			}
			vm1 = NULL;
			pagecnt = 0;
			continue;
		}
		/* i,i+1 consecutive, look at i-1,i */
		if (vm1 && v0 != (vm1 + PAGE_SIZE)) {
			/* flush out pages */
			sets = tid_flush_pages(list, &idx, pagecnt, sets);
			pagecnt = 0;
		}
		/* pages will always be a multiple of 8k */
		pagecnt += 2;
		/* save i-1 */
		vm1 = v1;
		/* move to next pair */
	}
	/* dump residual pages at end */
	sets = tid_flush_pages(list, &idx, npages - idx, sets);
	/* by design cannot be odd sets */
	WARN_ON(sets & 1);
	return sets;
}
/*
 * Find pages for one segment of a sge array represented by @ss. The function
 * does not check the sge, the sge must have been checked for alignment with a
 * prior call to hfi1_kern_trdma_ok. Other sge checking is done as part of
 * rvt_lkey_ok and rvt_rkey_ok. Also, the function only modifies the local sge
 * copy maintained in @ss->sge, the original sge is not modified.
 *
 * Unlike IB RDMA WRITE, we can't decrement ss->num_sge here because we are not
 * releasing the MR reference count at the same time. Otherwise, we'll "leak"
 * references to the MR. This difference requires that we keep track of progress
 * into the sg_list. This is done by the cur_seg cursor in the tid_rdma_request
 * structure.
 */
static u32 kern_find_pages(struct tid_rdma_flow *flow,
			   struct page **pages,
			   struct rvt_sge_state *ss, bool *last)
{
	struct tid_rdma_request *req = flow->req;
	struct rvt_sge *sge = &ss->sge;
	u32 length = flow->req->seg_len;
	u32 len = PAGE_SIZE;
	u32 i = 0;

	while (length && req->isge < ss->num_sge) {
		pages[i++] = virt_to_page(sge->vaddr);

		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (!sge->sge_length) {
			if (++req->isge < ss->num_sge)
				*sge = ss->sg_list[req->isge - 1];
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				++sge->m;
				sge->n = 0;
			}
			sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}

	flow->length = flow->req->seg_len - length;
	*last = req->isge == ss->num_sge ? false : true;
	return i;
}
static void dma_unmap_flow(struct tid_rdma_flow *flow)
{
	struct hfi1_devdata *dd;
	int i;
	struct tid_rdma_pageset *pset;

	dd = flow->req->rcd->dd;
	for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
			i++, pset++) {
		if (pset->count && pset->addr) {
			dma_unmap_page(&dd->pcidev->dev,
				       pset->addr,
				       PAGE_SIZE * pset->count,
				       DMA_FROM_DEVICE);
			pset->mapped = 0;
		}
	}
}
static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages)
{
	int i;
	struct hfi1_devdata *dd = flow->req->rcd->dd;
	struct tid_rdma_pageset *pset;

	for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
			i++, pset++) {
		if (pset->count) {
			pset->addr = dma_map_page(&dd->pcidev->dev,
						  pages[pset->idx],
						  0,
						  PAGE_SIZE * pset->count,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(&dd->pcidev->dev, pset->addr)) {
				dma_unmap_flow(flow);
				return -ENOMEM;
			}
			pset->mapped = 1;
		}
	}
	return 0;
}

static inline bool dma_mapped(struct tid_rdma_flow *flow)
{
	return !!flow->pagesets[0].mapped;
}
/*
 * Get pages pointers and identify contiguous physical memory chunks for a
 * segment. All segments are of length flow->req->seg_len.
 */
static int kern_get_phys_blocks(struct tid_rdma_flow *flow,
				struct page **pages,
				struct rvt_sge_state *ss, bool *last)
{
	u8 npages;

	/* Reuse previously computed pagesets, if any */
	if (flow->npagesets) {
		trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
					  flow);
		if (!dma_mapped(flow))
			return dma_map_flow(flow, pages);
		return 0;
	}

	npages = kern_find_pages(flow, pages, ss, last);

	if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
		flow->npagesets =
			tid_rdma_find_phys_blocks_4k(flow, pages, npages,
						     flow->pagesets);
	else
		flow->npagesets =
			tid_rdma_find_phys_blocks_8k(flow, pages, npages,
						     flow->pagesets);

	return dma_map_flow(flow, pages);
}
static inline void kern_add_tid_node(struct tid_rdma_flow *flow,
				     struct hfi1_ctxtdata *rcd, char *s,
				     struct tid_group *grp, u8 cnt)
{
	struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++];

	WARN_ON_ONCE(flow->tnode_cnt >=
		     (TID_RDMA_MAX_SEGMENT_SIZE >> PAGE_SHIFT));
	if (WARN_ON_ONCE(cnt & 1))
		dd_dev_err(rcd->dd,
			   "unexpected odd allocation cnt %u map 0x%x used %u",
			   cnt, grp->map, grp->used);

	node->grp = grp;
	node->map = grp->map;
	node->cnt = cnt;
	trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
				grp->base, grp->map, grp->used, cnt);
}
/*
 * Try to allocate pageset_count TID's from TID groups for a context
 *
 * This function allocates TID's without moving groups between lists or
 * modifying grp->map. This is done as follows, being cognizant of the lists
 * between which the TID groups will move:
 * 1. First allocate complete groups of 8 TID's since this is more efficient,
 *    these groups will move from group->full without affecting used
 * 2. If more TID's are needed allocate from used (will move from used->full or
 *    stay in used)
 * 3. If we still don't have the required number of TID's go back and look again
 *    at a complete group (will move from group->used)
 */
static int kern_alloc_tids(struct tid_rdma_flow *flow)
{
	struct hfi1_ctxtdata *rcd = flow->req->rcd;
	struct hfi1_devdata *dd = rcd->dd;
	u32 ngroups, pageidx = 0;
	struct tid_group *group = NULL, *used;
	u8 use;

	flow->tnode_cnt = 0;
	ngroups = flow->npagesets / dd->rcv_entries.group_size;
	if (!ngroups)
		goto used_list;

	/* First look at complete groups */
	list_for_each_entry(group, &rcd->tid_group_list.list, list) {
		kern_add_tid_node(flow, rcd, "complete groups", group,
				  group->size);

		pageidx += group->size;
		if (!--ngroups)
			break;
	}

	if (pageidx >= flow->npagesets)
		goto ok;

used_list:
	/* Now look at partially used groups */
	list_for_each_entry(used, &rcd->tid_used_list.list, list) {
		use = min_t(u32, flow->npagesets - pageidx,
			    used->size - used->used);
		kern_add_tid_node(flow, rcd, "used groups", used, use);

		pageidx += use;
		if (pageidx >= flow->npagesets)
			goto ok;
	}

	/*
	 * Look again at a complete group, continuing from where we left.
	 * However, if we are at the head, we have reached the end of the
	 * complete groups list from the first loop above
	 */
	if (group && &group->list == &rcd->tid_group_list.list)
		goto bail_eagain;
	group = list_prepare_entry(group, &rcd->tid_group_list.list,
				   list);
	if (list_is_last(&group->list, &rcd->tid_group_list.list))
		goto bail_eagain;
	group = list_next_entry(group, list);
	use = min_t(u32, flow->npagesets - pageidx, group->size);
	kern_add_tid_node(flow, rcd, "complete continue", group, use);
	pageidx += use;
	if (pageidx >= flow->npagesets)
		goto ok;
bail_eagain:
	trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
				  (u64)flow->npagesets);
	return -EAGAIN;
ok:
	return 0;
}
static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num,
				   u32 *pset_idx)
{
	struct hfi1_ctxtdata *rcd = flow->req->rcd;
	struct hfi1_devdata *dd = rcd->dd;
	struct kern_tid_node *node = &flow->tnode[grp_num];
	struct tid_group *grp = node->grp;
	struct tid_rdma_pageset *pset;
	u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
	u32 rcventry, npages = 0, pair = 0, tidctrl;
	u8 i, cnt = 0;

	for (i = 0; i < grp->size; i++) {
		rcventry = grp->base + i;

		if (node->map & BIT(i) || cnt >= node->cnt) {
			rcv_array_wc_fill(dd, rcventry);
			continue;
		}
		pset = &flow->pagesets[(*pset_idx)++];
		if (pset->count) {
			hfi1_put_tid(dd, rcventry, PT_EXPECTED,
				     pset->addr, trdma_pset_order(pset));
		} else {
			hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
		}
		npages += pset->count;

		rcventry -= rcd->expected_base;
		tidctrl = pair ? 0x3 : rcventry & 0x1 ? 0x2 : 0x1;
		/*
		 * A single TID entry will be used to use a rcvarr pair (with
		 * tidctrl 0x3), if ALL these are true (a) the bit pos is even
		 * (b) the group map shows current and the next bits as free
		 * indicating two consecutive rcvarry entries are available (c)
		 * we actually need 2 more entries
		 */
		pair = !(i & 0x1) && !((node->map >> i) & 0x3) &&
			node->cnt >= cnt + 2;
		if (!pair) {
			flow->tid_entry[flow->tidcnt++] =
				EXP_TID_SET(IDX, rcventry >> 1) |
				EXP_TID_SET(CTRL, tidctrl) |
				EXP_TID_SET(LEN, npages);
			trace_hfi1_tid_entry_alloc(/* entry */
			   flow->req->qp, flow->tidcnt - 1,
			   flow->tid_entry[flow->tidcnt - 1]);

			/* Efficient DIV_ROUND_UP(npages, pmtu_pg) */
			flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg);
			npages = 0;
		}

		if (grp->used == grp->size - 1)
			tid_group_move(grp, &rcd->tid_used_list,
				       &rcd->tid_full_list);
		else if (!grp->used)
			tid_group_move(grp, &rcd->tid_group_list,
				       &rcd->tid_used_list);

		grp->used++;
		grp->map |= BIT(i);
		cnt++;
	}
}
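/*
 * Example of the packet-count math above (assumed values): 9 pages with a
 * 4K MTU (pmtu_pg = 1) contribute 9 packets, while the same 9 pages with an
 * 8K MTU (pmtu_pg = 2) contribute (9 + 1) >> 1 = 5 packets.
 */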
static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num)
{
	struct hfi1_ctxtdata *rcd = flow->req->rcd;
	struct hfi1_devdata *dd = rcd->dd;
	struct kern_tid_node *node = &flow->tnode[grp_num];
	struct tid_group *grp = node->grp;
	u32 rcventry;
	u8 i, cnt = 0;

	for (i = 0; i < grp->size; i++) {
		rcventry = grp->base + i;

		if (node->map & BIT(i) || cnt >= node->cnt) {
			rcv_array_wc_fill(dd, rcventry);
			continue;
		}

		hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);

		grp->used--;
		grp->map &= ~BIT(i);
		cnt++;

		if (grp->used == grp->size - 1)
			tid_group_move(grp, &rcd->tid_full_list,
				       &rcd->tid_used_list);
		else if (!grp->used)
			tid_group_move(grp, &rcd->tid_used_list,
				       &rcd->tid_group_list);
	}
	if (WARN_ON_ONCE(cnt & 1)) {
		struct hfi1_ctxtdata *rcd = flow->req->rcd;
		struct hfi1_devdata *dd = rcd->dd;

		dd_dev_err(dd, "unexpected odd free cnt %u map 0x%x used %u",
			   cnt, grp->map, grp->used);
	}
}
static void kern_program_rcvarray(struct tid_rdma_flow *flow)
{
	u32 pset_idx = 0;
	int i;

	flow->npkts = 0;
	flow->tidcnt = 0;
	for (i = 0; i < flow->tnode_cnt; i++)
		kern_program_rcv_group(flow, i, &pset_idx);
	trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
}
/**
 * hfi1_kern_exp_rcv_setup() - setup TID's and flow for one segment of a
 *			       TID RDMA request
 *
 * @req: TID RDMA request for which the segment/flow is being set up
 * @ss: sge state, maintains state across successive segments of a sge
 * @last: set to true after the last sge segment has been processed
 *
 * This function
 * (1) finds a free flow entry in the flow circular buffer
 * (2) finds pages and continuous physical chunks constituting one segment
 *     of an sge
 * (3) allocates TID group entries for those chunks
 * (4) programs rcvarray entries in the hardware corresponding to those
 *     TID's
 * (5) computes a tidarray with formatted TID entries which can be sent
 *     to the sender
 * (6) Reserves and programs HW flows.
 * (7) It also manages queuing the QP when TID/flow resources are not
 *     available.
 *
 * @req points to struct tid_rdma_request of which the segments are a part. The
 * function uses qp, rcd and seg_len members of @req. In the absence of errors,
 * req->flow_idx is the index of the flow which has been prepared in this
 * invocation of function call. With flow = &req->flows[req->flow_idx],
 * flow->tid_entry contains the TID array which the sender can use for TID RDMA
 * sends and flow->npkts contains number of packets required to send the
 * segment.
 *
 * hfi1_check_sge_align should be called prior to calling this function and if
 * it signals error TID RDMA cannot be used for this sge and this function
 * should not be called.
 *
 * For the queuing, caller must hold the flow->req->qp s_lock from the send
 * engine and the function will procure the exp_lock.
 *
 * Return:
 * The function returns -EAGAIN if sufficient number of TID/flow resources to
 * map the segment could not be allocated. In this case the function should be
 * called again with previous arguments to retry the TID allocation. There are
 * no other error returns. The function returns 0 on success.
 */
int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
			    struct rvt_sge_state *ss, bool *last)
	__must_hold(&req->qp->s_lock)
{
	struct tid_rdma_flow *flow = &req->flows[req->setup_head];
	struct hfi1_ctxtdata *rcd = req->rcd;
	struct hfi1_qp_priv *qpriv = req->qp->priv;
	unsigned long flags;
	struct rvt_qp *fqp;
	u16 clear_tail = req->clear_tail;

	lockdep_assert_held(&req->qp->s_lock);
	/*
	 * We return error if either (a) we don't have space in the flow
	 * circular buffer, or (b) we already have max entries in the buffer.
	 * Max entries depend on the type of request we are processing and the
	 * negotiated TID RDMA parameters.
	 */
	if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) ||
	    CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >=
	    req->n_flows)
		return -EINVAL;

	/*
	 * Get pages, identify contiguous physical memory chunks for the segment
	 * If we can not determine a DMA address mapping we will treat it just
	 * like if we ran out of space above.
	 */
	if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) {
		hfi1_wait_kmem(flow->req->qp);
		return -ENOMEM;
	}

	spin_lock_irqsave(&rcd->exp_lock, flags);
	if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
		goto queue;

	/*
	 * At this point we know the number of pagesets and hence the number of
	 * TID's to map the segment. Allocate the TID's from the TID groups. If
	 * we cannot allocate the required number we exit and try again later
	 */
	if (kern_alloc_tids(flow))
		goto queue;
	/*
	 * Finally program the TID entries with the pagesets, compute the
	 * tidarray and enable the HW flow
	 */
	kern_program_rcvarray(flow);

	/*
	 * Setup the flow state with relevant information.
	 * This information is used for tracking the sequence of data packets
	 * for the segment.
	 * The flow is setup here as this is the most accurate time and place
	 * to do so. Doing at a later time runs the risk of the flow data in
	 * qpriv getting out of sync.
	 */
	memset(&flow->flow_state, 0x0, sizeof(flow->flow_state));
	flow->idx = qpriv->flow_state.index;
	flow->flow_state.generation = qpriv->flow_state.generation;
	flow->flow_state.spsn = qpriv->flow_state.psn;
	flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1;
	flow->flow_state.r_next_psn =
		full_flow_psn(flow, flow->flow_state.spsn);
	qpriv->flow_state.psn += flow->npkts;

	dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
	/* get head before dropping lock */
	fqp = first_qp(rcd, &rcd->rarr_queue);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);
	tid_rdma_schedule_tid_wakeup(fqp);

	req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
	return 0;
queue:
	queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);
	return -EAGAIN;
}
static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow)
{
	flow->npagesets = 0;
}
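/*
 * Typical per-request sequence (sketch, not a verbatim call site):
 * hfi1_kern_setup_hw_flow() reserves the hardware flow for the QP,
 * hfi1_kern_exp_rcv_setup() is then called once per segment to program the
 * TID entries, and hfi1_kern_exp_rcv_clear() below releases each segment in
 * FIFO order through the flow circular buffer once it completes.
 */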
/*
 * This function is called after one segment has been successfully sent to
 * release the flow and TID HW/SW resources for that segment. The segments for a
 * TID RDMA request are setup and cleared in FIFO order which is managed using a
 * circular buffer.
 */
int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req)
	__must_hold(&req->qp->s_lock)
{
	struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
	struct hfi1_ctxtdata *rcd = req->rcd;
	unsigned long flags;
	int i;
	struct rvt_qp *fqp;

	lockdep_assert_held(&req->qp->s_lock);
	/* Exit if we have nothing in the flow circular buffer */
	if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS))
		return -EINVAL;

	spin_lock_irqsave(&rcd->exp_lock, flags);

	for (i = 0; i < flow->tnode_cnt; i++)
		kern_unprogram_rcv_group(flow, i);
	/* To prevent double unprogramming */
	flow->tnode_cnt = 0;
	/* get head before dropping lock */
	fqp = first_qp(rcd, &rcd->rarr_queue);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);

	dma_unmap_flow(flow);

	hfi1_tid_rdma_reset_flow(flow);
	req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1);

	if (fqp == req->qp) {
		__trigger_tid_waiter(fqp);
		rvt_put_qp(fqp);
	} else {
		tid_rdma_schedule_tid_wakeup(fqp);
	}

	return 0;
}
/*
 * This function is called to release all the tid entries for
 * a request.
 */
void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
	__must_hold(&req->qp->s_lock)
{
	/* Use memory barrier for proper ordering */
	while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) {
		if (hfi1_kern_exp_rcv_clear(req))
			break;
	}
}
/**
 * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
 * @req - the tid rdma request to be cleaned
 */
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
{
	kfree(req->flows);
	req->flows = NULL;
}
/**
 * __trdma_clean_swqe - clean up for large sized QPs
 * @qp: the queue pair
 * @wqe: the send wqe
 */
void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	struct hfi1_swqe_priv *p = wqe->priv;

	hfi1_kern_exp_rcv_free_flows(&p->tid_req);
}
/*
 * This can be called at QP create time or in the data path.
 */
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
					 gfp_t gfp)
{
	struct tid_rdma_flow *flows;
	int i;

	if (likely(req->flows))
		return 0;
	flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp,
			     req->rcd->numa_id);
	if (!flows)
		return -ENOMEM;
	/* mini init */
	for (i = 0; i < MAX_FLOWS; i++) {
		flows[i].req = req;
		flows[i].npagesets = 0;
		flows[i].pagesets[0].mapped =  0;
		flows[i].resync_npkts = 0;
	}
	req->flows = flows;
	return 0;
}
static void hfi1_init_trdma_req(struct rvt_qp *qp,
				struct tid_rdma_request *req)
{
	struct hfi1_qp_priv *qpriv = qp->priv;

	/*
	 * Initialize various TID RDMA request variables.
	 * These variables are "static", which is why they
	 * can be pre-initialized here before the WRs have
	 * even been submitted.
	 * However, non-NULL values for these variables do not
	 * imply that this WQE has been enabled for TID RDMA.
	 * Drivers should check the WQE's opcode to determine
	 * if a request is a TID RDMA one or not.
	 */
	req->qp = qp;
	req->rcd = qpriv->rcd;
}
u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
			    void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_tidwait;
}
static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
					  u32 psn, u16 *fidx)
{
	u16 head, tail;
	struct tid_rdma_flow *flow;

	head = req->setup_head;
	tail = req->clear_tail;
	for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
	     tail = CIRC_NEXT(tail, MAX_FLOWS)) {
		flow = &req->flows[tail];
		if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 &&
		    cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) {
			if (fidx)
				*fidx = tail;
			return flow;
		}
	}
	return NULL;
}
/* TID RDMA READ functions */
u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
				    struct ib_other_headers *ohdr, u32 *bth1,
				    u32 *bth2, u32 *len)
{
	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
	struct tid_rdma_flow *flow = &req->flows[req->flow_idx];
	struct rvt_qp *qp = req->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct hfi1_swqe_priv *wpriv = wqe->priv;
	struct tid_rdma_read_req *rreq = &ohdr->u.tid_rdma.r_req;
	struct tid_rdma_params *remote;
	u32 req_len = 0;
	void *req_addr = NULL;

	/* This is the IB psn used to send the request */
	*bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt);
	trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);

	/* TID Entries for TID RDMA READ payload */
	req_addr = &flow->tid_entry[flow->tid_idx];
	req_len = sizeof(*flow->tid_entry) *
			(flow->tidcnt - flow->tid_idx);

	memset(&ohdr->u.tid_rdma.r_req, 0, sizeof(ohdr->u.tid_rdma.r_req));
	wpriv->ss.sge.vaddr = req_addr;
	wpriv->ss.sge.sge_length = req_len;
	wpriv->ss.sge.length = wpriv->ss.sge.sge_length;
	/*
	 * We can safely zero these out. Since the first SGE covers the
	 * entire packet, nothing else should even look at the MR.
	 */
	wpriv->ss.sge.mr = NULL;
	wpriv->ss.sge.m = 0;
	wpriv->ss.sge.n = 0;

	wpriv->ss.sg_list = NULL;
	wpriv->ss.total_len = wpriv->ss.sge.sge_length;
	wpriv->ss.num_sge = 1;

	/* Construct the TID RDMA READ REQ packet header */
	rcu_read_lock();
	remote = rcu_dereference(qpriv->tid_rdma.remote);

	KDETH_RESET(rreq->kdeth0, KVER, 0x1);
	KDETH_RESET(rreq->kdeth1, JKEY, remote->jkey);
	rreq->reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr +
			   req->cur_seg * req->seg_len + flow->sent);
	rreq->reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey);
	rreq->reth.length = cpu_to_be32(*len);
	rreq->tid_flow_psn =
		cpu_to_be32((flow->flow_state.generation <<
			     HFI1_KDETH_BTH_SEQ_SHIFT) |
			    ((flow->flow_state.spsn + flow->pkt) &
			     HFI1_KDETH_BTH_SEQ_MASK));
	rreq->tid_flow_qp =
		cpu_to_be32(qpriv->tid_rdma.local.qp |
			    ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
			     TID_RDMA_DESTQP_FLOW_SHIFT) |
			    qpriv->rcd->ctxt);
	rreq->verbs_qp = cpu_to_be32(qp->remote_qpn);
	*bth1 &= ~RVT_QPN_MASK;
	*bth1 |= remote->qp;
	*bth2 |= IB_BTH_REQ_ACK;
	rcu_read_unlock();

	/* We are done with this segment */
	flow->sent += *len;
	req->cur_seg++;
	qp->s_state = TID_OP(READ_REQ);
	req->flow_idx = (req->flow_idx + 1) & (MAX_FLOWS - 1);
	qpriv->pending_tid_r_segs++;
	qp->s_num_rd_atomic++;

	/* Set the TID RDMA READ request payload size */
	*len = req_len;

	return sizeof(ohdr->u.tid_rdma.r_req) / sizeof(u32);
}
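/*
 * Note: the tid_flow_psn built above is a KDETH flow PSN: the flow generation
 * occupies the bits above HFI1_KDETH_BTH_SEQ_SHIFT and the per-flow sequence
 * number (spsn + pkt, masked with HFI1_KDETH_BTH_SEQ_MASK) occupies the lower
 * bits, matching the layout assumed by mask_generation() and MAX_TID_FLOW_PSN.
 */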
/*
 * @len: contains the data length to read upon entry and the read request
 *       payload length upon exit.
 */
u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
				 struct ib_other_headers *ohdr, u32 *bth1,
				 u32 *bth2, u32 *len)
	__must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
	struct tid_rdma_flow *flow = NULL;
	u32 hdwords = 0;
	bool last;
	u32 npkts = rvt_div_round_up_mtu(qp, *len);

	trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn,
					  wqe->lpsn, req);
	/*
	 * Check sync conditions. Make sure that there are no pending
	 * segments before freeing the flow.
	 */
sync_check:
	if (req->state == TID_REQUEST_SYNC) {
		if (qpriv->pending_tid_r_segs)
			goto done;

		hfi1_kern_clear_hw_flow(req->rcd, qp);
		qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
		req->state = TID_REQUEST_ACTIVE;
	}

	/*
	 * If the request for this segment is resent, the tid resources should
	 * have been allocated before. In this case, req->flow_idx should
	 * fall behind req->setup_head.
	 */
	if (req->flow_idx == req->setup_head) {
		if (req->state == TID_REQUEST_RESEND) {
			/*
			 * This is the first new segment for a request whose
			 * earlier segments have been re-sent. We need to
			 * set up the sge pointer correctly.
			 */
			restart_sge(&qp->s_sge, wqe, req->s_next_psn,
				    qp->pmtu);
			req->state = TID_REQUEST_ACTIVE;
		}

		/*
		 * Check sync. The last PSN of each generation is reserved for
		 * RESYNC.
		 */
		if ((qpriv->flow_state.psn + npkts) > MAX_TID_FLOW_PSN - 1) {
			req->state = TID_REQUEST_SYNC;
			goto sync_check;
		}

		/* Allocate the flow if not yet */
		if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp))
			goto done;

		/*
		 * The following call will advance req->setup_head after
		 * allocating the tid entries.
		 */
		if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) {
			req->state = TID_REQUEST_QUEUED;

			/*
			 * We don't have resources for this segment. The QP has
			 * already been queued.
			 */
			goto done;
		}
	}

	/* req->flow_idx should only be one slot behind req->setup_head */
	flow = &req->flows[req->flow_idx];
	flow->pkt = 0;
	flow->tid_idx = 0;
	flow->sent = 0;

	/* Set the first and last IB PSN for the flow in use.*/
	flow->flow_state.ib_spsn = req->s_next_psn;
	flow->flow_state.ib_lpsn =
		flow->flow_state.ib_spsn + flow->npkts - 1;

	/* Calculate the next segment start psn.*/
	req->s_next_psn += flow->npkts;

	/* Build the packet header */
	hdwords = hfi1_build_tid_rdma_read_packet(wqe, ohdr, bth1, bth2, len);
done:
	return hdwords;
}
/*
 * Validate and accept the TID RDMA READ request parameters.
 * Return 0 if the request is accepted successfully;
 * Return 1 otherwise.
 */
static int tid_rdma_rcv_read_request(struct rvt_qp *qp,
				     struct rvt_ack_entry *e,
				     struct hfi1_packet *packet,
				     struct ib_other_headers *ohdr,
				     u32 bth0, u32 psn, u64 vaddr, u32 len)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	u32 flow_psn, i, tidlen = 0, pktlen, tlen;

	req = ack_to_tid_req(e);

	/* Validate the payload first */
	flow = &req->flows[req->setup_head];

	/* payload length = packet length - (header length + ICRC length) */
	pktlen = packet->tlen - (packet->hlen + 4);
	if (pktlen > sizeof(flow->tid_entry))
		return 1;
	memcpy(flow->tid_entry, packet->ebuf, pktlen);
	flow->tidcnt = pktlen / sizeof(*flow->tid_entry);

	/*
	 * Walk the TID_ENTRY list to make sure we have enough space for a
	 * complete segment. Also calculate the number of required packets.
	 */
	flow->npkts = rvt_div_round_up_mtu(qp, len);
	for (i = 0; i < flow->tidcnt; i++) {
		trace_hfi1_tid_entry_rcv_read_req(qp, i,
						  flow->tid_entry[i]);
		tlen = EXP_TID_GET(flow->tid_entry[i], LEN);
		if (!tlen)
			return 1;

		/*
		 * For tid pair (tidctr == 3), the buffer size of the pair
		 * should be the sum of the buffer size described by each
		 * tid entry. However, only the first entry needs to be
		 * specified in the request (see WFR HAS Section 8.5.7.1).
		 */
		tidlen += tlen;
	}
	if (tidlen * PAGE_SIZE < len)
		return 1;

	/* Empty the flow array */
	req->clear_tail = req->setup_head;
	flow->pkt = 0;
	flow->tid_idx = 0;
	flow->tid_offset = 0;
	flow->sent = 0;
	flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp);
	flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
		    TID_RDMA_DESTQP_FLOW_MASK;
	flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_psn));
	flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
	flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
	flow->length = len;

	flow->flow_state.lpsn = flow->flow_state.spsn +
		flow->npkts - 1;
	flow->flow_state.ib_spsn = psn;
	flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1;

	trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
	/* Set the initial flow index to the current flow. */
	req->flow_idx = req->setup_head;

	/* advance circular buffer head */
	req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);

	/*
	 * Compute last PSN for request.
	 */
	e->opcode = (bth0 >> 24) & 0xff;
	e->psn = psn;
	e->lpsn = psn + flow->npkts - 1;
	e->sent = 0;

	req->n_flows = qpriv->tid_rdma.local.max_read;
	req->state = TID_REQUEST_ACTIVE;
	req->cur_seg = 0;
	req->comp_seg = 0;
	req->ack_seg = 0;
	req->isge = 0;
	req->seg_len = qpriv->tid_rdma.local.max_len;
	req->total_len = len;
	req->total_segs = 1;
	req->r_flow_psn = e->psn;

	trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn,
					req);
	return 0;
}
static int tid_rdma_rcv_error(struct hfi1_packet *packet,
			      struct ib_other_headers *ohdr,
			      struct rvt_qp *qp, u32 psn, int diff)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct rvt_ack_entry *e;
	struct tid_rdma_request *req;
	unsigned long flags;

	trace_hfi1_rsp_tid_rcv_error(qp, psn);
	trace_hfi1_tid_rdma_rcv_err(qp, 0, psn, diff);
	if (diff > 0) {
		/* sequence error */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			qp->r_ack_psn = qp->r_psn;
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);
	e = find_prev_entry(qp, psn, &prev, NULL, &old_req);
	if (!e || (e->opcode != TID_OP(READ_REQ) &&
		   e->opcode != TID_OP(WRITE_REQ)))
		goto unlock;

	req = ack_to_tid_req(e);
	req->r_flow_psn = psn;
	trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
	if (e->opcode == TID_OP(READ_REQ)) {
		struct ib_reth *reth;

		reth = &ohdr->u.tid_rdma.r_req.reth;
		/*
		 * The requester always restarts from the start of the original
		 * request.
		 */
		len = be32_to_cpu(reth->length);
		if (psn != e->psn || len != req->total_len)
			goto unlock;

		release_rdma_sge_mr(e);

		rkey = be32_to_cpu(reth->rkey);
		vaddr = get_ib_reth_vaddr(reth);

		ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
				 IB_ACCESS_REMOTE_READ);
		if (unlikely(!ok))
			goto unlock;

		/*
		 * If all the response packets for the current request have
		 * been sent out and this request is complete (old_request
		 * == false) and the TID flow may be unusable (the
		 * req->clear_tail is advanced). However, when an earlier
		 * request is received, this request will not be complete any
		 * more (qp->s_tail_ack_queue is moved back, see below).
		 * Consequently, we need to update the TID flow info every
		 * time a duplicate request is received.
		 */
		bth0 = be32_to_cpu(ohdr->bth[0]);
		if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn,
					      vaddr, len))
			goto unlock;

		/*
		 * True if the request is already scheduled (between
		 * qp->s_tail_ack_queue and qp->r_head_ack_queue);
		 */
		if (old_req)
			goto unlock;
	} else {
		struct flow_state *fstate;
		bool schedule = false;

		if (req->state == TID_REQUEST_RESEND) {
			req->state = TID_REQUEST_RESEND_ACTIVE;
		} else if (req->state == TID_REQUEST_INIT_RESEND) {
			req->state = TID_REQUEST_INIT;
			schedule = true;
		}

		/*
		 * True if the request is already scheduled (between
		 * qp->s_tail_ack_queue and qp->r_head_ack_queue).
		 * Also, don't change requests, which are at the SYNC
		 * point and haven't generated any responses yet.
		 * There is nothing to retransmit for them yet.
		 */
		if (old_req || req->state == TID_REQUEST_INIT ||
		    (req->state == TID_REQUEST_SYNC && !req->cur_seg)) {
			for (i = prev + 1; ; i++) {
				if (i > rvt_size_atomic(&dev->rdi))
					i = 0;
				if (i == qp->r_head_ack_queue)
					break;
				e = &qp->s_ack_queue[i];
				req = ack_to_tid_req(e);
				if (e->opcode == TID_OP(WRITE_REQ) &&
				    req->state == TID_REQUEST_INIT)
					req->state = TID_REQUEST_INIT_RESEND;
			}
			/*
			 * If the state of the request has been changed,
			 * the first leg needs to get scheduled in order to
			 * pick up the change. Otherwise, normal response
			 * processing should take care of it.
			 */
			if (!schedule)
				goto unlock;
			goto schedule;
		}

		/*
		 * If there is no more allocated segment, just schedule the qp
		 * without changing any state.
		 */
		if (req->clear_tail == req->setup_head)
			goto schedule;
		/*
		 * If this request has sent responses for segments, which have
		 * not received data yet (flow_idx != clear_tail), the flow_idx
		 * pointer needs to be adjusted so the same responses can be
		 * re-sent.
		 */
		if (CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS)) {
			fstate = &req->flows[req->clear_tail].flow_state;
			qpriv->pending_tid_w_segs -=
				CIRC_CNT(req->flow_idx, req->clear_tail,
					 MAX_FLOWS);
			req->clear_tail =
				CIRC_ADD(req->clear_tail,
					 delta_psn(psn, fstate->resp_ib_psn),
					 MAX_FLOWS);
			qpriv->pending_tid_w_segs +=
				delta_psn(psn, fstate->resp_ib_psn);
			/*
			 * When flow_idx == setup_head, we've gotten a duplicate
			 * request for a segment, which has not been allocated
			 * yet. In that case, don't adjust this request.
			 * However, we still want to go through the loop below
			 * to adjust all subsequent requests.
			 */
			if (CIRC_CNT(req->setup_head, req->flow_idx,
				     MAX_FLOWS)) {
				req->cur_seg = delta_psn(psn, e->psn);
				req->state = TID_REQUEST_RESEND_ACTIVE;
			}
		}

		for (i = prev + 1; ; i++) {
			/*
			 * Look at everything up to and including
			 * s_tail_ack_queue
			 */
			if (i > rvt_size_atomic(&dev->rdi))
				i = 0;
			if (i == qp->r_head_ack_queue)
				break;
			e = &qp->s_ack_queue[i];
			req = ack_to_tid_req(e);
			trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn,
						   e->lpsn, req);
			if (e->opcode != TID_OP(WRITE_REQ) ||
			    req->cur_seg == req->comp_seg ||
			    req->state == TID_REQUEST_INIT ||
			    req->state == TID_REQUEST_INIT_RESEND) {
				if (req->state == TID_REQUEST_INIT)
					req->state = TID_REQUEST_INIT_RESEND;
				continue;
			}
			qpriv->pending_tid_w_segs -=
				CIRC_CNT(req->flow_idx,
					 req->clear_tail,
					 MAX_FLOWS);
			req->flow_idx = req->clear_tail;
			req->state = TID_REQUEST_RESEND;
			req->cur_seg = req->comp_seg;
		}
		qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;
	}
	/* Re-process old requests. */
	if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
		qp->s_acked_ack_queue = prev;
	qp->s_tail_ack_queue = prev;
	/*
	 * Since the qp->s_tail_ack_queue is modified, the
	 * qp->s_ack_state must be changed to re-initialize
	 * qp->s_ack_rdma_sge; Otherwise, we will end up in
	 * wrong memory region.
	 */
	qp->s_ack_state = OP(ACKNOWLEDGE);
schedule:
	/*
	 * It's possible to receive a retry psn that is earlier than an RNR NAK
	 * psn. In this case, the rnrnak state should be cleared.
	 */
	if (qpriv->rnr_nak_state) {
		qp->s_nak_state = 0;
		qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
		qp->r_psn = e->lpsn + 1;
		hfi1_tid_write_alloc_resources(qp, true);
	}

	qp->r_state = e->opcode;
	qp->r_nak_state = 0;
	qp->s_flags |= RVT_S_RESP_PENDING;
	hfi1_schedule_send(qp);
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;
}
void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
{
	/* HANDLER FOR TID RDMA READ REQUEST packet (Responder side) */

	/*
	 * 1. Verify TID RDMA READ REQ as per IB_OPCODE_RC_RDMA_READ
	 *    (see hfi1_rc_rcv())
	 * 2. Put TID RDMA READ REQ into the response queue (s_ack_queue)
	 *    - Setup struct tid_rdma_req with request info
	 *    - Initialize struct tid_rdma_flow info;
	 *    - Copy TID entries;
	 * 3. Set the qp->s_ack_state.
	 * 4. Set RVT_S_RESP_PENDING in s_flags.
	 * 5. Kick the send engine (hfi1_schedule_send())
	 */
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_ack_entry *e;
	unsigned long flags;
	struct ib_reth *reth;
	struct hfi1_qp_priv *qpriv = qp->priv;
	u32 bth0, psn, len, rkey;
	bool fecn;
	u8 next;
	u64 vaddr;
	int diff;
	u8 nack_state = IB_NAK_INVALID_REQUEST;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

	fecn = process_ecn(qp, packet);
	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
	trace_hfi1_rsp_rcv_tid_read_req(qp, psn);

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
		goto nack_inv;

	reth = &ohdr->u.tid_rdma.r_req.reth;
	vaddr = be64_to_cpu(reth->vaddr);
	len = be32_to_cpu(reth->length);
	/* The length needs to be in multiples of PAGE_SIZE */
	if (!len || len & ~PAGE_MASK || len > qpriv->tid_rdma.local.max_len)
		goto nack_inv;

	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
		return;
	}

	/* We've verified the request, insert it into the ack queue. */
	next = qp->r_head_ack_queue + 1;
	if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
		next = 0;
	spin_lock_irqsave(&qp->s_lock, flags);
	if (unlikely(next == qp->s_tail_ack_queue)) {
		if (!qp->s_ack_queue[next].sent) {
			nack_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
			goto nack_inv_unlock;
		}
		update_ack_queue(qp, next);
	}
	e = &qp->s_ack_queue[qp->r_head_ack_queue];
	release_rdma_sge_mr(e);

	rkey = be32_to_cpu(reth->rkey);
	qp->r_len = len;

	if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
				  rkey, IB_ACCESS_REMOTE_READ)))
		goto nack_acc;

	/* Accept the request parameters */
	if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr,
				      len))
		goto nack_inv_unlock;

	qp->r_state = e->opcode;
	qp->r_nak_state = 0;
	/*
	 * We need to increment the MSN here instead of when we
	 * finish sending the result since a duplicate request would
	 * increment it more than once.
	 */
	qp->r_msn++;
	qp->r_psn += e->lpsn - e->psn + 1;

	qp->r_head_ack_queue = next;

	/*
	 * For all requests other than TID WRITE which are added to the ack
	 * queue, qpriv->r_tid_alloc follows qp->r_head_ack_queue. It is ok to
	 * do this because of interlocks between these and TID WRITE
	 * requests. The same change has also been made in hfi1_rc_rcv().
	 */
	qpriv->r_tid_alloc = qp->r_head_ack_queue;

	/* Schedule the send tasklet. */
	qp->s_flags |= RVT_S_RESP_PENDING;
	if (fecn)
		qp->s_flags |= RVT_S_ECN;
	hfi1_schedule_send(qp);

	spin_unlock_irqrestore(&qp->s_lock, flags);
	return;

nack_inv_unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = nack_state;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;
nack_acc:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
}
u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
				  struct ib_other_headers *ohdr, u32 *bth0,
				  u32 *bth1, u32 *bth2, u32 *len, bool *last)
{
	struct hfi1_ack_priv *epriv = e->priv;
	struct tid_rdma_request *req = &epriv->tid_req;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
	u32 tidentry = flow->tid_entry[flow->tid_idx];
	u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT;
	struct tid_rdma_read_resp *resp = &ohdr->u.tid_rdma.r_rsp;
	u32 next_offset, om = KDETH_OM_LARGE;
	bool last_pkt;
	u32 hdwords = 0;
	struct tid_rdma_params *remote;

	*len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
	flow->sent += *len;
	next_offset = flow->tid_offset + *len;
	last_pkt = (flow->sent >= flow->length);

	trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry);
	trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);

	rcu_read_lock();
	remote = rcu_dereference(qpriv->tid_rdma.remote);
	if (!remote) {
		rcu_read_unlock();
		goto done;
	}
	KDETH_RESET(resp->kdeth0, KVER, 0x1);
	KDETH_SET(resp->kdeth0, SH, !last_pkt);
	KDETH_SET(resp->kdeth0, INTR, !!(!last_pkt && remote->urg));
	KDETH_SET(resp->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL));
	KDETH_SET(resp->kdeth0, TID, EXP_TID_GET(tidentry, IDX));
	KDETH_SET(resp->kdeth0, OM, om == KDETH_OM_LARGE);
	KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om);
	KDETH_RESET(resp->kdeth1, JKEY, remote->jkey);
	resp->verbs_qp = cpu_to_be32(qp->remote_qpn);
	rcu_read_unlock();

	resp->aeth = rvt_compute_aeth(qp);
	resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn +
					       flow->pkt));

	*bth0 = TID_OP(READ_RESP) << 24;
	*bth1 = flow->tid_qpn;
	*bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
			  HFI1_KDETH_BTH_SEQ_MASK) |
			 (flow->flow_state.generation <<
			  HFI1_KDETH_BTH_SEQ_SHIFT));

	*last = last_pkt;
	if (last_pkt)
		/* Advance to next flow */
		req->clear_tail = (req->clear_tail + 1) &
				  (MAX_FLOWS - 1);

	if (next_offset >= tidlen) {
		flow->tid_offset = 0;
		flow->tid_idx++;
	} else {
		flow->tid_offset = next_offset;
	}

	hdwords = sizeof(ohdr->u.tid_rdma.r_rsp) / sizeof(u32);

done:
	return hdwords;
}
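
/*
 * Illustrative sketch (not part of the driver): how the 32-bit KDETH BTH PSN
 * carried in the response above is assembled from a flow's generation and
 * sequence number, assuming the HFI1_KDETH_BTH_SEQ_SHIFT/MASK layout used
 * throughout this file. The helper name is hypothetical.
 */
static inline u32 example_kdeth_psn(u32 generation, u32 seq)
{
	return mask_psn((generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
			(seq & HFI1_KDETH_BTH_SEQ_MASK));
}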
static inline struct tid_rdma_request *
find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode)
	__must_hold(&qp->s_lock)
{
	struct rvt_swqe *wqe;
	struct tid_rdma_request *req = NULL;
	u32 i, end;

	end = qp->s_cur + 1;
	if (end == qp->s_size)
		end = 0;
	for (i = qp->s_acked; i != end;) {
		wqe = rvt_get_swqe_ptr(qp, i);
		if (cmp_psn(psn, wqe->psn) >= 0 &&
		    cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == opcode)
				req = wqe_to_tid_req(wqe);
			break;
		}
		if (++i == qp->s_size)
			i = 0;
	}

	return req;
}
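
/*
 * Illustrative sketch (not part of the driver): find_tid_request() above
 * matches a response PSN to a send WQE by checking that the PSN falls inside
 * the WQE's [psn, lpsn] window using the circular comparison cmp_psn(). A
 * stand-alone form of that window test might look like this (the helper name
 * is hypothetical).
 */
static inline bool example_psn_in_window(u32 psn, u32 spsn, u32 lpsn)
{
	return cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0;
}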
void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
{
	/* HANDLER FOR TID RDMA READ RESPONSE packet (Requester side) */

	/*
	 * 1. Find matching SWQE
	 * 2. Check that the entire segment has been read.
	 * 3. Remove HFI1_S_WAIT_TID_RESP from s_flags.
	 * 4. Free the TID flow resources.
	 * 5. Kick the send engine (hfi1_schedule_send())
	 */
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	u32 opcode, aeth, kpsn, ipsn;
	bool fecn;
	unsigned long flags;

	trace_hfi1_sender_rcv_tid_read_resp(qp);
	fecn = process_ecn(qp, packet);
	kpsn = mask_psn(be32_to_cpu(ohdr->bth[2]));
	aeth = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.aeth);
	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;

	spin_lock_irqsave(&qp->s_lock, flags);
	ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
	req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ);
	if (unlikely(!req))
		goto ack_op_err;

	flow = &req->flows[req->clear_tail];
	/* When header suppression is disabled */
	if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) {
		update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);

		if (cmp_psn(kpsn, flow->flow_state.r_next_psn))
			goto ack_done;
		flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
		/*
		 * Copy the payload to destination buffer if this packet is
		 * delivered as an eager packet due to RSM rule and FECN.
		 * The RSM rule selects FECN bit in BTH and SH bit in
		 * KDETH header and therefore will not match the last
		 * packet of each segment that has SH bit cleared.
		 */
		if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) {
			struct rvt_sge_state ss;
			u32 len;
			u32 tlen = packet->tlen;
			u16 hdrsize = packet->hlen;
			u8 pad = packet->pad;
			u8 extra_bytes = pad + packet->extra_byte +
				(SIZE_OF_CRC << 2);
			u32 pmtu = qp->pmtu;

			if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
				goto ack_op_err;
			len = restart_sge(&ss, req->e.swqe, ipsn, pmtu);
			if (unlikely(len < pmtu))
				goto ack_op_err;
			rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
				     false);
			/* Raise the sw sequence check flag for next packet */
			priv->s_flags |= HFI1_R_TID_SW_PSN;
		}

		goto ack_done;
	}
	flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
	priv->pending_tid_r_segs--;
	qp->s_num_rd_atomic--;
	if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
	    !qp->s_num_rd_atomic) {
		qp->s_flags &= ~(RVT_S_WAIT_FENCE |
				 RVT_S_WAIT_ACK);
		hfi1_schedule_send(qp);
	}
	if (qp->s_flags & RVT_S_WAIT_RDMAR) {
		qp->s_flags &= ~(RVT_S_WAIT_RDMAR | RVT_S_WAIT_ACK);
		hfi1_schedule_send(qp);
	}

	trace_hfi1_ack(qp, ipsn);
	trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode,
					 req->e.swqe->psn, req->e.swqe->lpsn,
					 req);
	trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);

	/* Release the tid resources */
	hfi1_kern_exp_rcv_clear(req);

	if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd))
		goto ack_done;

	/* If not done yet, build next read request */
	if (++req->comp_seg >= req->total_segs) {
		priv->tid_r_comp++;
		req->state = TID_REQUEST_COMPLETE;
	}

	/*
	 * Clear the hw flow under two conditions:
	 * 1. This request is a sync point and it is complete;
	 * 2. Current request is completed and there are no more requests.
	 */
	if ((req->state == TID_REQUEST_SYNC &&
	     req->comp_seg == req->cur_seg) ||
	    priv->tid_r_comp == priv->tid_r_reqs) {
		hfi1_kern_clear_hw_flow(priv->rcd, qp);
		priv->s_flags &= ~HFI1_R_TID_SW_PSN;
		if (req->state == TID_REQUEST_SYNC)
			req->state = TID_REQUEST_ACTIVE;
	}

	hfi1_schedule_send(qp);
	goto ack_done;

ack_op_err:
	/*
	 * The test indicates that the send engine has finished its cleanup
	 * after sending the request and it's now safe to put the QP into error
	 * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail
	 * == qp->s_head), it would be unsafe to complete the wqe pointed by
	 * qp->s_acked here. Putting the qp into error state will safely flush
	 * all remaining requests.
	 */
	if (qp->s_last == qp->s_acked)
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);

ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe;
	struct tid_rdma_request *req;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Free any TID entries */
	while (n != qp->s_tail) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
			req = wqe_to_tid_req(wqe);
			hfi1_kern_exp_rcv_clear_all(req);
		}

		if (++n == qp->s_size)
			n = 0;
	}
	/* Free flow */
	hfi1_kern_clear_hw_flow(priv->rcd, qp);
}
static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
			     struct hfi1_packet *packet, u8 rcv_type,
			     u8 opcode)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	u32 ipsn;
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_ack_entry *e;
	struct tid_rdma_request *req;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u32 i;

	if (rcv_type >= RHF_RCV_TYPE_IB)
		goto done;

	spin_lock(&qp->s_lock);

	/*
	 * We've run out of space in the eager buffer.
	 * Eagerly received KDETH packets which require space in the
	 * Eager buffer (packets that have payload) are TID RDMA WRITE
	 * response packets. In this case, we have to re-transmit the
	 * TID RDMA WRITE request.
	 */
	if (rcv_type == RHF_RCV_TYPE_EAGER) {
		hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
		hfi1_schedule_send(qp);
		goto done_unlock;
	}

	/*
	 * For TID READ response, error out QP after freeing the tid
	 * resources.
	 */
	if (opcode == TID_OP(READ_RESP)) {
		ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
		if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
		    cmp_psn(ipsn, qp->s_psn) < 0) {
			hfi1_kern_read_tid_flow_free(qp);
			spin_unlock(&qp->s_lock);
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto done;
		}
		goto done_unlock;
	}

	/*
	 * Error out the qp for TID RDMA WRITE
	 */
	hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
	for (i = 0; i < rvt_max_atomic(rdi); i++) {
		e = &qp->s_ack_queue[i];
		if (e->opcode == TID_OP(WRITE_REQ)) {
			req = ack_to_tid_req(e);
			hfi1_kern_exp_rcv_clear_all(req);
		}
	}
	spin_unlock(&qp->s_lock);
	rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
	goto done;

done_unlock:
	spin_unlock(&qp->s_lock);
done:
	return true;
}
static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd,
				      struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;

	/* Start from the right segment */
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	req = wqe_to_tid_req(wqe);
	flow = &req->flows[req->clear_tail];
	hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
/*
 * Handle the KDETH eflags for TID RDMA READ response.
 *
 * Return true if the last packet for a segment has been received and it is
 * time to process the response normally; otherwise, return false.
 *
 * The caller must hold the packet->qp->r_lock and the rcu_read_lock.
 */
static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
				     struct hfi1_packet *packet, u8 rcv_type,
				     u8 rte, u32 psn, u32 ibpsn)
	__must_hold(&packet->qp->r_lock) __must_hold(RCU)
{
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ibport *ibp;
	struct rvt_swqe *wqe;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	u32 ack_psn, fpsn;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	bool ret = true;
	int diff = 0;

	lockdep_assert_held(&qp->r_lock);
	spin_lock(&qp->s_lock);
	/* If the psn is out of valid range, drop the packet */
	if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
	    cmp_psn(ibpsn, qp->s_psn) > 0)
		goto s_unlock;

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.
	 */
	ack_psn = ibpsn - 1;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/* Complete WQEs that the PSN finishes. */
	while ((int)delta_psn(ack_psn, wqe->lpsn) >= 0) {
		/*
		 * If this request is a RDMA read or atomic, and the NACK is
		 * for a later operation, this NACK NAKs the RDMA read or
		 * atomic.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
					restart_tid_rdma_read_req(rcd, qp,
								  wqe);
				} else {
					hfi1_restart_rc(qp, qp->s_last_psn + 1,
							0);
					if (list_empty(&qp->rspwait)) {
						qp->r_flags |= RVT_R_RSP_SEND;
						rvt_get_qp(qp);
						list_add_tail(/* wait */
							&qp->rspwait,
							&rcd->qp_wait_list);
					}
				}
			}
			/*
			 * No need to process the NAK since we are
			 * restarting an earlier request.
			 */
			break;
		}

		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			goto s_unlock;
	}

	/* Handle the eflags for the request */
	if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
		goto s_unlock;

	req = wqe_to_tid_req(wqe);
	switch (rcv_type) {
	case RHF_RCV_TYPE_EXPECTED:
		switch (rte) {
		case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
			/*
			 * On the first occurrence of a Flow Sequence error,
			 * the flag TID_FLOW_SW_PSN is set.
			 *
			 * After that, the flow is *not* reprogrammed and the
			 * protocol falls back to SW PSN checking. This is done
			 * to prevent continuous Flow Sequence errors for any
			 * packets that could be still in the fabric.
			 */
			flow = &req->flows[req->clear_tail];
			if (priv->s_flags & HFI1_R_TID_SW_PSN) {
				diff = cmp_psn(psn,
					       flow->flow_state.r_next_psn);
				if (diff > 0) {
					if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
						restart_tid_rdma_read_req(rcd,
									  qp,
									  wqe);

					/* Drop the packet. */
					goto s_unlock;
				} else if (diff < 0) {
					/*
					 * If a response packet for a restarted
					 * request has come back, reset the
					 * restart flag.
					 */
					if (qp->r_flags & RVT_R_RDMAR_SEQ)
						qp->r_flags &=
							~RVT_R_RDMAR_SEQ;

					/* Drop the packet. */
					goto s_unlock;
				}

				/*
				 * If SW PSN verification is successful and
				 * this is the last packet in the segment, tell
				 * the caller to process it as a normal packet.
				 */
				fpsn = full_flow_psn(flow,
						     flow->flow_state.lpsn);
				if (cmp_psn(fpsn, psn) == 0) {
					ret = false;
					if (qp->r_flags & RVT_R_RDMAR_SEQ)
						qp->r_flags &=
							~RVT_R_RDMAR_SEQ;
				}
				flow->flow_state.r_next_psn =
					mask_psn(psn + 1);
			} else {
				u32 last_psn;

				last_psn = read_r_next_psn(dd, rcd->ctxt,
							   flow->idx);
				flow->flow_state.r_next_psn = last_psn;
				priv->s_flags |= HFI1_R_TID_SW_PSN;
				/*
				 * If no request has been restarted yet,
				 * restart the current one.
				 */
				if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
					restart_tid_rdma_read_req(rcd, qp,
								  wqe);
			}
			break;

		case RHF_RTE_EXPECTED_FLOW_GEN_ERR:
			/*
			 * Since the TID flow is able to ride through
			 * generation mismatch, drop this stale packet.
			 */
			break;

		default:
			break;
		}
		break;

	case RHF_RCV_TYPE_ERROR:
		switch (rte) {
		case RHF_RTE_ERROR_OP_CODE_ERR:
		case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR:
		case RHF_RTE_ERROR_KHDR_HCRC_ERR:
		case RHF_RTE_ERROR_KHDR_KVER_ERR:
		case RHF_RTE_ERROR_CONTEXT_ERR:
		case RHF_RTE_ERROR_KHDR_TID_ERR:
		default:
			break;
		}
		break;
	default:
		break;
	}

s_unlock:
	spin_unlock(&qp->s_lock);
	return ret;
}
bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
			      struct hfi1_pportdata *ppd,
			      struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	struct hfi1_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	u8 rcv_type = rhf_rcv_type(packet->rhf);
	u8 rte = rhf_rcv_type_err(packet->rhf);
	struct ib_header *hdr = packet->hdr;
	struct ib_other_headers *ohdr = NULL;
	int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	u16 lid  = be16_to_cpu(hdr->lrh[1]);
	u8 opcode;
	u32 qp_num, psn, ibpsn;
	struct rvt_qp *qp;
	struct hfi1_qp_priv *qpriv;
	unsigned long flags;
	bool ret = true;
	struct rvt_ack_entry *e;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	int diff = 0;

	trace_hfi1_msg_handle_kdeth_eflags(NULL, "Kdeth error: rhf ",
					   packet->rhf);
	if (packet->rhf & RHF_ICRC_ERR)
		return ret;

	packet->ohdr = &hdr->u.oth;
	ohdr = packet->ohdr;
	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_qp) &
		RVT_QPN_MASK;
	if (lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		goto drop;

	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;

	rcu_read_lock();
	qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
	if (!qp)
		goto rcu_unlock;

	packet->qp = qp;

	/* Check for valid receive state. */
	spin_lock_irqsave(&qp->r_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto r_unlock;
	}

	if (packet->rhf & RHF_TID_ERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */

		/* Sanity check packet */

		/*
		 * Check for GRH. We should never get packets with GRH in this
		 * path.
		 */
		if (lnh == HFI1_LRH_GRH)
			goto r_unlock;

		if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
			goto r_unlock;
	}

	/* handle TID RDMA READ */
	if (opcode == TID_OP(READ_RESP)) {
		ibpsn = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn);
		ibpsn = mask_psn(ibpsn);
		ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn,
					       ibpsn);
		goto r_unlock;
	}

	/*
	 * qp->s_tail_ack_queue points to the rvt_ack_entry currently being
	 * processed. These are completed sequentially so we can be sure that
	 * the pointer will not change until the entire request has completed.
	 */
	spin_lock(&qp->s_lock);
	qpriv = qp->priv;
	e = &qp->s_ack_queue[qpriv->r_tid_tail];
	req = ack_to_tid_req(e);
	flow = &req->flows[req->clear_tail];
	trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
	trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
	trace_hfi1_tid_write_rsp_handle_kdeth_eflags(qp);
	trace_hfi1_tid_req_handle_kdeth_eflags(qp, 0, e->opcode, e->psn,
					       e->lpsn, req);
	trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow);

	switch (rcv_type) {
	case RHF_RCV_TYPE_EXPECTED:
		switch (rte) {
		case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
			if (!(qpriv->s_flags & HFI1_R_TID_SW_PSN)) {
				qpriv->s_flags |= HFI1_R_TID_SW_PSN;
				flow->flow_state.r_next_psn =
					read_r_next_psn(dd, rcd->ctxt,
							flow->idx);
				qpriv->r_next_psn_kdeth =
					flow->flow_state.r_next_psn;
				goto nak_psn;
			} else {
				/*
				 * If the received PSN does not match the next
				 * expected PSN, NAK the packet.
				 * However, only do that if we know that a
				 * NAK has already been sent. Otherwise, this
				 * mismatch could be due to packets that were
				 * already in flight.
				 */
				diff = cmp_psn(psn,
					       flow->flow_state.r_next_psn);
				if (diff > 0)
					goto nak_psn;
				else if (diff < 0)
					break;

				qpriv->s_nak_state = 0;
				/*
				 * If SW PSN verification is successful and this
				 * is the last packet in the segment, tell the
				 * caller to process it as a normal packet.
				 */
				if (psn == full_flow_psn(flow,
							 flow->flow_state.lpsn))
					ret = false;
				flow->flow_state.r_next_psn =
					mask_psn(psn + 1);
				qpriv->r_next_psn_kdeth =
					flow->flow_state.r_next_psn;
			}
			break;

		case RHF_RTE_EXPECTED_FLOW_GEN_ERR:
			goto nak_psn;

		default:
			break;
		}
		break;

	case RHF_RCV_TYPE_ERROR:
		switch (rte) {
		case RHF_RTE_ERROR_OP_CODE_ERR:
		case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR:
		case RHF_RTE_ERROR_KHDR_HCRC_ERR:
		case RHF_RTE_ERROR_KHDR_KVER_ERR:
		case RHF_RTE_ERROR_CONTEXT_ERR:
		case RHF_RTE_ERROR_KHDR_TID_ERR:
		default:
			break;
		}
		break;
	default:
		break;
	}

unlock:
	spin_unlock(&qp->s_lock);
r_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
rcu_unlock:
	rcu_read_unlock();
drop:
	return ret;
nak_psn:
	ibp->rvp.n_rc_seqnak++;
	if (!qpriv->s_nak_state) {
		qpriv->s_nak_state = IB_NAK_PSN_ERROR;
		/* We are NAK'ing the next expected PSN */
		qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
		qpriv->s_flags |= RVT_S_ACK_PENDING;
		if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
			qpriv->r_tid_ack = qpriv->r_tid_tail;
		hfi1_schedule_tid_send(qp);
	}
	goto unlock;
}
3045 * "Rewind" the TID request information.
3046 * This means that we reset the state back to ACTIVE,
3047 * find the proper flow, set the flow index to that flow,
3048 * and reset the flow information.
3050 void hfi1_tid_rdma_restart_req(struct rvt_qp
*qp
, struct rvt_swqe
*wqe
,
3053 struct tid_rdma_request
*req
= wqe_to_tid_req(wqe
);
3054 struct tid_rdma_flow
*flow
;
3055 struct hfi1_qp_priv
*qpriv
= qp
->priv
;
3056 int diff
, delta_pkts
;
3060 if (wqe
->wr
.opcode
== IB_WR_TID_RDMA_READ
) {
3061 *bth2
= mask_psn(qp
->s_psn
);
3062 flow
= find_flow_ib(req
, *bth2
, &fidx
);
3064 trace_hfi1_msg_tid_restart_req(/* msg */
3065 qp
, "!!!!!! Could not find flow to restart: bth2 ",
3067 trace_hfi1_tid_req_restart_req(qp
, 0, wqe
->wr
.opcode
,
3068 wqe
->psn
, wqe
->lpsn
,
3073 fidx
= req
->acked_tail
;
3074 flow
= &req
->flows
[fidx
];
3075 *bth2
= mask_psn(req
->r_ack_psn
);
3078 if (wqe
->wr
.opcode
== IB_WR_TID_RDMA_READ
)
3079 delta_pkts
= delta_psn(*bth2
, flow
->flow_state
.ib_spsn
);
3081 delta_pkts
= delta_psn(*bth2
,
3083 flow
->flow_state
.spsn
));
3085 trace_hfi1_tid_flow_restart_req(qp
, fidx
, flow
);
3086 diff
= delta_pkts
+ flow
->resync_npkts
;
3091 flow
->tid_offset
= 0;
3093 for (tididx
= 0; tididx
< flow
->tidcnt
; tididx
++) {
3094 u32 tidentry
= flow
->tid_entry
[tididx
], tidlen
,
3097 flow
->tid_offset
= 0;
3098 tidlen
= EXP_TID_GET(tidentry
, LEN
) * PAGE_SIZE
;
3099 tidnpkts
= rvt_div_round_up_mtu(qp
, tidlen
);
3100 npkts
= min_t(u32
, diff
, tidnpkts
);
3102 flow
->sent
+= (npkts
== tidnpkts
? tidlen
:
3104 flow
->tid_offset
+= npkts
* qp
->pmtu
;
3110 if (wqe
->wr
.opcode
== IB_WR_TID_RDMA_WRITE
) {
3111 rvt_skip_sge(&qpriv
->tid_ss
, (req
->cur_seg
* req
->seg_len
) +
3114 * Packet PSN is based on flow_state.spsn + flow->pkt. However,
3115 * during a RESYNC, the generation is incremented and the
3116 * sequence is reset to 0. Since we've adjusted the npkts in the
3117 * flow and the SGE has been sufficiently advanced, we have to
3118 * adjust flow->pkt in order to calculate the correct PSN.
3120 flow
->pkt
-= flow
->resync_npkts
;
3123 if (flow
->tid_offset
==
3124 EXP_TID_GET(flow
->tid_entry
[tididx
], LEN
) * PAGE_SIZE
) {
3126 flow
->tid_offset
= 0;
3128 flow
->tid_idx
= tididx
;
3129 if (wqe
->wr
.opcode
== IB_WR_TID_RDMA_READ
)
3130 /* Move flow_idx to correct index */
3131 req
->flow_idx
= fidx
;
3133 req
->clear_tail
= fidx
;
3135 trace_hfi1_tid_flow_restart_req(qp
, fidx
, flow
);
3136 trace_hfi1_tid_req_restart_req(qp
, 0, wqe
->wr
.opcode
, wqe
->psn
,
3138 req
->state
= TID_REQUEST_ACTIVE
;
3139 if (wqe
->wr
.opcode
== IB_WR_TID_RDMA_WRITE
) {
3140 /* Reset all the flows that we are going to resend */
3141 fidx
= CIRC_NEXT(fidx
, MAX_FLOWS
);
3142 i
= qpriv
->s_tid_tail
;
3144 for (; CIRC_CNT(req
->setup_head
, fidx
, MAX_FLOWS
);
3145 fidx
= CIRC_NEXT(fidx
, MAX_FLOWS
)) {
3146 req
->flows
[fidx
].sent
= 0;
3147 req
->flows
[fidx
].pkt
= 0;
3148 req
->flows
[fidx
].tid_idx
= 0;
3149 req
->flows
[fidx
].tid_offset
= 0;
3150 req
->flows
[fidx
].resync_npkts
= 0;
3152 if (i
== qpriv
->s_tid_cur
)
3155 i
= (++i
== qp
->s_size
? 0 : i
);
3156 wqe
= rvt_get_swqe_ptr(qp
, i
);
3157 } while (wqe
->wr
.opcode
!= IB_WR_TID_RDMA_WRITE
);
3158 req
= wqe_to_tid_req(wqe
);
3159 req
->cur_seg
= req
->ack_seg
;
3160 fidx
= req
->acked_tail
;
3161 /* Pull req->clear_tail back */
3162 req
->clear_tail
= fidx
;
void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{
	int i, ret;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_flow_state *fs;

	if (qp->ibqp.qp_type != IB_QPT_RC || !HFI1_CAP_IS_KSET(TID_RDMA))
		return;

	/*
	 * First, clear the flow to help prevent any delayed packets from
	 * being delivered.
	 */
	fs = &qpriv->flow_state;
	if (fs->index != RXE_NUM_TID_FLOWS)
		hfi1_kern_clear_hw_flow(qpriv->rcd, qp);

	for (i = qp->s_acked; i != qp->s_head;) {
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);

		if (++i == qp->s_size)
			i = 0;
		/* Free only locally allocated TID entries */
		if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
			continue;
		do {
			struct hfi1_swqe_priv *priv = wqe->priv;

			ret = hfi1_kern_exp_rcv_clear(&priv->tid_req);
		} while (!ret);
	}
	for (i = qp->s_acked_ack_queue; i != qp->r_head_ack_queue;) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[i];

		if (++i == rvt_max_atomic(ib_to_rvt(qp->ibqp.device)))
			i = 0;
		/* Free only locally allocated TID entries */
		if (e->opcode != TID_OP(WRITE_REQ))
			continue;
		do {
			struct hfi1_ack_priv *priv = e->priv;

			ret = hfi1_kern_exp_rcv_clear(&priv->tid_req);
		} while (!ret);
	}
}
bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	struct rvt_swqe *prev;
	struct hfi1_qp_priv *priv = qp->priv;
	u32 s_prev;
	struct tid_rdma_request *req;

	s_prev = (qp->s_cur == 0 ? qp->s_size : qp->s_cur) - 1;
	prev = rvt_get_swqe_ptr(qp, s_prev);

	switch (wqe->wr.opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND_WITH_INV:
	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
	case IB_WR_RDMA_WRITE:
		switch (prev->wr.opcode) {
		case IB_WR_TID_RDMA_WRITE:
			req = wqe_to_tid_req(prev);
			if (req->ack_seg != req->total_segs)
				goto interlock;
		default:
			break;
		}
		break;
	case IB_WR_RDMA_READ:
		if (prev->wr.opcode != IB_WR_TID_RDMA_WRITE)
			break;
		/* fall through */
	case IB_WR_TID_RDMA_READ:
		switch (prev->wr.opcode) {
		case IB_WR_RDMA_READ:
			if (qp->s_acked != qp->s_cur)
				goto interlock;
			break;
		case IB_WR_TID_RDMA_WRITE:
			req = wqe_to_tid_req(prev);
			if (req->ack_seg != req->total_segs)
				goto interlock;
		default:
			break;
		}
	default:
		break;
	}
	return false;

interlock:
	priv->s_flags |= HFI1_S_TID_WAIT_INTERLCK;
	return true;
}
/* Does @sge meet the alignment requirements for tid rdma? */
static inline bool hfi1_check_sge_align(struct rvt_qp *qp,
					struct rvt_sge *sge, int num_sge)
{
	int i;

	for (i = 0; i < num_sge; i++, sge++) {
		trace_hfi1_sge_check_align(qp, i, sge);
		if ((u64)sge->vaddr & ~PAGE_MASK ||
		    sge->sge_length & ~PAGE_MASK)
			return false;
	}
	return true;
}
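
/*
 * Illustrative sketch (not part of the driver): the alignment test above
 * accepts a buffer only if both its start address and its length are page
 * multiples, which is what the TID RDMA qualification rules in the DOC block
 * require. A stand-alone form of the same predicate (hypothetical name):
 */
static inline bool example_page_aligned(u64 vaddr, u32 length)
{
	/* ~PAGE_MASK keeps the in-page offset bits; any set bit means misaligned */
	return !(vaddr & ~PAGE_MASK) && !(length & ~PAGE_MASK);
}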
void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
	struct hfi1_swqe_priv *priv = wqe->priv;
	struct tid_rdma_params *remote;
	enum ib_wr_opcode new_opcode;
	bool do_tid_rdma = false;
	struct hfi1_pportdata *ppd = qpriv->rcd->ppd;

	if ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ppd->lmc) - 1)) ==
				ppd->lid)
		return;
	if (qpriv->hdr_type != HFI1_PKT_TYPE_9B)
		return;

	rcu_read_lock();
	remote = rcu_dereference(qpriv->tid_rdma.remote);
	/*
	 * If TID RDMA is disabled by the negotiation, don't
	 * use it.
	 */
	if (!remote)
		goto exit;

	if (wqe->wr.opcode == IB_WR_RDMA_READ) {
		if (hfi1_check_sge_align(qp, &wqe->sg_list[0],
					 wqe->wr.num_sge)) {
			new_opcode = IB_WR_TID_RDMA_READ;
			do_tid_rdma = true;
		}
	} else if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
		/*
		 * TID RDMA is enabled for this RDMA WRITE request iff:
		 *   1. The remote address is page-aligned,
		 *   2. The length is larger than the minimum segment size,
		 *   3. The length is page-multiple.
		 */
		if (!(wqe->rdma_wr.remote_addr & ~PAGE_MASK) &&
		    !(wqe->length & ~PAGE_MASK)) {
			new_opcode = IB_WR_TID_RDMA_WRITE;
			do_tid_rdma = true;
		}
	}

	if (do_tid_rdma) {
		if (hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req, GFP_ATOMIC))
			goto exit;
		wqe->wr.opcode = new_opcode;
		priv->tid_req.seg_len =
			min_t(u32, remote->max_len, wqe->length);
		priv->tid_req.total_segs =
			DIV_ROUND_UP(wqe->length, priv->tid_req.seg_len);
		/* Compute the last PSN of the request */
		wqe->lpsn = wqe->psn;
		if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
			priv->tid_req.n_flows = remote->max_read;
			qpriv->tid_r_reqs++;
			wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1;
		} else {
			wqe->lpsn += priv->tid_req.total_segs - 1;
			atomic_inc(&qpriv->n_requests);
		}

		priv->tid_req.cur_seg = 0;
		priv->tid_req.comp_seg = 0;
		priv->tid_req.ack_seg = 0;
		priv->tid_req.state = TID_REQUEST_INACTIVE;
		/*
		 * Reset acked_tail.
		 * TID RDMA READ does not have ACKs so it does not
		 * update the pointer. We have to reset it so TID RDMA
		 * WRITE does not get confused.
		 */
		priv->tid_req.acked_tail = priv->tid_req.setup_head;
		trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
						 wqe->psn, wqe->lpsn,
						 &priv->tid_req);
	}
exit:
	rcu_read_unlock();
}
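
/*
 * Illustrative sketch (not part of the driver): setup_tid_rdma_wqe() above
 * caps the segment size at the negotiated remote->max_len and derives the
 * segment count (and for TID RDMA WRITE, the last PSN as psn + segs - 1)
 * from it. The helper below redoes that arithmetic in isolation; the name
 * and stand-alone form are hypothetical, and length must be non-zero since
 * 0-length requests are rejected elsewhere.
 */
static inline u32 example_tid_write_total_segs(u32 length, u32 max_seg_len)
{
	u32 seg_len = min_t(u32, max_seg_len, length);

	return DIV_ROUND_UP(length, seg_len);
}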
/* TID RDMA WRITE functions */

u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
				  struct ib_other_headers *ohdr,
				  u32 *bth1, u32 *bth2, u32 *len)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
	struct tid_rdma_params *remote;

	rcu_read_lock();
	remote = rcu_dereference(qpriv->tid_rdma.remote);
	/*
	 * Set the number of flow to be used based on negotiated
	 * parameters.
	 */
	req->n_flows = remote->max_write;
	req->state = TID_REQUEST_ACTIVE;

	KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth0, KVER, 0x1);
	KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth1, JKEY, remote->jkey);
	ohdr->u.tid_rdma.w_req.reth.vaddr =
		cpu_to_be64(wqe->rdma_wr.remote_addr + (wqe->length - *len));
	ohdr->u.tid_rdma.w_req.reth.rkey =
		cpu_to_be32(wqe->rdma_wr.rkey);
	ohdr->u.tid_rdma.w_req.reth.length = cpu_to_be32(*len);
	ohdr->u.tid_rdma.w_req.verbs_qp = cpu_to_be32(qp->remote_qpn);
	*bth1 &= ~RVT_QPN_MASK;
	*bth1 |= remote->qp;
	qp->s_state = TID_OP(WRITE_REQ);
	qp->s_flags |= HFI1_S_WAIT_TID_RESP;
	*bth2 |= IB_BTH_REQ_ACK;
	*len = 0;
	rcu_read_unlock();

	return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
}
void hfi1_compute_tid_rdma_flow_wt(void)
{
	/*
	 * Heuristic for computing the RNR timeout when waiting on the flow
	 * queue. Rather than a computationally expensive exact estimate of
	 * when a flow will be available, we assume that if a QP is at position
	 * N in the flow queue it has to wait approximately (N + 1) * (number
	 * of segments between two sync points), assuming PMTU of 4K. The
	 * rationale for this is that flows are released and recycled at each
	 * sync point.
	 */
	tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
		TID_RDMA_MAX_SEGMENT_SIZE;
}

static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
			     struct tid_queue *queue)
{
	return qpriv->tid_enqueue - queue->dequeue;
}
/**
 * @qp: points to rvt_qp context.
 * @to_seg: desired RNR timeout in segments.
 * Return: index of the next highest timeout in the ib_hfi1_rnr_table[]
 */
static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	u64 timeout;
	u32 bytes_per_us;
	u8 i;

	bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8;
	timeout = (to_seg * TID_RDMA_MAX_SEGMENT_SIZE) / bytes_per_us;
	/*
	 * Find the next highest value in the RNR table to the required
	 * timeout. This gives the responder some padding.
	 */
	for (i = 1; i <= IB_AETH_CREDIT_MASK; i++)
		if (rvt_rnr_tbl_to_usec(i) >= timeout)
			return i;
	return 0;
}
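
/*
 * Illustrative sketch (not part of the driver): the RNR timeout chosen above
 * is "time to drain to_seg segments at the current egress rate, rounded up
 * to the next entry in the IB RNR table". The stand-alone helper below
 * mirrors that calculation; the name and the caller-supplied rate are
 * hypothetical.
 */
static inline u8 example_tid_rnr_index(u32 to_seg, u32 bytes_per_us)
{
	u64 timeout_us = ((u64)to_seg * TID_RDMA_MAX_SEGMENT_SIZE) /
			 bytes_per_us;
	u8 i;

	for (i = 1; i <= IB_AETH_CREDIT_MASK; i++)
		if (rvt_rnr_tbl_to_usec(i) >= timeout_us)
			return i;
	return 0;	/* index 0 is the largest timeout in the table */
}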
/*
 * Central place for resource allocation at TID write responder,
 * is called from write_req and write_data interrupt handlers as
 * well as the send thread when a queued QP is scheduled for
 * resource allocation.
 *
 * Iterates over (a) segments of a request and then (b) queued requests
 * themselves to allocate resources for up to local->max_write
 * segments across multiple requests. Stop allocating when we
 * hit a sync point, resume allocating after data packets at
 * sync point have been received.
 *
 * Resource allocation and sending of responses is decoupled. The
 * request/segment which are being allocated and sent are as follows.
 * Resources are allocated for:
 *   [request: qpriv->r_tid_alloc, segment: req->alloc_seg]
 * The send thread sends:
 *   [request: qp->s_tail_ack_queue, segment:req->cur_seg]
 */
static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
{
	struct tid_rdma_request *req;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct hfi1_ctxtdata *rcd = qpriv->rcd;
	struct tid_rdma_params *local = &qpriv->tid_rdma.local;
	struct rvt_ack_entry *e;
	u32 npkts, to_seg = 0;
	bool last;
	int ret = 0;

	lockdep_assert_held(&qp->s_lock);

	while (1) {
		trace_hfi1_rsp_tid_write_alloc_res(qp, 0);
		trace_hfi1_tid_write_rsp_alloc_res(qp);
		/*
		 * Don't allocate more segments if a RNR NAK has already been
		 * scheduled to avoid messing up qp->r_psn: the RNR NAK will
		 * be sent only when all allocated segments have been sent.
		 * However, if more segments are allocated before that, TID RDMA
		 * WRITE RESP packets will be sent out for these new segments
		 * before the RNR NAK packet. When the requester receives the
		 * RNR NAK packet, it will restart with qp->s_last_psn + 1,
		 * which does not match qp->r_psn and will be dropped.
		 * Consequently, the requester will exhaust its retries and
		 * put the qp into error state.
		 */
		if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND)
			break;

		/* No requests left to process */
		if (qpriv->r_tid_alloc == qpriv->r_tid_head) {
			/* If all data has been received, clear the flow */
			if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS &&
			    !qpriv->alloc_w_segs) {
				hfi1_kern_clear_hw_flow(rcd, qp);
				qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
			}
			break;
		}

		e = &qp->s_ack_queue[qpriv->r_tid_alloc];
		if (e->opcode != TID_OP(WRITE_REQ))
			goto next_req;
		req = ack_to_tid_req(e);
		trace_hfi1_tid_req_write_alloc_res(qp, 0, e->opcode, e->psn,
						   e->lpsn, req);
		/* Finished allocating for all segments of this request */
		if (req->alloc_seg >= req->total_segs)
			goto next_req;

		/* Can allocate only a maximum of local->max_write for a QP */
		if (qpriv->alloc_w_segs >= local->max_write)
			break;

		/* Don't allocate at a sync point with data packets pending */
		if (qpriv->sync_pt && qpriv->alloc_w_segs)
			break;

		/* All data received at the sync point, continue */
		if (qpriv->sync_pt && !qpriv->alloc_w_segs) {
			hfi1_kern_clear_hw_flow(rcd, qp);
			qpriv->sync_pt = false;
			qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
		}

		/* Allocate flow if we don't have one */
		if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
			ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
			if (ret) {
				to_seg = tid_rdma_flow_wt *
					position_in_queue(qpriv,
							  &rcd->flow_queue);
				break;
			}
		}

		npkts = rvt_div_round_up_mtu(qp, req->seg_len);

		/*
		 * We are at a sync point if we run out of KDETH PSN space.
		 * Last PSN of every generation is reserved for RESYNC.
		 */
		if (qpriv->flow_state.psn + npkts > MAX_TID_FLOW_PSN - 1) {
			qpriv->sync_pt = true;
			break;
		}

		/*
		 * If overtaking req->acked_tail, send an RNR NAK. Because the
		 * QP is not queued in this case, and the issue can only be
		 * caused by a delay in scheduling the second leg which we
		 * cannot estimate, we use a rather arbitrary RNR timeout of
		 * (MAX_FLOWS / 2) segments
		 */
		if (!CIRC_SPACE(req->setup_head, req->acked_tail,
				MAX_FLOWS)) {
			ret = -EAGAIN;
			to_seg = MAX_FLOWS >> 1;
			qpriv->s_flags |= RVT_S_ACK_PENDING;
			hfi1_schedule_tid_send(qp);
			break;
		}

		/* Try to allocate rcv array / TID entries */
		ret = hfi1_kern_exp_rcv_setup(req, &req->ss, &last);
		if (ret == -EAGAIN)
			to_seg = position_in_queue(qpriv, &rcd->rarr_queue);
		if (ret)
			break;

		qpriv->alloc_w_segs++;
		req->alloc_seg++;
		continue;
next_req:
		/* Begin processing the next request */
		if (++qpriv->r_tid_alloc >
		    rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
			qpriv->r_tid_alloc = 0;
	}

	/*
	 * Schedule an RNR NAK to be sent if (a) flow or rcv array allocation
	 * has failed (b) we are called from the rcv handler interrupt context
	 * (c) an RNR NAK has not already been scheduled
	 */
	if (ret == -EAGAIN && intr_ctx && !qp->r_nak_state)
		goto send_rnr_nak;

	return;

send_rnr_nak:
	lockdep_assert_held(&qp->r_lock);

	/* Set r_nak_state to prevent unrelated events from generating NAK's */
	qp->r_nak_state = hfi1_compute_tid_rnr_timeout(qp, to_seg) | IB_RNR_NAK;

	/* Pull back r_psn to the segment being RNR NAK'd */
	qp->r_psn = e->psn + req->alloc_seg;
	qp->r_ack_psn = qp->r_psn;
	/*
	 * Pull back r_head_ack_queue to the ack entry following the request
	 * being RNR NAK'd. This allows resources to be allocated to the request
	 * if the queued QP is scheduled.
	 */
	qp->r_head_ack_queue = qpriv->r_tid_alloc + 1;
	if (qp->r_head_ack_queue > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
		qp->r_head_ack_queue = 0;
	qpriv->r_tid_head = qp->r_head_ack_queue;
	/*
	 * These send side fields are used in make_rc_ack(). They are set in
	 * hfi1_send_rc_ack() but must be set here before dropping qp->s_lock
	 * for consistency
	 */
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	/*
	 * Clear the ACK PENDING flag to prevent unwanted ACK because we
	 * have modified qp->s_ack_psn here.
	 */
	qp->s_flags &= ~(RVT_S_ACK_PENDING);

	trace_hfi1_rsp_tid_write_alloc_res(qp, qp->r_psn);
	/*
	 * qpriv->rnr_nak_state is used to determine when the scheduled RNR NAK
	 * has actually been sent. qp->s_flags RVT_S_ACK_PENDING bit cannot be
	 * used for this because qp->s_lock is dropped before calling
	 * hfi1_send_rc_ack() leading to inconsistency between the receive
	 * interrupt handlers and the send thread in make_rc_ack()
	 */
	qpriv->rnr_nak_state = TID_RNR_NAK_SEND;

	/*
	 * Schedule RNR NAK to be sent. RNR NAK's are scheduled from the receive
	 * interrupt handlers but will be sent from the send engine behind any
	 * previous responses that may have been scheduled
	 */
	rc_defered_ack(rcd, qp);
}
void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
{
	/* HANDLER FOR TID RDMA WRITE REQUEST packet (Responder side) */

	/*
	 * 1. Verify TID RDMA WRITE REQ as per IB_OPCODE_RC_RDMA_WRITE_FIRST
	 *    (see hfi1_rc_rcv())
	 *    - Don't allow 0-length requests.
	 * 2. Put TID RDMA WRITE REQ into the response queue (s_ack_queue)
	 *    - Setup struct tid_rdma_req with request info
	 *    - Prepare struct tid_rdma_flow array?
	 * 3. Set the qp->s_ack_state as state diagram in design doc.
	 * 4. Set RVT_S_RESP_PENDING in s_flags.
	 * 5. Kick the send engine (hfi1_schedule_send())
	 */
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_ack_entry *e;
	unsigned long flags;
	struct ib_reth *reth;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_rdma_request *req;
	u32 bth0, psn, len, rkey, num_segs;
	bool fecn;
	u8 next;
	u64 vaddr;
	int diff;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

	fecn = process_ecn(qp, packet);
	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
	trace_hfi1_rsp_rcv_tid_write_req(qp, psn);

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
		goto nack_inv;

	reth = &ohdr->u.tid_rdma.w_req.reth;
	vaddr = be64_to_cpu(reth->vaddr);
	len = be32_to_cpu(reth->length);

	num_segs = DIV_ROUND_UP(len, qpriv->tid_rdma.local.max_len);
	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
		return;
	}

	/*
	 * The resent request which was previously RNR NAK'd is inserted at the
	 * location of the original request, which is one entry behind
	 * r_head_ack_queue
	 */
	if (qpriv->rnr_nak_state)
		qp->r_head_ack_queue = qp->r_head_ack_queue ?
			qp->r_head_ack_queue - 1 :
			rvt_size_atomic(ib_to_rvt(qp->ibqp.device));

	/* We've verified the request, insert it into the ack queue. */
	next = qp->r_head_ack_queue + 1;
	if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
		next = 0;
	spin_lock_irqsave(&qp->s_lock, flags);
	if (unlikely(next == qp->s_acked_ack_queue)) {
		if (!qp->s_ack_queue[next].sent)
			goto nack_inv_unlock;
		update_ack_queue(qp, next);
	}
	e = &qp->s_ack_queue[qp->r_head_ack_queue];
	req = ack_to_tid_req(e);

	/* Bring previously RNR NAK'd request back to life */
	if (qpriv->rnr_nak_state) {
		qp->r_nak_state = 0;
		qp->s_nak_state = 0;
		qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
		qp->r_psn = e->lpsn + 1;
		req->state = TID_REQUEST_INIT;
		goto update_head;
	}

	release_rdma_sge_mr(e);

	/* The length needs to be in multiples of PAGE_SIZE */
	if (!len || len & ~PAGE_MASK)
		goto nack_inv_unlock;

	rkey = be32_to_cpu(reth->rkey);
	qp->r_len = len;

	if (e->opcode == TID_OP(WRITE_REQ) &&
	    (req->setup_head != req->clear_tail ||
	     req->clear_tail != req->acked_tail))
		goto nack_inv_unlock;

	if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
				  rkey, IB_ACCESS_REMOTE_WRITE)))
		goto nack_acc;

	qp->r_psn += num_segs - 1;

	e->opcode = (bth0 >> 24) & 0xff;
	e->psn = psn;
	e->lpsn = qp->r_psn;

	req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write);
	req->state = TID_REQUEST_INIT;
	req->seg_len = qpriv->tid_rdma.local.max_len;
	req->total_len = len;
	req->total_segs = num_segs;
	req->r_flow_psn = e->psn;
	req->ss.sge = e->rdma_sge;
	req->ss.num_sge = 1;

	req->flow_idx = req->setup_head;
	req->clear_tail = req->setup_head;
	req->acked_tail = req->setup_head;

	qp->r_state = e->opcode;
	qp->r_nak_state = 0;
	/*
	 * We need to increment the MSN here instead of when we
	 * finish sending the result since a duplicate request would
	 * increment it more than once.
	 */
	qp->r_msn++;
	qp->r_psn++;

	trace_hfi1_tid_req_rcv_write_req(qp, 0, e->opcode, e->psn, e->lpsn,
					 req);

	if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID) {
		qpriv->r_tid_tail = qp->r_head_ack_queue;
	} else if (qpriv->r_tid_tail == qpriv->r_tid_head) {
		struct tid_rdma_request *ptr;

		e = &qp->s_ack_queue[qpriv->r_tid_tail];
		ptr = ack_to_tid_req(e);

		if (e->opcode != TID_OP(WRITE_REQ) ||
		    ptr->comp_seg == ptr->total_segs) {
			if (qpriv->r_tid_tail == qpriv->r_tid_ack)
				qpriv->r_tid_ack = qp->r_head_ack_queue;
			qpriv->r_tid_tail = qp->r_head_ack_queue;
		}
	}
update_head:
	qp->r_head_ack_queue = next;
	qpriv->r_tid_head = qp->r_head_ack_queue;

	hfi1_tid_write_alloc_resources(qp, true);
	trace_hfi1_tid_write_rsp_rcv_req(qp);

	/* Schedule the send tasklet. */
	qp->s_flags |= RVT_S_RESP_PENDING;
	if (fecn)
		qp->s_flags |= RVT_S_ECN;
	hfi1_schedule_send(qp);

	spin_unlock_irqrestore(&qp->s_lock, flags);
	return;

nack_inv_unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;
nack_acc:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
}
u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
				   struct ib_other_headers *ohdr, u32 *bth1,
				   u32 bth2, u32 *len,
				   struct rvt_sge_state **ss)
{
	struct hfi1_ack_priv *epriv = e->priv;
	struct tid_rdma_request *req = &epriv->tid_req;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_rdma_flow *flow = NULL;
	u32 resp_len = 0, hdwords = 0;
	void *resp_addr = NULL;
	struct tid_rdma_params *remote;

	trace_hfi1_tid_req_build_write_resp(qp, 0, e->opcode, e->psn, e->lpsn,
					    req);
	trace_hfi1_tid_write_rsp_build_resp(qp);
	trace_hfi1_rsp_build_tid_write_resp(qp, bth2);
	flow = &req->flows[req->flow_idx];
	switch (req->state) {
	default:
		/*
		 * Try to allocate resources here in case QP was queued and was
		 * later scheduled when resources became available
		 */
		hfi1_tid_write_alloc_resources(qp, false);

		/* We've already sent everything which is ready */
		if (req->cur_seg >= req->alloc_seg)
			goto done;

		/*
		 * Resources can be assigned but responses cannot be sent in
		 * rnr_nak state, till the resent request is received
		 */
		if (qpriv->rnr_nak_state == TID_RNR_NAK_SENT)
			goto done;

		req->state = TID_REQUEST_ACTIVE;
		trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
		req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
		hfi1_add_tid_reap_timer(qp);
		break;

	case TID_REQUEST_RESEND_ACTIVE:
	case TID_REQUEST_RESEND:
		trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
		req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
		if (!CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS))
			req->state = TID_REQUEST_ACTIVE;

		hfi1_mod_tid_reap_timer(qp);
		break;
	}
	flow->flow_state.resp_ib_psn = bth2;
	resp_addr = (void *)flow->tid_entry;
	resp_len = sizeof(*flow->tid_entry) * flow->tidcnt;

	memset(&ohdr->u.tid_rdma.w_rsp, 0, sizeof(ohdr->u.tid_rdma.w_rsp));
	epriv->ss.sge.vaddr = resp_addr;
	epriv->ss.sge.sge_length = resp_len;
	epriv->ss.sge.length = epriv->ss.sge.sge_length;
	/*
	 * We can safely zero these out. Since the first SGE covers the
	 * entire packet, nothing else should even look at the MR.
	 */
	epriv->ss.sge.mr = NULL;
	epriv->ss.sge.m = 0;
	epriv->ss.sge.n = 0;

	epriv->ss.sg_list = NULL;
	epriv->ss.total_len = epriv->ss.sge.sge_length;
	epriv->ss.num_sge = 1;

	*ss = &epriv->ss;
	*len = epriv->ss.total_len;

	/* Construct the TID RDMA WRITE RESP packet header */
	rcu_read_lock();
	remote = rcu_dereference(qpriv->tid_rdma.remote);

	KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth0, KVER, 0x1);
	KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth1, JKEY, remote->jkey);
	ohdr->u.tid_rdma.w_rsp.aeth = rvt_compute_aeth(qp);
	ohdr->u.tid_rdma.w_rsp.tid_flow_psn =
		cpu_to_be32((flow->flow_state.generation <<
			     HFI1_KDETH_BTH_SEQ_SHIFT) |
			    (flow->flow_state.spsn &
			     HFI1_KDETH_BTH_SEQ_MASK));
	ohdr->u.tid_rdma.w_rsp.tid_flow_qp =
		cpu_to_be32(qpriv->tid_rdma.local.qp |
			    ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
			     TID_RDMA_DESTQP_FLOW_SHIFT) |
			    qpriv->rcd->ctxt);
	ohdr->u.tid_rdma.w_rsp.verbs_qp = cpu_to_be32(qp->remote_qpn);
	*bth1 = remote->qp;
	rcu_read_unlock();
	hdwords = sizeof(ohdr->u.tid_rdma.w_rsp) / sizeof(u32);
	qpriv->pending_tid_w_segs++;
done:
	return hdwords;
}
static void hfi1_add_tid_reap_timer(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	if (!(qpriv->s_flags & HFI1_R_TID_RSC_TIMER)) {
		qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
		qpriv->s_tid_timer.expires = jiffies +
			qpriv->tid_timer_timeout_jiffies;
		add_timer(&qpriv->s_tid_timer);
	}
}

static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
	mod_timer(&qpriv->s_tid_timer, jiffies +
		  qpriv->tid_timer_timeout_jiffies);
}

static int hfi1_stop_tid_reap_timer(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	int rval = 0;

	lockdep_assert_held(&qp->s_lock);
	if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
		rval = del_timer(&qpriv->s_tid_timer);
		qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
	}
	return rval;
}

void hfi1_del_tid_reap_timer(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = qp->priv;

	del_timer_sync(&qpriv->s_tid_timer);
	qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
}
static void hfi1_tid_timeout(struct timer_list *t)
{
	struct hfi1_qp_priv *qpriv = from_timer(qpriv, t, s_tid_timer);
	struct rvt_qp *qp = qpriv->owner;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
		dd_dev_warn(dd_from_ibdev(qp->ibqp.device), "[QP%u] %s %d\n",
			    qp->ibqp.qp_num, __func__, __LINE__);
		trace_hfi1_msg_tid_timeout(/* msg */
			qp, "resource timeout = ",
			(u64)qpriv->tid_timer_timeout_jiffies);
		hfi1_stop_tid_reap_timer(qp);
		/*
		 * Go through the entire ack queue and clear any outstanding
		 * HW flow and RcvArray resources.
		 */
		hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
		for (i = 0; i < rvt_max_atomic(rdi); i++) {
			struct tid_rdma_request *req =
				ack_to_tid_req(&qp->s_ack_queue[i]);

			hfi1_kern_exp_rcv_clear_all(req);
		}
		spin_unlock(&qp->s_lock);
		if (qp->ibqp.event_handler) {
			struct ib_event ev;

			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_QP_FATAL;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
		rvt_rc_error(qp, IB_WC_RESP_TIMEOUT_ERR);
		goto unlock_r_lock;
	}
	spin_unlock(&qp->s_lock);
unlock_r_lock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet)
{
	/* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requestor side) */

	/*
	 * 1. Find matching SWQE
	 * 2. Check that TIDENTRY array has enough space for a complete
	 *    segment. If not, put QP in error state.
	 * 3. Save response data in struct tid_rdma_req and struct tid_rdma_flow
	 * 4. Remove HFI1_S_WAIT_TID_RESP from s_flags.
	 * 5. Set qp->s_state
	 * 6. Kick the send engine (hfi1_schedule_send())
	 */
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct rvt_swqe *wqe;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	enum ib_wc_status status;
	u32 opcode, aeth, psn, flow_psn, i, tidlen = 0, pktlen;
	bool fecn;
	unsigned long flags;

	fecn = process_ecn(qp, packet);
	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
	aeth = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.aeth);
	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Ignore invalid responses */
	if (cmp_psn(psn, qp->s_next_psn) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	if (unlikely(cmp_psn(psn, qp->s_last_psn) <= 0))
		goto ack_done;

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;

	/*
	 * If we are waiting for a particular packet sequence number
	 * due to a request being resent, check for it. Otherwise,
	 * ensure that we haven't missed anything.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
	if (unlikely(wqe->wr.opcode != IB_WR_TID_RDMA_WRITE))
		goto ack_op_err;

	req = wqe_to_tid_req(wqe);
	/*
	 * If we've lost ACKs and our acked_tail pointer is too far
	 * behind, don't overwrite segments. Just drop the packet and
	 * let the reliability protocol take care of it.
	 */
	if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS))
		goto ack_done;

	/*
	 * The call to do_rc_ack() should be last in the chain of
	 * packet checks because it will end up updating the QP state.
	 * Therefore, anything that would prevent the packet from
	 * being accepted as a successful response should be prior
	 * to it.
	 */
	if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
		goto ack_done;

	trace_hfi1_ack(qp, psn);

	flow = &req->flows[req->setup_head];
	flow->pkt = 0;
	flow->tid_idx = 0;
	flow->tid_offset = 0;
	flow->sent = 0;
	flow->resync_npkts = 0;
	flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp);
	flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
		TID_RDMA_DESTQP_FLOW_MASK;
	flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_psn));
	flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
	flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
	flow->flow_state.resp_ib_psn = psn;
	flow->length = min_t(u32, req->seg_len,
			     (wqe->length - (req->comp_seg * req->seg_len)));

	flow->npkts = rvt_div_round_up_mtu(qp, flow->length);
	flow->flow_state.lpsn = flow->flow_state.spsn +
		flow->npkts - 1;
	/* payload length = packet length - (header length + ICRC length) */
	pktlen = packet->tlen - (packet->hlen + 4);
	if (pktlen > sizeof(flow->tid_entry)) {
		status = IB_WC_LOC_LEN_ERR;
		goto ack_err;
	}
	memcpy(flow->tid_entry, packet->ebuf, pktlen);
	flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
	trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow);

	req->comp_seg++;
	trace_hfi1_tid_write_sender_rcv_resp(qp, 0);
	/*
	 * Walk the TID_ENTRY list to make sure we have enough space for a
	 * complete segment.
	 */
	for (i = 0; i < flow->tidcnt; i++) {
		trace_hfi1_tid_entry_rcv_write_resp(/* entry */
			qp, i, flow->tid_entry[i]);
		if (!EXP_TID_GET(flow->tid_entry[i], LEN)) {
			status = IB_WC_LOC_LEN_ERR;
			goto ack_err;
		}
		tidlen += EXP_TID_GET(flow->tid_entry[i], LEN);
	}
	if (tidlen * PAGE_SIZE < flow->length) {
		status = IB_WC_LOC_LEN_ERR;
		goto ack_err;
	}

	trace_hfi1_tid_req_rcv_write_resp(qp, 0, wqe->wr.opcode, wqe->psn,
					  wqe->lpsn, req);
	/*
	 * If this is the first response for this request, set the initial
	 * flow index to the current flow.
	 */
	if (!cmp_psn(psn, wqe->psn)) {
		req->r_last_acked = mask_psn(wqe->psn - 1);
		/* Set acked flow index to head index */
		req->acked_tail = req->setup_head;
	}

	/* advance circular buffer head */
	req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS);
	req->state = TID_REQUEST_ACTIVE;

	/*
	 * If all responses for this TID RDMA WRITE request have been received
	 * advance the pointer to the next one.
	 * Since TID RDMA requests could be mixed in with regular IB requests,
	 * they might not appear sequentially in the queue. Therefore, the
	 * next request needs to be "found".
	 */
	if (qpriv->s_tid_cur != qpriv->s_tid_head &&
	    req->comp_seg == req->total_segs) {
		for (i = qpriv->s_tid_cur + 1; ; i++) {
			if (i == qp->s_size)
				i = 0;
			wqe = rvt_get_swqe_ptr(qp, i);
			if (i == qpriv->s_tid_head)
				break;
			if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
				break;
		}
		qpriv->s_tid_cur = i;
	}
	qp->s_flags &= ~HFI1_S_WAIT_TID_RESP;
	hfi1_schedule_tid_send(qp);
	goto ack_done;

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
ack_err:
	rvt_error_qp(qp, status);
ack_done:
	if (fecn)
		qp->s_flags |= RVT_S_ECN;
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
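
/*
 * Illustrative note (added, not in the original driver): each TID RDMA
 * WRITE request keeps a small circular buffer of MAX_FLOWS flow slots.
 * setup_head is where the next WRITE RESP is stored, clear_tail is the
 * segment currently being built/received, and acked_tail trails the slots
 * still waiting for a TID ACK, e.g.
 *
 *	free slots    = CIRC_SPACE(setup_head, acked_tail, MAX_FLOWS);
 *	unacked slots = CIRC_CNT(clear_tail, acked_tail, MAX_FLOWS);
 *
 * which is why the handler above drops a response once CIRC_SPACE()
 * reaches zero instead of overwriting an un-ACK'ed segment.
 */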
bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
				struct ib_other_headers *ohdr,
				u32 *bth1, u32 *bth2, u32 *len)
{
	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
	struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
	struct tid_rdma_params *remote;
	struct rvt_qp *qp = req->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	u32 tidentry = flow->tid_entry[flow->tid_idx];
	u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT;
	struct tid_rdma_write_data *wd = &ohdr->u.tid_rdma.w_data;
	u32 next_offset, om = KDETH_OM_LARGE;
	bool last_pkt;

	if (!tidlen) {
		hfi1_trdma_send_complete(qp, wqe, IB_WC_REM_INV_RD_REQ_ERR);
		rvt_error_qp(qp, IB_WC_REM_INV_RD_REQ_ERR);
	}

	*len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
	flow->sent += *len;
	next_offset = flow->tid_offset + *len;
	last_pkt = (flow->tid_idx == (flow->tidcnt - 1) &&
		    next_offset >= tidlen) || (flow->sent >= flow->length);
	trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry);
	trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow);

	rcu_read_lock();
	remote = rcu_dereference(qpriv->tid_rdma.remote);
	KDETH_RESET(wd->kdeth0, KVER, 0x1);
	KDETH_SET(wd->kdeth0, SH, !last_pkt);
	KDETH_SET(wd->kdeth0, INTR, !!(!last_pkt && remote->urg));
	KDETH_SET(wd->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL));
	KDETH_SET(wd->kdeth0, TID, EXP_TID_GET(tidentry, IDX));
	KDETH_SET(wd->kdeth0, OM, om == KDETH_OM_LARGE);
	KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om);
	KDETH_RESET(wd->kdeth1, JKEY, remote->jkey);
	wd->verbs_qp = cpu_to_be32(qp->remote_qpn);
	rcu_read_unlock();

	*bth1 = flow->tid_qpn;
	*bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
			 HFI1_KDETH_BTH_SEQ_MASK) |
			 (flow->flow_state.generation <<
			  HFI1_KDETH_BTH_SEQ_SHIFT));
	if (last_pkt) {
		/* PSNs are zero-based, so +1 to count number of packets */
		if (flow->flow_state.lpsn + 1 +
		    rvt_div_round_up_mtu(qp, req->seg_len) >
		    MAX_TID_FLOW_PSN)
			req->state = TID_REQUEST_SYNC;
		*bth2 |= IB_BTH_REQ_ACK;
	}

	if (next_offset >= tidlen) {
		flow->tid_offset = 0;
		flow->tid_idx++;
	} else {
		flow->tid_offset = next_offset;
	}
	return last_pkt;
}
void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ctxtdata *rcd = priv->rcd;
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_ack_entry *e;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	u32 psn, next;
	u8 opcode;
	bool fecn;

	fecn = process_ecn(qp, packet);
	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;

	/*
	 * All error handling should be done by now. If we are here, the packet
	 * is either good or been accepted by the error handler.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	e = &qp->s_ack_queue[priv->r_tid_tail];
	req = ack_to_tid_req(e);
	flow = &req->flows[req->clear_tail];
	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) {
		update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);

		if (cmp_psn(psn, flow->flow_state.r_next_psn))
			goto send_nak;

		flow->flow_state.r_next_psn = mask_psn(psn + 1);
		/*
		 * Copy the payload to destination buffer if this packet is
		 * delivered as an eager packet due to RSM rule and FECN.
		 * The RSM rule selects FECN bit in BTH and SH bit in
		 * KDETH header and therefore will not match the last
		 * packet of each segment that has SH bit cleared.
		 */
		if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) {
			struct rvt_sge_state ss;
			u32 len;
			u32 tlen = packet->tlen;
			u16 hdrsize = packet->hlen;
			u8 pad = packet->pad;
			u8 extra_bytes = pad + packet->extra_byte +
				(SIZE_OF_CRC << 2);
			u32 pmtu = qp->pmtu;

			if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
				goto send_nak;
			len = req->comp_seg * req->seg_len;
			len += delta_psn(psn,
				full_flow_psn(flow, flow->flow_state.spsn)) *
				pmtu;
			if (unlikely(req->total_len - len < pmtu))
				goto send_nak;

			/*
			 * The e->rdma_sge field is set when TID RDMA WRITE REQ
			 * is first received and is never modified thereafter.
			 */
			ss.sge = e->rdma_sge;
			ss.sg_list = NULL;
			ss.num_sge = 1;
			ss.total_len = req->total_len;
			rvt_skip_sge(&ss, len, false);
			rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
				     false);
			/* Raise the sw sequence check flag for next packet */
			priv->r_next_psn_kdeth = mask_psn(psn + 1);
			priv->s_flags |= HFI1_R_TID_SW_PSN;
		}
		goto exit;
	}
	flow->flow_state.r_next_psn = mask_psn(psn + 1);
	hfi1_kern_exp_rcv_clear(req);
	priv->alloc_w_segs--;
	rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK;
	req->comp_seg++;
	priv->s_nak_state = 0;

	/*
	 * Release the flow if one of the following conditions has been met:
	 *  - The request has reached a sync point AND all outstanding
	 *    segments have been completed, or
	 *  - The entire request is complete and there are no more requests
	 *    (of any kind) in the queue.
	 */
	trace_hfi1_rsp_rcv_tid_write_data(qp, psn);
	trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
					  req);
	trace_hfi1_tid_write_rsp_rcv_data(qp);
	if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
		priv->r_tid_ack = priv->r_tid_tail;

	if (opcode == TID_OP(WRITE_DATA_LAST)) {
		release_rdma_sge_mr(e);
		for (next = priv->r_tid_tail + 1; ; next++) {
			if (next > rvt_size_atomic(&dev->rdi))
				next = 0;
			if (next == priv->r_tid_head)
				break;
			e = &qp->s_ack_queue[next];
			if (e->opcode == TID_OP(WRITE_REQ))
				break;
		}
		priv->r_tid_tail = next;
		if (++qp->s_acked_ack_queue > rvt_size_atomic(&dev->rdi))
			qp->s_acked_ack_queue = 0;
	}

	hfi1_tid_write_alloc_resources(qp, true);

	/*
	 * If we need to generate more responses, schedule the
	 * send engine.
	 */
	if (req->cur_seg < req->total_segs ||
	    qp->s_tail_ack_queue != qp->r_head_ack_queue) {
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);
	}

	priv->pending_tid_w_segs--;
	if (priv->s_flags & HFI1_R_TID_RSC_TIMER) {
		if (priv->pending_tid_w_segs)
			hfi1_mod_tid_reap_timer(req->qp);
		else
			hfi1_stop_tid_reap_timer(req->qp);
	}

done:
	priv->s_flags |= RVT_S_ACK_PENDING;
	hfi1_schedule_tid_send(qp);
exit:
	priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
	if (fecn)
		qp->s_flags |= RVT_S_ECN;
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return;

send_nak:
	if (!priv->s_nak_state) {
		priv->s_nak_state = IB_NAK_PSN_ERROR;
		priv->s_nak_psn = flow->flow_state.r_next_psn;
		priv->s_flags |= RVT_S_ACK_PENDING;
		if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
			priv->r_tid_ack = priv->r_tid_tail;
		hfi1_schedule_tid_send(qp);
	}
	goto done;
}
static bool hfi1_tid_rdma_is_resync_psn(u32 psn)
{
	return (bool)((psn & HFI1_KDETH_BTH_SEQ_MASK) ==
		      HFI1_KDETH_BTH_SEQ_MASK);
}
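
/*
 * Added illustration (not in the original source): a KDETH PSN splits into
 * a generation (upper bits) and an in-generation sequence (the low
 * HFI1_KDETH_BTH_SEQ_SHIFT bits):
 *
 *	generation = psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
 *	sequence   = psn & HFI1_KDETH_BTH_SEQ_MASK;
 *
 * A RESYNC advertises the last PSN of a generation, so a sequence field of
 * all ones is what the helper above treats as a "resync PSN".
 */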
u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
				  struct ib_other_headers *ohdr, u16 iflow,
				  u32 *bth1, u32 *bth2)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_flow_state *fs = &qpriv->flow_state;
	struct tid_rdma_request *req = ack_to_tid_req(e);
	struct tid_rdma_flow *flow = &req->flows[iflow];
	struct tid_rdma_params *remote;

	rcu_read_lock();
	remote = rcu_dereference(qpriv->tid_rdma.remote);
	KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey);
	ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
	*bth1 = remote->qp;
	rcu_read_unlock();

	if (qpriv->resync) {
		*bth2 = mask_psn((fs->generation <<
				  HFI1_KDETH_BTH_SEQ_SHIFT) - 1);
		ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
	} else if (qpriv->s_nak_state) {
		*bth2 = mask_psn(qpriv->s_nak_psn);
		ohdr->u.tid_rdma.ack.aeth =
			cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
				    (qpriv->s_nak_state <<
				     IB_AETH_CREDIT_SHIFT));
	} else {
		*bth2 = full_flow_psn(flow, flow->flow_state.lpsn);
		ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
	}
	KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1);
	ohdr->u.tid_rdma.ack.tid_flow_qp =
		cpu_to_be32(qpriv->tid_rdma.local.qp |
			    ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
			     TID_RDMA_DESTQP_FLOW_SHIFT) |
			    qpriv->rcd->ctxt);

	ohdr->u.tid_rdma.ack.tid_flow_psn = 0;
	ohdr->u.tid_rdma.ack.verbs_psn =
		cpu_to_be32(flow->flow_state.resp_ib_psn);

	if (qpriv->resync) {
		/*
		 * If the PSN before the current expect KDETH PSN is the
		 * RESYNC PSN, then we never received a good TID RDMA WRITE
		 * DATA packet after a previous RESYNC.
		 * In this case, the next expected KDETH PSN stays the same.
		 */
		if (hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1)) {
			ohdr->u.tid_rdma.ack.tid_flow_psn =
				cpu_to_be32(qpriv->r_next_psn_kdeth_save);
		} else {
			/*
			 * Because the KDETH PSNs jump during a RESYNC, it's
			 * not possible to infer (or compute) the previous value
			 * of r_next_psn_kdeth in the case of back-to-back
			 * RESYNC packets. Therefore, we save it.
			 */
			qpriv->r_next_psn_kdeth_save =
				qpriv->r_next_psn_kdeth - 1;
			ohdr->u.tid_rdma.ack.tid_flow_psn =
				cpu_to_be32(qpriv->r_next_psn_kdeth_save);
			qpriv->r_next_psn_kdeth = mask_psn(*bth2 + 1);
		}
		qpriv->resync = false;
	}

	return sizeof(ohdr->u.tid_rdma.ack) / sizeof(u32);
}
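
/*
 * Added summary (not in the original source): the TID ACK built above takes
 * one of three forms -- a RESYNC ACK (bth2 carries the last PSN of the new
 * generation), a NAK (bth2 carries the NAK'ed PSN and the NAK code rides in
 * the AETH credit field), or a normal ACK of the last fully received
 * segment (bth2 carries that flow's lpsn).
 */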
void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
{
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct rvt_swqe *wqe;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
	unsigned long flags;
	u16 fidx;

	trace_hfi1_tid_write_sender_rcv_tid_ack(qp, 0);
	process_ecn(qp, packet);
	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
	aeth = be32_to_cpu(ohdr->u.tid_rdma.ack.aeth);
	req_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.verbs_psn));
	resync_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.tid_flow_psn));

	spin_lock_irqsave(&qp->s_lock, flags);
	trace_hfi1_rcv_tid_ack(qp, aeth, psn, req_psn, resync_psn);

	/* If we are waiting for an ACK to RESYNC, drop any other packets */
	if ((qp->s_flags & HFI1_S_WAIT_HALT) &&
	    cmp_psn(psn, qpriv->s_resync_psn))
		goto ack_op_err;

	ack_psn = req_psn;
	if (hfi1_tid_rdma_is_resync_psn(psn))
		ack_kpsn = resync_psn;
	else
		ack_kpsn = psn;
	if (aeth >> 29) {
		ack_psn--;
		ack_kpsn--;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_op_err;

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
		goto ack_op_err;

	req = wqe_to_tid_req(wqe);
	trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
				       wqe->lpsn, req);
	flow = &req->flows[req->acked_tail];
	trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);

	/* Drop stale ACK/NAK */
	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
	    cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
		goto ack_op_err;

	while (cmp_psn(ack_kpsn,
		       full_flow_psn(flow, flow->flow_state.lpsn)) >= 0 &&
	       req->ack_seg < req->cur_seg) {
		req->ack_seg++;
		/* advance acked segment pointer */
		req->acked_tail = CIRC_NEXT(req->acked_tail, MAX_FLOWS);
		req->r_last_acked = flow->flow_state.resp_ib_psn;
		trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
					       wqe->lpsn, req);
		if (req->ack_seg == req->total_segs) {
			req->state = TID_REQUEST_COMPLETE;
			wqe = do_rc_completion(qp, wqe,
					       to_iport(qp->ibqp.device,
							qp->port_num));
			trace_hfi1_sender_rcv_tid_ack(qp);
			atomic_dec(&qpriv->n_tid_requests);
			if (qp->s_acked == qp->s_tail)
				break;
			if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
				break;
			req = wqe_to_tid_req(wqe);
		}
		flow = &req->flows[req->acked_tail];
		trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
	}

	trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
				       wqe->lpsn, req);
	switch (aeth >> 29) {
	case 0:         /* ACK */
		if (qpriv->s_flags & RVT_S_WAIT_ACK)
			qpriv->s_flags &= ~RVT_S_WAIT_ACK;
		if (!hfi1_tid_rdma_is_resync_psn(psn)) {
			/* Check if there is any pending TID ACK */
			if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
			    req->ack_seg < req->cur_seg)
				hfi1_mod_tid_retry_timer(qp);
			else
				hfi1_stop_tid_retry_timer(qp);
			hfi1_schedule_send(qp);
		} else {
			u32 spsn, fpsn, last_acked, generation;
			struct tid_rdma_request *rptr;

			/* ACK(RESYNC) */
			hfi1_stop_tid_retry_timer(qp);
			/* Allow new requests (see hfi1_make_tid_rdma_pkt) */
			qp->s_flags &= ~HFI1_S_WAIT_HALT;
			/*
			 * Clear RVT_S_SEND_ONE flag in case that the TID RDMA
			 * ACK is received after the TID retry timer is fired
			 * again. In this case, do not send any more TID
			 * RESYNC request or wait for any more TID ACK packet.
			 */
			qpriv->s_flags &= ~RVT_S_SEND_ONE;
			hfi1_schedule_send(qp);

			if ((qp->s_acked == qpriv->s_tid_tail &&
			     req->ack_seg == req->total_segs) ||
			    qp->s_acked == qp->s_tail) {
				qpriv->s_state = TID_OP(WRITE_DATA_LAST);
				goto done;
			}

			if (req->ack_seg == req->comp_seg) {
				qpriv->s_state = TID_OP(WRITE_DATA);
				goto done;
			}

			/*
			 * The PSN to start with is the next PSN after the
			 * RESYNC PSN.
			 */
			psn = mask_psn(psn + 1);
			generation = psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
			spsn = 0;

			/*
			 * Update to the correct WQE when we get an ACK(RESYNC)
			 * in the middle of a request.
			 */
			if (delta_psn(ack_psn, wqe->lpsn))
				wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
			req = wqe_to_tid_req(wqe);
			flow = &req->flows[req->acked_tail];
			/*
			 * RESYNC re-numbers the PSN ranges of all remaining
			 * segments. Also, PSN's start from 0 in the middle of a
			 * segment and the first segment size is less than the
			 * default number of packets. flow->resync_npkts is used
			 * to track the number of packets from the start of the
			 * real segment to the point of 0 PSN after the RESYNC
			 * in order to later correctly rewind the SGE.
			 */
			fpsn = full_flow_psn(flow, flow->flow_state.spsn);
			req->r_ack_psn = psn;
			flow->resync_npkts +=
				delta_psn(mask_psn(resync_psn + 1), fpsn);
			/*
			 * Renumber all packet sequence number ranges
			 * based on the new generation.
			 */
			last_acked = qp->s_acked;
			rptr = req;
			while (1) {
				/* start from last acked segment */
				for (fidx = rptr->acked_tail;
				     CIRC_CNT(rptr->setup_head, fidx,
					      MAX_FLOWS);
				     fidx = CIRC_NEXT(fidx, MAX_FLOWS)) {
					u32 lpsn;
					u32 gen;

					flow = &rptr->flows[fidx];
					gen = flow->flow_state.generation;
					if (WARN_ON(gen == generation &&
						    flow->flow_state.spsn !=
						     spsn))
						continue;
					lpsn = flow->flow_state.lpsn;
					lpsn = full_flow_psn(flow, lpsn);
					flow->npkts =
						delta_psn(lpsn,
							  mask_psn(resync_psn)
							  );
					flow->flow_state.generation =
						generation;
					flow->flow_state.spsn = spsn;
					flow->flow_state.lpsn =
						flow->flow_state.spsn +
						flow->npkts - 1;
					flow->pkt = 0;
					spsn += flow->npkts;
					resync_psn += flow->npkts;
					trace_hfi1_tid_flow_rcv_tid_ack(qp,
									fidx,
									flow);
				}
				if (++last_acked == qpriv->s_tid_cur + 1)
					break;
				if (last_acked == qp->s_size)
					last_acked = 0;
				wqe = rvt_get_swqe_ptr(qp, last_acked);
				rptr = wqe_to_tid_req(wqe);
			}
			req->cur_seg = req->ack_seg;
			qpriv->s_tid_tail = qp->s_acked;
			qpriv->s_state = TID_OP(WRITE_REQ);
			hfi1_schedule_tid_send(qp);
		}
done:
		qpriv->s_retry = qp->s_retry_cnt;
		break;

	case 3:         /* NAK */
		hfi1_stop_tid_retry_timer(qp);
		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
			IB_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			if (!req->flows)
				break;
			flow = &req->flows[req->acked_tail];
			flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
			if (cmp_psn(psn, flpsn) > 0)
				break;
			trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
							flow);
			req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
			req->cur_seg = req->ack_seg;
			qpriv->s_tid_tail = qp->s_acked;
			qpriv->s_state = TID_OP(WRITE_REQ);
			qpriv->s_retry = qp->s_retry_cnt;
			hfi1_schedule_tid_send(qp);
			break;

		default:
			break;
		}
		break;

	default:
		break;
	}

ack_op_err:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
void hfi1_add_tid_retry_timer(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	if (!(priv->s_flags & HFI1_S_TID_RETRY_TIMER)) {
		priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
		priv->s_tid_retry_timer.expires = jiffies +
			priv->tid_retry_timeout_jiffies + rdi->busy_jiffies;
		add_timer(&priv->s_tid_retry_timer);
	}
}

static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
	mod_timer(&priv->s_tid_retry_timer, jiffies +
		  priv->tid_retry_timeout_jiffies + rdi->busy_jiffies);
}

static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	int rval = 0;

	lockdep_assert_held(&qp->s_lock);
	if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
		rval = del_timer(&priv->s_tid_retry_timer);
		priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
	}
	return rval;
}

void hfi1_del_tid_retry_timer(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	del_timer_sync(&priv->s_tid_retry_timer);
	priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
}
static void hfi1_tid_retry_timeout(struct timer_list *t)
{
	struct hfi1_qp_priv *priv = from_timer(priv, t, s_tid_retry_timer);
	struct rvt_qp *qp = priv->owner;
	struct rvt_swqe *wqe;
	unsigned long flags;
	struct tid_rdma_request *req;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	trace_hfi1_tid_write_sender_retry_timeout(qp, 0);
	if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
		hfi1_stop_tid_retry_timer(qp);
		if (!priv->s_retry) {
			trace_hfi1_msg_tid_retry_timeout(/* msg */
				qp,
				"Exhausted retries. Tid retry timeout = ",
				(u64)priv->tid_retry_timeout_jiffies);

			wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
			hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		} else {
			wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
			req = wqe_to_tid_req(wqe);
			trace_hfi1_tid_req_tid_retry_timeout(/* req */
				qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req);

			priv->s_flags &= ~RVT_S_WAIT_ACK;
			/* Only send one packet (the RESYNC) */
			priv->s_flags |= RVT_S_SEND_ONE;
			/*
			 * No additional request shall be made by this QP until
			 * the RESYNC has been complete.
			 */
			qp->s_flags |= HFI1_S_WAIT_HALT;
			priv->s_state = TID_OP(RESYNC);
			priv->s_retry--;
			hfi1_schedule_tid_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr, u32 *bth1,
			       u32 *bth2, u16 fidx)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_rdma_params *remote;
	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
	struct tid_rdma_flow *flow = &req->flows[fidx];
	u32 generation;

	rcu_read_lock();
	remote = rcu_dereference(qpriv->tid_rdma.remote);
	KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey);
	ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
	*bth1 = remote->qp;
	rcu_read_unlock();

	generation = kern_flow_generation_next(flow->flow_state.generation);
	*bth2 = mask_psn((generation << HFI1_KDETH_BTH_SEQ_SHIFT) - 1);
	qpriv->s_resync_psn = *bth2;
	*bth2 |= IB_BTH_REQ_ACK;
	KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1);

	return sizeof(ohdr->u.tid_rdma.resync) / sizeof(u32);
}
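
/*
 * Added note (not in the original source): the RESYNC request advertises
 * the *next* generation by sending PSN
 * (generation << HFI1_KDETH_BTH_SEQ_SHIFT) - 1, i.e. the last PSN of the
 * previous generation; the responder then derives the new generation from
 * psn + 1 in hfi1_rc_rcv_tid_rdma_resync().
 */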
void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
{
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct hfi1_ctxtdata *rcd = qpriv->rcd;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct rvt_ack_entry *e;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	struct tid_flow_state *fs = &qpriv->flow_state;
	u32 psn, generation, idx, gen_next;
	bool fecn;
	unsigned long flags;

	fecn = process_ecn(qp, packet);
	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));

	generation = mask_psn(psn + 1) >> HFI1_KDETH_BTH_SEQ_SHIFT;
	spin_lock_irqsave(&qp->s_lock, flags);

	gen_next = (fs->generation == KERN_GENERATION_RESERVED) ?
		generation : kern_flow_generation_next(fs->generation);
	/*
	 * RESYNC packet contains the "next" generation and can only be
	 * from the current or previous generations
	 */
	if (generation != mask_generation(gen_next - 1) &&
	    generation != gen_next)
		goto bail;
	/* Already processing a resync */
	if (qpriv->resync)
		goto bail;

	spin_lock(&rcd->exp_lock);
	if (fs->index >= RXE_NUM_TID_FLOWS) {
		/*
		 * If we don't have a flow, save the generation so it can be
		 * applied when a new flow is allocated
		 */
		fs->generation = generation;
	} else {
		/* Reprogram the QP flow with new generation */
		rcd->flows[fs->index].generation = generation;
		fs->generation = kern_setup_hw_flow(rcd, fs->index);
	}
	fs->psn = 0;
	/*
	 * Disable SW PSN checking since a RESYNC is equivalent to a
	 * sync point and the flow has/will be reprogrammed
	 */
	qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
	trace_hfi1_tid_write_rsp_rcv_resync(qp);

	/*
	 * Reset all TID flow information with the new generation.
	 * This is done for all requests and segments after the
	 * last received segment
	 */
	for (idx = qpriv->r_tid_tail; ; idx++) {
		u16 flow_idx;

		if (idx > rvt_size_atomic(&dev->rdi))
			idx = 0;
		e = &qp->s_ack_queue[idx];
		if (e->opcode == TID_OP(WRITE_REQ)) {
			req = ack_to_tid_req(e);
			trace_hfi1_tid_req_rcv_resync(qp, 0, e->opcode, e->psn,
						      e->lpsn, req);

			/* start from last unacked segment */
			for (flow_idx = req->clear_tail;
			     CIRC_CNT(req->setup_head, flow_idx,
				      MAX_FLOWS);
			     flow_idx = CIRC_NEXT(flow_idx, MAX_FLOWS)) {
				u32 lpsn;
				u32 next;

				flow = &req->flows[flow_idx];
				lpsn = full_flow_psn(flow,
						     flow->flow_state.lpsn);
				next = flow->flow_state.r_next_psn;
				flow->npkts = delta_psn(lpsn, next - 1);
				flow->flow_state.generation = fs->generation;
				flow->flow_state.spsn = fs->psn;
				flow->flow_state.lpsn =
					flow->flow_state.spsn + flow->npkts - 1;
				flow->flow_state.r_next_psn =
					full_flow_psn(flow,
						      flow->flow_state.spsn);
				fs->psn += flow->npkts;
				trace_hfi1_tid_flow_rcv_resync(qp, flow_idx,
							       flow);
			}
		}
		if (idx == qp->s_tail_ack_queue)
			break;
	}

	spin_unlock(&rcd->exp_lock);
	qpriv->resync = true;
	/* RESYNC request always gets a TID RDMA ACK. */
	qpriv->s_nak_state = 0;
	qpriv->s_flags |= RVT_S_ACK_PENDING;
	hfi1_schedule_tid_send(qp);
bail:
	if (fecn)
		qp->s_flags |= RVT_S_ECN;
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
/*
 * Call this function when the last TID RDMA WRITE DATA packet for a request
 * is built.
 */
static void update_tid_tail(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;
	u32 i;
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
	/* Can't move beyond s_tid_cur */
	if (priv->s_tid_tail == priv->s_tid_cur)
		return;
	for (i = priv->s_tid_tail + 1; ; i++) {
		if (i == qp->s_size)
			i = 0;

		if (i == priv->s_tid_cur)
			break;
		wqe = rvt_get_swqe_ptr(qp, i);
		if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
			break;
	}
	priv->s_tid_tail = i;
	priv->s_state = TID_OP(WRITE_RESP);
}
int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
	__must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rvt_swqe *wqe;
	u32 bth1 = 0, bth2 = 0, hwords = 5, len, middle = 0;
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss = &qp->s_sge;
	struct rvt_ack_entry *e = &qp->s_ack_queue[qp->s_tail_ack_queue];
	struct tid_rdma_request *req = ack_to_tid_req(e);
	bool last = false;
	u8 opcode = TID_OP(WRITE_DATA);

	lockdep_assert_held(&qp->s_lock);
	trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
	/*
	 * Prioritize the sending of the requests and responses over the
	 * sending of the TID RDMA data packets.
	 */
	if (((atomic_read(&priv->n_tid_requests) < HFI1_TID_RDMA_WRITE_CNT) &&
	     atomic_read(&priv->n_requests) &&
	     !(qp->s_flags & (RVT_S_BUSY | RVT_S_WAIT_ACK |
			     HFI1_S_ANY_WAIT_IO))) ||
	    (e->opcode == TID_OP(WRITE_REQ) && req->cur_seg < req->alloc_seg &&
	     !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)))) {
		struct iowait_work *iowork;

		iowork = iowait_get_ib_work(&priv->s_iowait);
		ps->s_txreq = get_waiting_verbs_txreq(iowork);
		if (ps->s_txreq || hfi1_make_rc_req(qp, ps)) {
			priv->s_flags |= HFI1_S_TID_BUSY_SET;
			return 1;
		}
	}

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (!ps->s_txreq)
		goto bail_no_tx;

	ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;

	if ((priv->s_flags & RVT_S_ACK_PENDING) &&
	    make_tid_rdma_ack(qp, ohdr, ps))
		return 1;

	/*
	 * Bail out if we can't send data.
	 * Be reminded that this check must been done after the call to
	 * make_tid_rdma_ack() because the responding QP could be in
	 * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
	 */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
		goto bail;

	if (priv->s_flags & RVT_S_WAIT_ACK)
		goto bail;

	/* Check whether there is anything to do. */
	if (priv->s_tid_tail == HFI1_QP_WQE_INVALID)
		goto bail;
	wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
	req = wqe_to_tid_req(wqe);
	trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn,
					wqe->lpsn, req);
	switch (priv->s_state) {
	case TID_OP(WRITE_REQ):
	case TID_OP(WRITE_RESP):
		priv->tid_ss.sge = wqe->sg_list[0];
		priv->tid_ss.sg_list = wqe->sg_list + 1;
		priv->tid_ss.num_sge = wqe->wr.num_sge;
		priv->tid_ss.total_len = wqe->length;

		if (priv->s_state == TID_OP(WRITE_REQ))
			hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
		priv->s_state = TID_OP(WRITE_DATA);
		/* fall through */

	case TID_OP(WRITE_DATA):
		/*
		 * 1. Check whether TID RDMA WRITE RESP available.
		 * 2. If no:
		 *    2.1 If have more segments and no TID RDMA WRITE RESP,
		 *        set HFI1_S_WAIT_TID_RESP
		 *    2.2 Return indicating no progress made.
		 * 3. If yes:
		 *    3.1 Build TID RDMA WRITE DATA packet.
		 *    3.2 If last packet in segment:
		 *        3.2.1 Change KDETH header bits
		 *        3.2.2 Advance RESP pointers.
		 *    3.3 Return indicating progress made.
		 */
		trace_hfi1_sender_make_tid_pkt(qp);
		trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
		wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
		req = wqe_to_tid_req(wqe);
		len = wqe->length;

		if (!req->comp_seg || req->cur_seg == req->comp_seg)
			goto bail;

		trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode,
						wqe->psn, wqe->lpsn, req);
		last = hfi1_build_tid_rdma_packet(wqe, ohdr, &bth1, &bth2,
						  &len);

		if (last) {
			/* move pointer to next flow */
			req->clear_tail = CIRC_NEXT(req->clear_tail,
						    MAX_FLOWS);
			if (++req->cur_seg < req->total_segs) {
				if (!CIRC_CNT(req->setup_head, req->clear_tail,
					      MAX_FLOWS))
					qp->s_flags |= HFI1_S_WAIT_TID_RESP;
			} else {
				priv->s_state = TID_OP(WRITE_DATA_LAST);
				opcode = TID_OP(WRITE_DATA_LAST);

				/* Advance the s_tid_tail now */
				update_tid_tail(qp);
			}
		}
		hwords += sizeof(ohdr->u.tid_rdma.w_data) / sizeof(u32);
		ss = &priv->tid_ss;
		break;

	case TID_OP(RESYNC):
		trace_hfi1_sender_make_tid_pkt(qp);
		/* Use generation from the most recently received response */
		wqe = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
		req = wqe_to_tid_req(wqe);
		/* If no responses for this WQE look at the previous one */
		if (!req->comp_seg) {
			wqe = rvt_get_swqe_ptr(qp,
					       (!priv->s_tid_cur ? qp->s_size :
						priv->s_tid_cur) - 1);
			req = wqe_to_tid_req(wqe);
		}
		hwords += hfi1_build_tid_rdma_resync(qp, wqe, ohdr, &bth1,
						     &bth2,
						     CIRC_PREV(req->setup_head,
							       MAX_FLOWS));
		ss = NULL;
		len = 0;
		opcode = TID_OP(RESYNC);
		break;

	default:
		goto bail;
	}
	if (priv->s_flags & RVT_S_SEND_ONE) {
		priv->s_flags &= ~RVT_S_SEND_ONE;
		priv->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	ps->s_txreq->hdr_dwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = ss;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
			     middle, ps);
	return 1;
bail:
	hfi1_put_txreq(ps->s_txreq);
bail_no_tx:
	ps->s_txreq = NULL;
	priv->s_flags &= ~RVT_S_BUSY;
	/*
	 * If we didn't get a txreq, the QP will be woken up later to try
	 * again; set the flags to indicate which work item to wake up.
	 * (A better algorithm should be found to do this and generalize the
	 * sleep/wakeup flags.)
	 */
	iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
	return 0;
}
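
/*
 * Added note (not in the original source): this "second leg" intentionally
 * yields to the first leg -- new requests/responses are attempted (via
 * hfi1_make_rc_req()) before any TID RDMA WRITE DATA is built, so
 * request/response traffic is never starved by a long-running data stream.
 */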
static int make_tid_rdma_ack(struct rvt_qp *qp,
			     struct ib_other_headers *ohdr,
			     struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	u32 hwords = 0;
	u32 len = 0;
	u32 bth1 = 0, bth2 = 0;
	int middle = 0;
	u16 flow;
	u32 next;
	struct tid_rdma_request *req, *nreq;

	trace_hfi1_tid_write_rsp_make_tid_ack(qp);
	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	e = &qp->s_ack_queue[qpriv->r_tid_ack];
	req = ack_to_tid_req(e);
	/*
	 * In the RESYNC case, we are exactly one segment past the
	 * previously sent ack or at the previously sent NAK. So to send
	 * the resync ack, we go back one segment (which might be part of
	 * the previous request) and let the do-while loop execute again.
	 * The advantage of executing the do-while loop is that any data
	 * received after the previous ack is automatically acked in the
	 * RESYNC ack. It turns out that for the do-while loop we only need
	 * to pull back qpriv->r_tid_ack, not the segment
	 * indices/counters. The scheme works even if the previous request
	 * was not a TID WRITE request.
	 */
	if (qpriv->resync) {
		if (!req->ack_seg || req->ack_seg == req->total_segs)
			qpriv->r_tid_ack = !qpriv->r_tid_ack ?
				rvt_size_atomic(&dev->rdi) :
				qpriv->r_tid_ack - 1;
		e = &qp->s_ack_queue[qpriv->r_tid_ack];
		req = ack_to_tid_req(e);
	}

	trace_hfi1_rsp_make_tid_ack(qp, e->psn);
	trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
					req);
	/*
	 * If we've sent all the ACKs that we can, we are done
	 * until we get more segments...
	 */
	if (!qpriv->s_nak_state && !qpriv->resync &&
	    req->ack_seg == req->comp_seg)
		goto bail;

	do {
		/*
		 * To deal with coalesced ACKs, the acked_tail pointer
		 * into the flow array is used. The distance between it
		 * and the clear_tail is the number of flows that are
		 * being ACK'ed.
		 */
		req->ack_seg +=
			/* Get up-to-date value */
			CIRC_CNT(req->clear_tail, req->acked_tail,
				 MAX_FLOWS);
		/* Advance acked index */
		req->acked_tail = req->clear_tail;

		/*
		 * req->clear_tail points to the segment currently being
		 * received. So, when sending an ACK, the previous
		 * segment is being ACK'ed.
		 */
		flow = CIRC_PREV(req->acked_tail, MAX_FLOWS);
		if (req->ack_seg != req->total_segs)
			break;
		req->state = TID_REQUEST_COMPLETE;

		next = qpriv->r_tid_ack + 1;
		if (next > rvt_size_atomic(&dev->rdi))
			next = 0;
		qpriv->r_tid_ack = next;
		if (qp->s_ack_queue[next].opcode != TID_OP(WRITE_REQ))
			break;
		nreq = ack_to_tid_req(&qp->s_ack_queue[next]);
		if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg)
			break;

		/* Move to the next ack entry now */
		e = &qp->s_ack_queue[qpriv->r_tid_ack];
		req = ack_to_tid_req(e);
	} while (1);

	/*
	 * At this point qpriv->r_tid_ack == qpriv->r_tid_tail but e and
	 * req could be pointing at the previous ack queue entry
	 */
	if (qpriv->s_nak_state ||
	    (qpriv->resync &&
	     !hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1) &&
	     (cmp_psn(qpriv->r_next_psn_kdeth - 1,
		      full_flow_psn(&req->flows[flow],
				    req->flows[flow].flow_state.lpsn)) > 0))) {
		/*
		 * A NAK will implicitly acknowledge all previous TID RDMA
		 * requests. Therefore, we NAK with the req->acked_tail
		 * segment for the request at qpriv->r_tid_ack (same at
		 * this point as the req->clear_tail segment for the
		 * qpriv->r_tid_tail request)
		 */
		e = &qp->s_ack_queue[qpriv->r_tid_ack];
		req = ack_to_tid_req(e);
		flow = req->acked_tail;
	} else if (req->ack_seg == req->total_segs &&
		   qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK)
		qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;

	trace_hfi1_tid_write_rsp_make_tid_ack(qp);
	trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
					req);
	hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1,
						&bth2);
	len = 0;
	qpriv->s_flags &= ~RVT_S_ACK_PENDING;
	ps->s_txreq->hdr_dwords = hwords;
	ps->s_txreq->sde = qpriv->s_sde;
	ps->s_txreq->s_cur_size = len;
	ps->s_txreq->ss = NULL;
	hfi1_make_ruc_header(qp, ohdr, (TID_OP(ACK) << 24), bth1, bth2, middle,
			     ps);
	ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
	return 1;
bail:
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qpriv->s_flags &= ~RVT_S_ACK_PENDING;
	return 0;
}
static int hfi1_send_tid_ok(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	return !(priv->s_flags & RVT_S_BUSY ||
		 qp->s_flags & HFI1_S_ANY_WAIT_IO) &&
		(verbs_txreq_queued(iowait_get_tid_work(&priv->s_iowait)) ||
		 (priv->s_flags & RVT_S_RESP_PENDING) ||
		 !(qp->s_flags & HFI1_S_ANY_TID_WAIT_SEND));
}
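
/*
 * Added note (not in the original source): unlike hfi1_send_ok(), the test
 * above must tolerate the two send engines sharing RVT_S_BUSY; it only
 * requires that this QP is not already busy and not blocked on IO, and that
 * there is either a queued txreq, a pending response, or no outstanding TID
 * wait condition.
 */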
void _hfi1_do_tid_send(struct work_struct *work)
{
	struct iowait_work *w = container_of(work, struct iowait_work, iowork);
	struct rvt_qp *qp = iowait_to_qp(w->iow);

	hfi1_do_tid_send(qp);
}
static void hfi1_do_tid_send(struct rvt_qp *qp)
{
	struct hfi1_pkt_state ps;
	struct hfi1_qp_priv *priv = qp->priv;

	ps.dev = to_idev(qp->ibqp.device);
	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
	ps.ppd = ppd_from_ibp(ps.ibp);
	ps.wait = iowait_get_tid_work(&priv->s_iowait);
	ps.in_thread = false;
	ps.timeout_int = qp->timeout_jiffies / 8;

	trace_hfi1_rc_do_tid_send(qp, false);
	spin_lock_irqsave(&qp->s_lock, ps.flags);

	/* Return if we are already busy processing a work request. */
	if (!hfi1_send_tid_ok(qp)) {
		if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
			iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
		return;
	}

	priv->s_flags |= RVT_S_BUSY;

	ps.timeout = jiffies + ps.timeout_int;
	ps.cpu = priv->s_sde ? priv->s_sde->cpu :
		cpumask_first(cpumask_of_node(ps.ppd->dd->node));
	ps.pkts_sent = false;

	/* insure a pre-built packet is handled */
	ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
	do {
		/* Check for a constructed packet to be sent. */
		if (ps.s_txreq) {
			if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
				qp->s_flags |= RVT_S_BUSY;
				ps.wait = iowait_get_ib_work(&priv->s_iowait);
			}
			spin_unlock_irqrestore(&qp->s_lock, ps.flags);

			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (hfi1_verbs_send(qp, &ps))
				return;

			/* allow other tasks to run */
			if (hfi1_schedule_send_yield(qp, &ps, true))
				return;

			spin_lock_irqsave(&qp->s_lock, ps.flags);
			if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
				qp->s_flags &= ~RVT_S_BUSY;
				priv->s_flags &= ~HFI1_S_TID_BUSY_SET;
				ps.wait = iowait_get_tid_work(&priv->s_iowait);
				if (iowait_flag_set(&priv->s_iowait,
						    IOWAIT_PENDING_IB))
					hfi1_schedule_send(qp);
			}
		}
	} while (hfi1_make_tid_rdma_pkt(qp, &ps));
	iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}
static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
				   priv->s_sde ?
				   priv->s_sde->cpu :
				   cpumask_first(cpumask_of_node(dd->node)));
}
/**
 * hfi1_schedule_tid_send - schedule progress on TID RDMA state machine
 * @qp: the QP
 *
 * This schedules qp progress on the TID RDMA state machine. Caller
 * should hold the s_lock.
 * Unlike hfi1_schedule_send(), this cannot use hfi1_send_ok() because
 * the two state machines can step on each other with respect to the
 * RVT_S_BUSY flag.
 * Therefore, a modified test is used.
 * @return true if the second leg is scheduled;
 *  false if the second leg is not scheduled.
 */
bool hfi1_schedule_tid_send(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	if (hfi1_send_tid_ok(qp)) {
		/*
		 * The following call returns true if the qp is not on the
		 * queue and false if the qp is already on the queue before
		 * this call. Either way, the qp will be on the queue when the
		 * call returns.
		 */
		_hfi1_schedule_tid_send(qp);
		return true;
	}
	if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
		iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
				IOWAIT_PENDING_TID);
	return false;
}
bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e)
{
	struct rvt_ack_entry *prev;
	struct tid_rdma_request *req;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;
	u32 s_prev;

	s_prev = qp->s_tail_ack_queue == 0 ? rvt_size_atomic(&dev->rdi) :
		(qp->s_tail_ack_queue - 1);
	prev = &qp->s_ack_queue[s_prev];

	if ((e->opcode == TID_OP(READ_REQ) ||
	     e->opcode == OP(RDMA_READ_REQUEST)) &&
	    prev->opcode == TID_OP(WRITE_REQ)) {
		req = ack_to_tid_req(prev);
		if (req->ack_seg != req->total_segs) {
			priv->s_flags |= HFI1_R_TID_WAIT_INTERLCK;
			return true;
		}
	}
	return false;
}
static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx)
{
	u64 reg;

	/*
	 * The only sane way to get the amount of
	 * progress is to read the HW flow state.
	 */
	reg = read_uctxt_csr(dd, ctxt, RCV_TID_FLOW_TABLE + (8 * fidx));
	return mask_psn(reg);
}
static void tid_rdma_rcv_err(struct hfi1_packet *packet,
			     struct ib_other_headers *ohdr,
			     struct rvt_qp *qp, u32 psn, int diff, bool fecn)
{
	unsigned long flags;

	tid_rdma_rcv_error(packet, ohdr, qp, psn, diff);
	if (fecn) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qp->s_flags |= RVT_S_ECN;
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
}
static void update_r_next_psn_fecn(struct hfi1_packet *packet,
				   struct hfi1_qp_priv *priv,
				   struct hfi1_ctxtdata *rcd,
				   struct tid_rdma_flow *flow,
				   bool fecn)
{
	/*
	 * If a start/middle packet is delivered here due to
	 * RSM rule and FECN, we need to update the r_next_psn.
	 */
	if (fecn && packet->etype == RHF_RCV_TYPE_EAGER &&
	    !(priv->s_flags & HFI1_R_TID_SW_PSN)) {
		struct hfi1_devdata *dd = rcd->dd;

		flow->flow_state.r_next_psn =
			read_r_next_psn(dd, rcd->ctxt, flow->idx);