// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2018 Intel Corporation.
 *
 */

#include "hfi.h"
#include "qp.h"
#include "rc.h"
#include "verbs.h"
#include "tid_rdma.h"
#include "exp_rcv.h"
#include "trace.h"

/**
 * DOC: TID RDMA READ protocol
 *
 * This is an end-to-end protocol at the hfi1 level between two nodes that
 * improves performance by avoiding data copy on the requester side. It
 * converts a qualified RDMA READ request into a TID RDMA READ request on
 * the requester side and thereafter handles the request and response
 * differently. To be qualified, the RDMA READ request should meet the
 * following:
 * -- The total data length should be greater than 256K;
 * -- The total data length should be a multiple of 4K page size;
 * -- Each local scatter-gather entry should be 4K page aligned;
 * -- Each local scatter-gather entry should be a multiple of 4K page size;
 */
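
/*
 * For example, a 512K READ built from two 256K, 4K-aligned local buffers
 * qualifies, while a 258K request (larger than 256K but not a 4K multiple)
 * or one containing a 2K-aligned scatter-gather entry falls back to an
 * ordinary RDMA READ.
 */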

#define RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK BIT_ULL(32)
#define RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK BIT_ULL(33)
#define RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK BIT_ULL(34)
#define RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK BIT_ULL(35)
#define RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK BIT_ULL(37)
#define RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK BIT_ULL(38)

/* Maximum number of packets within a flow generation. */
#define MAX_TID_FLOW_PSN BIT(HFI1_KDETH_BTH_SEQ_SHIFT)

#define GENERATION_MASK 0xFFFFF

static u32 mask_generation(u32 a)
{
	return a & GENERATION_MASK;
}

/* Reserved generation value to set to unused flows for kernel contexts */
#define KERN_GENERATION_RESERVED mask_generation(U32_MAX)
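/* e.g. mask_generation(0x12345678) == 0x45678; the reserved value is 0xFFFFF */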

/*
 * J_KEY for kernel contexts when TID RDMA is used.
 * See generate_jkey() in hfi.h for more information.
 */
#define TID_RDMA_JKEY 32
#define HFI1_KERNEL_MIN_JKEY HFI1_ADMIN_JKEY_RANGE
#define HFI1_KERNEL_MAX_JKEY (2 * HFI1_ADMIN_JKEY_RANGE - 1)

/* Maximum number of segments in flight per QP request. */
#define TID_RDMA_MAX_READ_SEGS_PER_REQ 6
#define TID_RDMA_MAX_WRITE_SEGS_PER_REQ 4
#define MAX_REQ max_t(u16, TID_RDMA_MAX_READ_SEGS_PER_REQ, \
			TID_RDMA_MAX_WRITE_SEGS_PER_REQ)
#define MAX_FLOWS roundup_pow_of_two(MAX_REQ + 1)
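/* With the values above, MAX_REQ == 6, so MAX_FLOWS == roundup_pow_of_two(7) == 8 */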

#define MAX_EXPECTED_PAGES (MAX_EXPECTED_BUFFER / PAGE_SIZE)

#define TID_RDMA_DESTQP_FLOW_SHIFT 11
#define TID_RDMA_DESTQP_FLOW_MASK  0x1f

#define TID_FLOW_SW_PSN BIT(0)

#define TID_OPFN_QP_CTXT_MASK 0xff
#define TID_OPFN_QP_CTXT_SHIFT 56
#define TID_OPFN_QP_KDETH_MASK 0xff
#define TID_OPFN_QP_KDETH_SHIFT 48
#define TID_OPFN_MAX_LEN_MASK 0x7ff
#define TID_OPFN_MAX_LEN_SHIFT 37
#define TID_OPFN_TIMEOUT_MASK 0x1f
#define TID_OPFN_TIMEOUT_SHIFT 32
#define TID_OPFN_RESERVED_MASK 0x3f
#define TID_OPFN_RESERVED_SHIFT 26
#define TID_OPFN_URG_MASK 0x1
#define TID_OPFN_URG_SHIFT 25
#define TID_OPFN_VER_MASK 0x7
#define TID_OPFN_VER_SHIFT 22
#define TID_OPFN_JKEY_MASK 0x3f
#define TID_OPFN_JKEY_SHIFT 16
#define TID_OPFN_MAX_READ_MASK 0x3f
#define TID_OPFN_MAX_READ_SHIFT 10
#define TID_OPFN_MAX_WRITE_MASK 0x3f
#define TID_OPFN_MAX_WRITE_SHIFT 4

/*
 * OPFN TID layout
 *
 * 63               47               31               15
 * NNNNNNNNKKKKKKKK MMMMMMMMMMMTTTTT DDDDDDUVVVJJJJJJ RRRRRRWWWWWWCCCC
 * 3210987654321098 7654321098765432 1098765432109876 5432109876543210
 * N - the context Number
 * K - the Kdeth_qp
 * M - Max_len
 * T - Timeout
 * D - reserveD
 * V - Version
 * U - Urg capable
 * J - Jkey
 * R - max_Read
 * W - max_Write
 * C - Capcode
 */

static u32 tid_rdma_flow_wt;

static void tid_rdma_trigger_resume(struct work_struct *work);
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
					 gfp_t gfp);
static void hfi1_init_trdma_req(struct rvt_qp *qp,
				struct tid_rdma_request *req);
static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
static void hfi1_tid_timeout(struct timer_list *t);
static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);

static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
{
	return
		(((u64)p->qp & TID_OPFN_QP_CTXT_MASK) <<
			TID_OPFN_QP_CTXT_SHIFT) |
		((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) <<
			TID_OPFN_QP_KDETH_SHIFT) |
		(((u64)((p->max_len >> PAGE_SHIFT) - 1) &
			TID_OPFN_MAX_LEN_MASK) << TID_OPFN_MAX_LEN_SHIFT) |
		(((u64)p->timeout & TID_OPFN_TIMEOUT_MASK) <<
			TID_OPFN_TIMEOUT_SHIFT) |
		(((u64)p->urg & TID_OPFN_URG_MASK) << TID_OPFN_URG_SHIFT) |
		(((u64)p->jkey & TID_OPFN_JKEY_MASK) << TID_OPFN_JKEY_SHIFT) |
		(((u64)p->max_read & TID_OPFN_MAX_READ_MASK) <<
			TID_OPFN_MAX_READ_SHIFT) |
		(((u64)p->max_write & TID_OPFN_MAX_WRITE_MASK) <<
			TID_OPFN_MAX_WRITE_SHIFT);
}

static void tid_rdma_opfn_decode(struct tid_rdma_params *p, u64 data)
{
	p->max_len = (((data >> TID_OPFN_MAX_LEN_SHIFT) &
		       TID_OPFN_MAX_LEN_MASK) + 1) << PAGE_SHIFT;
	p->jkey = (data >> TID_OPFN_JKEY_SHIFT) & TID_OPFN_JKEY_MASK;
	p->max_write = (data >> TID_OPFN_MAX_WRITE_SHIFT) &
		       TID_OPFN_MAX_WRITE_MASK;
	p->max_read = (data >> TID_OPFN_MAX_READ_SHIFT) &
		      TID_OPFN_MAX_READ_MASK;
	p->qp =
		((((data >> TID_OPFN_QP_KDETH_SHIFT) & TID_OPFN_QP_KDETH_MASK)
			<< 16) |
		 ((data >> TID_OPFN_QP_CTXT_SHIFT) & TID_OPFN_QP_CTXT_MASK));
	p->urg = (data >> TID_OPFN_URG_SHIFT) & TID_OPFN_URG_MASK;
	p->timeout = (data >> TID_OPFN_TIMEOUT_SHIFT) & TID_OPFN_TIMEOUT_MASK;
}
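
/*
 * Worked example of the layout above, with 4K pages (PAGE_SHIFT == 12):
 * p->qp == 0x80001 (KDETH QP 0x8, context 1), max_len == 256K (64 pages,
 * encoded as 63), timeout 14, jkey 32, max_read 6 and max_write 4 place
 * 0x01 in bits 63:56, 0x08 in bits 55:48, 63 in bits 47:37, 14 in bits
 * 36:32, 32 in bits 21:16, 6 in bits 15:10 and 4 in bits 9:4.
 */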

void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
{
	struct hfi1_qp_priv *priv = qp->priv;

	p->qp = (kdeth_qp << 16) | priv->rcd->ctxt;
	p->max_len = TID_RDMA_MAX_SEGMENT_SIZE;
	p->jkey = priv->rcd->jkey;
	p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ;
	p->max_write = TID_RDMA_MAX_WRITE_SEGS_PER_REQ;
	p->timeout = qp->timeout;
	p->urg = is_urg_masked(priv->rcd);
}

bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data)
{
	struct hfi1_qp_priv *priv = qp->priv;

	*data = tid_rdma_opfn_encode(&priv->tid_rdma.local);
	return true;
}

bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct tid_rdma_params *remote, *old;
	bool ret = true;

	old = rcu_dereference_protected(priv->tid_rdma.remote,
					lockdep_is_held(&priv->opfn.lock));
	data &= ~0xfULL;
	/*
	 * If data passed in is zero, return true so as not to continue the
	 * negotiation process
	 */
	if (!data || !HFI1_CAP_IS_KSET(TID_RDMA))
		goto null;
	/*
	 * If kzalloc fails, return false. This will result in:
	 * * at the requester a new OPFN request being generated to retry
	 *   the negotiation
	 * * at the responder, 0 being returned to the requester so as to
	 *   disable TID RDMA at both the requester and the responder
	 */
	remote = kzalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote) {
		ret = false;
		goto null;
	}

	tid_rdma_opfn_decode(remote, data);
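	/*
	 * The reap timeout below follows the IB local ACK timeout encoding
	 * (4.096 usec * 2^timeout), scaled by 8 * 7 == 56; for example,
	 * remote->timeout == 14 yields about 67 msec * 56, roughly 3.8 sec.
	 */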
	priv->tid_timer_timeout_jiffies =
		usecs_to_jiffies((((4096UL * (1UL << remote->timeout)) /
				   1000UL) << 3) * 7);
	trace_hfi1_opfn_param(qp, 0, &priv->tid_rdma.local);
	trace_hfi1_opfn_param(qp, 1, remote);
	rcu_assign_pointer(priv->tid_rdma.remote, remote);
	/*
	 * A TID RDMA READ request's segment size is not equal to
	 * remote->max_len only when the request's data length is smaller
	 * than remote->max_len. In that case, there will be only one segment.
	 * Therefore, when priv->pkts_ps is used to calculate req->cur_seg
	 * during retry, it will lead to req->cur_seg = 0, which is exactly
	 * what is expected.
	 */
	priv->pkts_ps = (u16)rvt_div_mtu(qp, remote->max_len);
	priv->timeout_shift = ilog2(priv->pkts_ps - 1) + 1;
	goto free;
null:
	RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
	priv->timeout_shift = 0;
free:
	if (old)
		kfree_rcu(old, rcu_head);
	return ret;
}

bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data)
{
	bool ret;

	ret = tid_rdma_conn_reply(qp, *data);
	*data = 0;
	/*
	 * If tid_rdma_conn_reply() returned an error, *data is left as 0 to
	 * indicate that TID RDMA could not be enabled. This will result in
	 * TID RDMA being disabled at the requester too.
	 */
	if (ret)
		(void)tid_rdma_conn_req(qp, data);
	return ret;
}

void tid_rdma_conn_error(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct tid_rdma_params *old;

	old = rcu_dereference_protected(priv->tid_rdma.remote,
					lockdep_is_held(&priv->opfn.lock));
	RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
	if (old)
		kfree_rcu(old, rcu_head);
}

/* This is called at context initialization time */
int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
{
	if (reinit)
		return 0;

	BUILD_BUG_ON(TID_RDMA_JKEY < HFI1_KERNEL_MIN_JKEY);
	BUILD_BUG_ON(TID_RDMA_JKEY > HFI1_KERNEL_MAX_JKEY);
	rcd->jkey = TID_RDMA_JKEY;
	hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey);
	return hfi1_alloc_ctxt_rcv_groups(rcd);
}

/**
 * qp_to_rcd - determine the receive context used by a qp
 * @rdi: the rvt dev info
 * @qp: the qp
 *
 * This routine returns the receive context associated
 * with a qp's qpn.
 *
 * Return: the context.
 */
static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
				       struct rvt_qp *qp)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	unsigned int ctxt;

	if (qp->ibqp.qp_num == 0)
		ctxt = 0;
	else
		ctxt = ((qp->ibqp.qp_num >> dd->qos_shift) %
			(dd->n_krcv_queues - 1)) + 1;

	return dd->rcd[ctxt];
}
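
/*
 * For example, with dd->qos_shift == 1 and dd->n_krcv_queues == 9, a QP
 * with qpn 0x10 maps to context ((0x10 >> 1) % 8) + 1 == 1, while QP0
 * always maps to context 0.
 */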

int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		      struct ib_qp_init_attr *init_attr)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	int i, ret;

	qpriv->rcd = qp_to_rcd(rdi, qp);

	spin_lock_init(&qpriv->opfn.lock);
	INIT_WORK(&qpriv->opfn.opfn_work, opfn_send_conn_request);
	INIT_WORK(&qpriv->tid_rdma.trigger_work, tid_rdma_trigger_resume);
	qpriv->flow_state.psn = 0;
	qpriv->flow_state.index = RXE_NUM_TID_FLOWS;
	qpriv->flow_state.last_index = RXE_NUM_TID_FLOWS;
	qpriv->flow_state.generation = KERN_GENERATION_RESERVED;
	qpriv->s_tid_cur = HFI1_QP_WQE_INVALID;
	qpriv->s_tid_head = HFI1_QP_WQE_INVALID;
	qpriv->s_tid_tail = HFI1_QP_WQE_INVALID;
	qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
	qpriv->r_tid_head = HFI1_QP_WQE_INVALID;
	qpriv->r_tid_tail = HFI1_QP_WQE_INVALID;
	qpriv->r_tid_ack = HFI1_QP_WQE_INVALID;
	qpriv->r_tid_alloc = HFI1_QP_WQE_INVALID;
	timer_setup(&qpriv->s_tid_timer, hfi1_tid_timeout, 0);
	INIT_LIST_HEAD(&qpriv->tid_wait);

	if (init_attr->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
		struct hfi1_devdata *dd = qpriv->rcd->dd;

		qpriv->pages = kzalloc_node(TID_RDMA_MAX_PAGES *
						sizeof(*qpriv->pages),
					    GFP_KERNEL, dd->node);
		if (!qpriv->pages)
			return -ENOMEM;
		for (i = 0; i < qp->s_size; i++) {
			struct hfi1_swqe_priv *priv;
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);

			priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
					    dd->node);
			if (!priv)
				return -ENOMEM;

			hfi1_init_trdma_req(qp, &priv->tid_req);
			priv->tid_req.e.swqe = wqe;
			wqe->priv = priv;
		}
		for (i = 0; i < rvt_max_atomic(rdi); i++) {
			struct hfi1_ack_priv *priv;

			priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
					    dd->node);
			if (!priv)
				return -ENOMEM;

			hfi1_init_trdma_req(qp, &priv->tid_req);
			priv->tid_req.e.ack = &qp->s_ack_queue[i];

			ret = hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req,
							    GFP_KERNEL);
			if (ret) {
				kfree(priv);
				return ret;
			}
			qp->s_ack_queue[i].priv = priv;
		}
	}

	return 0;
}

void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct rvt_swqe *wqe;
	u32 i;

	if (qp->ibqp.qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
		for (i = 0; i < qp->s_size; i++) {
			wqe = rvt_get_swqe_ptr(qp, i);
			kfree(wqe->priv);
			wqe->priv = NULL;
		}
		for (i = 0; i < rvt_max_atomic(rdi); i++) {
			struct hfi1_ack_priv *priv = qp->s_ack_queue[i].priv;

			if (priv)
				hfi1_kern_exp_rcv_free_flows(&priv->tid_req);
			kfree(priv);
			qp->s_ack_queue[i].priv = NULL;
		}
		cancel_work_sync(&qpriv->opfn.opfn_work);
		kfree(qpriv->pages);
		qpriv->pages = NULL;
	}
}

/* Flow and tid waiter functions */
/**
 * DOC: lock ordering
 *
 * There are two locks involved with the queuing
 * routines: the qp s_lock and the exp_lock.
 *
 * Since the tid space allocation is called from
 * the send engine, the qp s_lock is already held.
 *
 * The allocation routines will get the exp_lock.
 *
 * The first_qp() call is provided to allow the head of
 * the rcd wait queue to be fetched under the exp_lock and
 * followed by a drop of the exp_lock.
 *
 * Any qp in the wait list will have the qp reference count held
 * to hold the qp in memory.
 */
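
/*
 * Illustrative shape of the allocation paths below (a sketch, not extra
 * driver code): with qp->s_lock already held by the send engine,
 *
 *	spin_lock_irqsave(&rcd->exp_lock, flags);
 *	if (kernel_tid_waiters(rcd, queue, qp))
 *		goto queue;
 *	... reserve flow / TID resources ...
 *	dequeue_tid_waiter(rcd, queue, qp);
 *	fqp = first_qp(rcd, queue);	// head fetched with a reference
 *	spin_unlock_irqrestore(&rcd->exp_lock, flags);
 *	tid_rdma_schedule_tid_wakeup(fqp);
 */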

/*
 * return head of rcd wait list
 *
 * Must hold the exp_lock.
 *
 * Get a reference to the QP to hold the QP in memory.
 *
 * The caller must release the reference when the qp
 * is no longer in use.
 */
static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd,
			       struct tid_queue *queue)
	__must_hold(&rcd->exp_lock)
{
	struct hfi1_qp_priv *priv;

	lockdep_assert_held(&rcd->exp_lock);
	priv = list_first_entry_or_null(&queue->queue_head,
					struct hfi1_qp_priv,
					tid_wait);
	if (!priv)
		return NULL;
	rvt_get_qp(priv->owner);
	return priv->owner;
}

/**
 * kernel_tid_waiters - determine rcd wait
 * @rcd: the receive context
 * @queue: the tid queue to check
 * @qp: the head of the qp being processed
 *
 * Must hold the qp s_lock and the exp_lock.
 *
 * Return:
 * false if either of the conditions below is satisfied:
 * 1. The list is empty or
 * 2. The indicated qp is at the head of the list and the
 *    HFI1_S_WAIT_TID_SPACE bit is set in qp->s_flags.
 * true is returned otherwise.
 */
static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd,
			       struct tid_queue *queue, struct rvt_qp *qp)
	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
{
	struct rvt_qp *fqp;
	bool ret = true;

	lockdep_assert_held(&qp->s_lock);
	lockdep_assert_held(&rcd->exp_lock);
	fqp = first_qp(rcd, queue);
	if (!fqp || (fqp == qp && (qp->s_flags & HFI1_S_WAIT_TID_SPACE)))
		ret = false;
	rvt_put_qp(fqp);
	return ret;
}

/**
 * dequeue_tid_waiter - dequeue the qp from the list
 * @rcd: the receive context
 * @queue: the wait queue
 * @qp: the qp to remove from the wait list
 *
 * This routine removes the indicated qp from the
 * wait list if it is there.
 *
 * This should be done after the hardware flow and
 * tid array resources have been allocated.
 *
 * Must hold the qp s_lock and the rcd exp_lock.
 *
 * It assumes the s_lock to protect the s_flags
 * field and to reliably test the HFI1_S_WAIT_TID_SPACE flag.
 */
static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd,
			       struct tid_queue *queue, struct rvt_qp *qp)
	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	lockdep_assert_held(&rcd->exp_lock);
	if (list_empty(&priv->tid_wait))
		return;
	list_del_init(&priv->tid_wait);
	qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
	queue->dequeue++;
	rvt_put_qp(qp);
}

/**
 * queue_qp_for_tid_wait - suspend QP on tid space
 * @rcd: the receive context
 * @queue: the wait queue
 * @qp: the qp
 *
 * The qp is inserted at the tail of the rcd
 * wait queue and the HFI1_S_WAIT_TID_SPACE s_flag is set.
 *
 * Must hold the qp s_lock and the exp_lock.
 */
static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd,
				  struct tid_queue *queue, struct rvt_qp *qp)
	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	lockdep_assert_held(&rcd->exp_lock);
	if (list_empty(&priv->tid_wait)) {
		qp->s_flags |= HFI1_S_WAIT_TID_SPACE;
		list_add_tail(&priv->tid_wait, &queue->queue_head);
		priv->tid_enqueue = ++queue->enqueue;
		rcd->dd->verbs_dev.n_tidwait++;
		trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TID_SPACE);
		rvt_get_qp(qp);
	}
}

/**
 * __trigger_tid_waiter - trigger tid waiter
 * @qp: the qp
 *
 * This is a private entry point to schedule the qp
 * assuming the caller is holding the qp->s_lock.
 */
static void __trigger_tid_waiter(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{
	lockdep_assert_held(&qp->s_lock);
	if (!(qp->s_flags & HFI1_S_WAIT_TID_SPACE))
		return;
	trace_hfi1_qpwakeup(qp, HFI1_S_WAIT_TID_SPACE);
	hfi1_schedule_send(qp);
}

/**
 * tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp
 * @qp: the qp
 *
 * Trigger a schedule for a waiting qp in a deadlock-safe
 * manner. The qp reference is held prior
 * to this call via first_qp().
 *
 * If the qp trigger was already scheduled (!rval)
 * then the reference is dropped, otherwise the resume
 * or the destroy cancel will dispatch the reference.
 */
static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv;
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	bool rval;

	if (!qp)
		return;

	priv = qp->priv;
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	dd = dd_from_ibdev(qp->ibqp.device);

	rval = queue_work_on(priv->s_sde ?
			     priv->s_sde->cpu :
			     cpumask_first(cpumask_of_node(dd->node)),
			     ppd->hfi1_wq,
			     &priv->tid_rdma.trigger_work);
	if (!rval)
		rvt_put_qp(qp);
}

/**
 * tid_rdma_trigger_resume - field a trigger work request
 * @work: the work item
 *
 * Complete the off-qp trigger processing by directly
 * calling the progress routine.
 */
static void tid_rdma_trigger_resume(struct work_struct *work)
{
	struct tid_rdma_qp_params *tr;
	struct hfi1_qp_priv *priv;
	struct rvt_qp *qp;

	tr = container_of(work, struct tid_rdma_qp_params, trigger_work);
	priv = container_of(tr, struct hfi1_qp_priv, tid_rdma);
	qp = priv->owner;
	spin_lock_irq(&qp->s_lock);
	if (qp->s_flags & HFI1_S_WAIT_TID_SPACE) {
		spin_unlock_irq(&qp->s_lock);
		hfi1_do_send(priv->owner, true);
	} else {
		spin_unlock_irq(&qp->s_lock);
	}
	rvt_put_qp(qp);
}

/**
 * _tid_rdma_flush_wait - unwind any tid space wait
 * @qp: the qp
 * @queue: the queue to flush
 *
 * This is called when resetting a qp to
 * allow a destroy or reset to get rid
 * of any tid space linkage and reference counts.
 */
static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue)
	__must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv;

	if (!qp)
		return;
	lockdep_assert_held(&qp->s_lock);
	priv = qp->priv;
	qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
	spin_lock(&priv->rcd->exp_lock);
	if (!list_empty(&priv->tid_wait)) {
		list_del_init(&priv->tid_wait);
		qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
		queue->dequeue++;
		rvt_put_qp(qp);
	}
	spin_unlock(&priv->rcd->exp_lock);
}

void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;

	_tid_rdma_flush_wait(qp, &priv->rcd->flow_queue);
	_tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue);
}

/* Flow functions */
/**
 * kern_reserve_flow - allocate a hardware flow
 * @rcd: the context to use for allocation
 * @last: the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
 *        signify "don't care".
 *
 * Use a bit mask based allocation to reserve a hardware
 * flow for use in receiving KDETH data packets. If a preferred flow is
 * specified the function will attempt to reserve that flow again, if
 * available.
 *
 * The exp_lock must be held.
 *
 * Return:
 * On success: a value between 0 and RXE_NUM_TID_FLOWS - 1
 * On failure: -EAGAIN
 */
static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
	__must_hold(&rcd->exp_lock)
{
	int nr;

	/* Attempt to reserve the preferred flow index */
	if (last >= 0 && last < RXE_NUM_TID_FLOWS &&
	    !test_and_set_bit(last, &rcd->flow_mask))
		return last;

	nr = ffz(rcd->flow_mask);
	BUILD_BUG_ON(RXE_NUM_TID_FLOWS >=
		     (sizeof(rcd->flow_mask) * BITS_PER_BYTE));
	if (nr > (RXE_NUM_TID_FLOWS - 1))
		return -EAGAIN;
	set_bit(nr, &rcd->flow_mask);
	return nr;
}

static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation,
			     u32 flow_idx)
{
	u64 reg;

	reg = ((u64)generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
		RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK |
		RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK |
		RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK |
		RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK |
		RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK;

	if (generation != KERN_GENERATION_RESERVED)
		reg |= RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK;

	write_uctxt_csr(rcd->dd, rcd->ctxt,
			RCV_TID_FLOW_TABLE + 8 * flow_idx, reg);
}

static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
	__must_hold(&rcd->exp_lock)
{
	u32 generation = rcd->flows[flow_idx].generation;

	kern_set_hw_flow(rcd, generation, flow_idx);
	return generation;
}

static u32 kern_flow_generation_next(u32 gen)
{
	u32 generation = mask_generation(gen + 1);

	if (generation == KERN_GENERATION_RESERVED)
		generation = mask_generation(generation + 1);
	return generation;
}

static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
	__must_hold(&rcd->exp_lock)
{
	rcd->flows[flow_idx].generation =
		kern_flow_generation_next(rcd->flows[flow_idx].generation);
	kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx);
}

int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
	struct tid_flow_state *fs = &qpriv->flow_state;
	struct rvt_qp *fqp;
	unsigned long flags;
	int ret = 0;

	/* The QP already has an allocated flow */
	if (fs->index != RXE_NUM_TID_FLOWS)
		return ret;

	spin_lock_irqsave(&rcd->exp_lock, flags);
	if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp))
		goto queue;

	ret = kern_reserve_flow(rcd, fs->last_index);
	if (ret < 0)
		goto queue;
	fs->index = ret;
	fs->last_index = fs->index;

	/* Generation received in a RESYNC overrides default flow generation */
	if (fs->generation != KERN_GENERATION_RESERVED)
		rcd->flows[fs->index].generation = fs->generation;
	fs->generation = kern_setup_hw_flow(rcd, fs->index);
	fs->psn = 0;
	fs->flags = 0;
	dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
	/* get head before dropping lock */
	fqp = first_qp(rcd, &rcd->flow_queue);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);

	tid_rdma_schedule_tid_wakeup(fqp);
	return 0;
queue:
	queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);
	return -EAGAIN;
}

void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
	struct tid_flow_state *fs = &qpriv->flow_state;
	struct rvt_qp *fqp;
	unsigned long flags;

	if (fs->index >= RXE_NUM_TID_FLOWS)
		return;
	spin_lock_irqsave(&rcd->exp_lock, flags);
	kern_clear_hw_flow(rcd, fs->index);
	clear_bit(fs->index, &rcd->flow_mask);
	fs->index = RXE_NUM_TID_FLOWS;
	fs->psn = 0;
	fs->generation = KERN_GENERATION_RESERVED;

	/* get head before dropping lock */
	fqp = first_qp(rcd, &rcd->flow_queue);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);

	if (fqp == qp) {
		__trigger_tid_waiter(fqp);
		rvt_put_qp(fqp);
	} else {
		tid_rdma_schedule_tid_wakeup(fqp);
	}
}

void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
{
	int i;

	for (i = 0; i < RXE_NUM_TID_FLOWS; i++) {
		rcd->flows[i].generation = mask_generation(prandom_u32());
		kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
	}
}

/* TID allocation functions */
static u8 trdma_pset_order(struct tid_rdma_pageset *s)
{
	u8 count = s->count;

	return ilog2(count) + 1;
}
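/* e.g. a 1-page set yields order 1, a 2-page set order 2, an 8-page set order 4 */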

/**
 * tid_rdma_find_phys_blocks_4k - get groups based on mr info
 * @flow: the flow being used
 * @pages: pointer to an array of page structs
 * @npages: number of pages
 * @list: page set array to return
 *
 * This routine returns the number of groups associated with
 * the current sge information. This implementation is based
 * on the expected receive find_phys_blocks() adjusted to
 * use the MR information vs. the pfn.
 *
 * Return:
 * the number of RcvArray entries
 */
static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow,
					struct page **pages,
					u32 npages,
					struct tid_rdma_pageset *list)
{
	u32 pagecount, pageidx, setcount = 0, i;
	void *vaddr, *this_vaddr;

	if (!npages)
		return 0;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	vaddr = page_address(pages[0]);
	trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_vaddr = i < npages ? page_address(pages[i]) : NULL;
		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
					 this_vaddr);
		/*
		 * If the vaddr's are not sequential, pages are not physically
		 * contiguous.
		 */
		if (this_vaddr != (vaddr + PAGE_SIZE)) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down
			 * into sizes supported by the HW.
			 * There are two main constraints:
			 * 1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *    If the total set size is bigger than that
			 *    program only a MAX_EXPECTED_BUFFER chunk.
			 * 2. The buffer size has to be a power of two. If
			 *    it is not, round down to the closest power of
			 *    2 and program that size.
			 */
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				trace_hfi1_tid_pageset(flow->req->qp, setcount,
						       list[setcount].idx,
						       list[setcount].count);
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			vaddr = this_vaddr;
		} else {
			vaddr += PAGE_SIZE;
			pagecount++;
		}
	}
	/* ensure we always return an even number of sets */
	if (setcount & 1)
		list[setcount++].count = 0;
	return setcount;
}

/**
 * tid_flush_pages - dump out pages into pagesets
 * @list: list of pagesets
 * @idx: pointer to current page index
 * @pages: number of pages to dump
 * @sets: current number of pagesets
 *
 * This routine flushes out accumulated pages.
 *
 * To ensure an even number of sets the
 * code may add a filler.
 *
 * This can happen when pages is not
 * a power of 2 or pages is a power of 2
 * less than the maximum pages.
 *
 * Return:
 * The new number of sets
 */
static u32 tid_flush_pages(struct tid_rdma_pageset *list,
			   u32 *idx, u32 pages, u32 sets)
{
	while (pages) {
		u32 maxpages = pages;

		if (maxpages > MAX_EXPECTED_PAGES)
			maxpages = MAX_EXPECTED_PAGES;
		else if (!is_power_of_2(maxpages))
			maxpages = rounddown_pow_of_two(maxpages);
		list[sets].idx = *idx;
		list[sets++].count = maxpages;
		*idx += maxpages;
		pages -= maxpages;
	}
	/* might need a filler */
	if (sets & 1)
		list[sets++].count = 0;
	return sets;
}
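
/*
 * For example (assuming MAX_EXPECTED_PAGES is at least 8), flushing 7
 * accumulated pages emits pagesets of 4, 2 and 1 pages plus a zero-count
 * filler so that the total number of sets stays even.
 */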

/**
 * tid_rdma_find_phys_blocks_8k - get groups based on mr info
 * @flow: the flow being used
 * @pages: pointer to an array of page structs
 * @npages: number of pages
 * @list: page set array to return
 *
 * This routine parses an array of pages to compute pagesets
 * in an 8k compatible way.
 *
 * Pages are tested two at a time: i and i + 1 for contiguous
 * pages, then i - 1 and i for contiguous pages.
 *
 * If either test fails, any accumulated pages are flushed and
 * v0,v1 are emitted as separate PAGE_SIZE pagesets.
 *
 * Otherwise, the current 8k is totaled for a future flush.
 *
 * Return:
 * The number of pagesets
 * list set with the returned number of pagesets
 *
 */
static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow,
					struct page **pages,
					u32 npages,
					struct tid_rdma_pageset *list)
{
	u32 idx, sets = 0, i;
	u32 pagecnt = 0;
	void *v0, *v1, *vm1;

	if (!npages)
		return 0;
	for (idx = 0, i = 0, vm1 = NULL; i < npages; i += 2) {
		/* get a new v0 */
		v0 = page_address(pages[i]);
		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
		v1 = i + 1 < npages ?
				page_address(pages[i + 1]) : NULL;
		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
		/* compare i, i + 1 vaddr */
		if (v1 != (v0 + PAGE_SIZE)) {
			/* flush out pages */
			sets = tid_flush_pages(list, &idx, pagecnt, sets);
			/* output v0,v1 as two pagesets */
			list[sets].idx = idx++;
			list[sets++].count = 1;
			if (v1) {
				list[sets].count = 1;
				list[sets++].idx = idx++;
			} else {
				list[sets++].count = 0;
			}
			vm1 = NULL;
			pagecnt = 0;
			continue;
		}
		/* i,i+1 consecutive, look at i-1,i */
		if (vm1 && v0 != (vm1 + PAGE_SIZE)) {
			/* flush out pages */
			sets = tid_flush_pages(list, &idx, pagecnt, sets);
			pagecnt = 0;
		}
		/* pages will always be a multiple of 8k */
		pagecnt += 2;
		/* save i-1 */
		vm1 = v1;
		/* move to next pair */
	}
	/* dump residual pages at end */
	sets = tid_flush_pages(list, &idx, npages - idx, sets);
	/* by design cannot be odd sets */
	WARN_ON(sets & 1);
	return sets;
}

/*
 * Find pages for one segment of a sge array represented by @ss. The function
 * does not check the sge, the sge must have been checked for alignment with a
 * prior call to hfi1_kern_trdma_ok. Other sge checking is done as part of
 * rvt_lkey_ok and rvt_rkey_ok. Also, the function only modifies the local sge
 * copy maintained in @ss->sge, the original sge is not modified.
 *
 * Unlike IB RDMA WRITE, we can't decrement ss->num_sge here because we are not
 * releasing the MR reference count at the same time. Otherwise, we'll "leak"
 * references to the MR. This difference requires that we keep track of progress
 * into the sg_list. This is done by the cur_seg cursor in the tid_rdma_request
 * structure.
 */
static u32 kern_find_pages(struct tid_rdma_flow *flow,
			   struct page **pages,
			   struct rvt_sge_state *ss, bool *last)
{
	struct tid_rdma_request *req = flow->req;
	struct rvt_sge *sge = &ss->sge;
	u32 length = flow->req->seg_len;
	u32 len = PAGE_SIZE;
	u32 i = 0;

	while (length && req->isge < ss->num_sge) {
		pages[i++] = virt_to_page(sge->vaddr);

		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (!sge->sge_length) {
			if (++req->isge < ss->num_sge)
				*sge = ss->sg_list[req->isge - 1];
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				++sge->m;
				sge->n = 0;
			}
			sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}

	flow->length = flow->req->seg_len - length;
	*last = req->isge != ss->num_sge;
	return i;
}

static void dma_unmap_flow(struct tid_rdma_flow *flow)
{
	struct hfi1_devdata *dd;
	int i;
	struct tid_rdma_pageset *pset;

	dd = flow->req->rcd->dd;
	for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
			i++, pset++) {
		if (pset->count && pset->addr) {
			dma_unmap_page(&dd->pcidev->dev,
				       pset->addr,
				       PAGE_SIZE * pset->count,
				       DMA_FROM_DEVICE);
			pset->mapped = 0;
		}
	}
}

static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages)
{
	int i;
	struct hfi1_devdata *dd = flow->req->rcd->dd;
	struct tid_rdma_pageset *pset;

	for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
			i++, pset++) {
		if (pset->count) {
			pset->addr = dma_map_page(&dd->pcidev->dev,
						  pages[pset->idx],
						  0,
						  PAGE_SIZE * pset->count,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(&dd->pcidev->dev, pset->addr)) {
				dma_unmap_flow(flow);
				return -ENOMEM;
			}
			pset->mapped = 1;
		}
	}
	return 0;
}

static inline bool dma_mapped(struct tid_rdma_flow *flow)
{
	return !!flow->pagesets[0].mapped;
}

/*
 * Get pages pointers and identify contiguous physical memory chunks for a
 * segment. All segments are of length flow->req->seg_len.
 */
static int kern_get_phys_blocks(struct tid_rdma_flow *flow,
				struct page **pages,
				struct rvt_sge_state *ss, bool *last)
{
	u8 npages;

	/* Reuse previously computed pagesets, if any */
	if (flow->npagesets) {
		trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
					  flow);
		if (!dma_mapped(flow))
			return dma_map_flow(flow, pages);
		return 0;
	}

	npages = kern_find_pages(flow, pages, ss, last);

	if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
		flow->npagesets =
			tid_rdma_find_phys_blocks_4k(flow, pages, npages,
						     flow->pagesets);
	else
		flow->npagesets =
			tid_rdma_find_phys_blocks_8k(flow, pages, npages,
						     flow->pagesets);

	return dma_map_flow(flow, pages);
}

static inline void kern_add_tid_node(struct tid_rdma_flow *flow,
				     struct hfi1_ctxtdata *rcd, char *s,
				     struct tid_group *grp, u8 cnt)
{
	struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++];

	WARN_ON_ONCE(flow->tnode_cnt >=
		     (TID_RDMA_MAX_SEGMENT_SIZE >> PAGE_SHIFT));
	if (WARN_ON_ONCE(cnt & 1))
		dd_dev_err(rcd->dd,
			   "unexpected odd allocation cnt %u map 0x%x used %u",
			   cnt, grp->map, grp->used);

	node->grp = grp;
	node->map = grp->map;
	node->cnt = cnt;
	trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
				grp->base, grp->map, grp->used, cnt);
}

/*
 * Try to allocate pageset_count TID's from TID groups for a context
 *
 * This function allocates TID's without moving groups between lists or
 * modifying grp->map. This is done as follows, being cognizant of the lists
 * between which the TID groups will move:
 * 1. First allocate complete groups of 8 TID's since this is more efficient,
 *    these groups will move from group->full without affecting used
 * 2. If more TID's are needed allocate from used (will move from used->full or
 *    stay in used)
 * 3. If we still don't have the required number of TID's go back and look
 *    again at a complete group (will move from group->used)
 */
static int kern_alloc_tids(struct tid_rdma_flow *flow)
{
	struct hfi1_ctxtdata *rcd = flow->req->rcd;
	struct hfi1_devdata *dd = rcd->dd;
	u32 ngroups, pageidx = 0;
	struct tid_group *group = NULL, *used;
	u8 use;

	flow->tnode_cnt = 0;
	ngroups = flow->npagesets / dd->rcv_entries.group_size;
	if (!ngroups)
		goto used_list;

	/* First look at complete groups */
	list_for_each_entry(group, &rcd->tid_group_list.list, list) {
		kern_add_tid_node(flow, rcd, "complete groups", group,
				  group->size);

		pageidx += group->size;
		if (!--ngroups)
			break;
	}

	if (pageidx >= flow->npagesets)
		goto ok;

used_list:
	/* Now look at partially used groups */
	list_for_each_entry(used, &rcd->tid_used_list.list, list) {
		use = min_t(u32, flow->npagesets - pageidx,
			    used->size - used->used);
		kern_add_tid_node(flow, rcd, "used groups", used, use);

		pageidx += use;
		if (pageidx >= flow->npagesets)
			goto ok;
	}

	/*
	 * Look again at a complete group, continuing from where we left.
	 * However, if we are at the head, we have reached the end of the
	 * complete groups list from the first loop above
	 */
	if (group && &group->list == &rcd->tid_group_list.list)
		goto bail_eagain;
	group = list_prepare_entry(group, &rcd->tid_group_list.list,
				   list);
	if (list_is_last(&group->list, &rcd->tid_group_list.list))
		goto bail_eagain;
	group = list_next_entry(group, list);
	use = min_t(u32, flow->npagesets - pageidx, group->size);
	kern_add_tid_node(flow, rcd, "complete continue", group, use);
	pageidx += use;
	if (pageidx >= flow->npagesets)
		goto ok;
bail_eagain:
	trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
				  (u64)flow->npagesets);
	return -EAGAIN;
ok:
	return 0;
}

static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num,
				   u32 *pset_idx)
{
	struct hfi1_ctxtdata *rcd = flow->req->rcd;
	struct hfi1_devdata *dd = rcd->dd;
	struct kern_tid_node *node = &flow->tnode[grp_num];
	struct tid_group *grp = node->grp;
	struct tid_rdma_pageset *pset;
	u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
	u32 rcventry, npages = 0, pair = 0, tidctrl;
	u8 i, cnt = 0;

	for (i = 0; i < grp->size; i++) {
		rcventry = grp->base + i;

		if (node->map & BIT(i) || cnt >= node->cnt) {
			rcv_array_wc_fill(dd, rcventry);
			continue;
		}
		pset = &flow->pagesets[(*pset_idx)++];
		if (pset->count) {
			hfi1_put_tid(dd, rcventry, PT_EXPECTED,
				     pset->addr, trdma_pset_order(pset));
		} else {
			hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
		}
		npages += pset->count;

		rcventry -= rcd->expected_base;
		tidctrl = pair ? 0x3 : rcventry & 0x1 ? 0x2 : 0x1;
		/*
		 * A single TID entry will be used for a rcvarray pair (with
		 * tidctrl 0x3) if ALL of these are true: (a) the bit pos is
		 * even, (b) the group map shows the current and the next bits
		 * as free, indicating two consecutive rcvarray entries are
		 * available, and (c) we actually need 2 more entries.
		 */
		pair = !(i & 0x1) && !((node->map >> i) & 0x3) &&
		       node->cnt >= cnt + 2;
		if (!pair) {
			if (!pset->count)
				tidctrl = 0x1;
			flow->tid_entry[flow->tidcnt++] =
				EXP_TID_SET(IDX, rcventry >> 1) |
				EXP_TID_SET(CTRL, tidctrl) |
				EXP_TID_SET(LEN, npages);
			trace_hfi1_tid_entry_alloc(/* entry */
			   flow->req->qp, flow->tidcnt - 1,
			   flow->tid_entry[flow->tidcnt - 1]);

			/* Efficient DIV_ROUND_UP(npages, pmtu_pg) */
			flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg);
			npages = 0;
		}

		if (grp->used == grp->size - 1)
			tid_group_move(grp, &rcd->tid_used_list,
				       &rcd->tid_full_list);
		else if (!grp->used)
			tid_group_move(grp, &rcd->tid_group_list,
				       &rcd->tid_used_list);

		grp->used++;
		grp->map |= BIT(i);
		cnt++;
	}
}

static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num)
{
	struct hfi1_ctxtdata *rcd = flow->req->rcd;
	struct hfi1_devdata *dd = rcd->dd;
	struct kern_tid_node *node = &flow->tnode[grp_num];
	struct tid_group *grp = node->grp;
	u32 rcventry;
	u8 i, cnt = 0;

	for (i = 0; i < grp->size; i++) {
		rcventry = grp->base + i;

		if (node->map & BIT(i) || cnt >= node->cnt) {
			rcv_array_wc_fill(dd, rcventry);
			continue;
		}

		hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);

		grp->used--;
		grp->map &= ~BIT(i);
		cnt++;

		if (grp->used == grp->size - 1)
			tid_group_move(grp, &rcd->tid_full_list,
				       &rcd->tid_used_list);
		else if (!grp->used)
			tid_group_move(grp, &rcd->tid_used_list,
				       &rcd->tid_group_list);
	}
	if (WARN_ON_ONCE(cnt & 1))
		dd_dev_err(dd, "unexpected odd free cnt %u map 0x%x used %u",
			   cnt, grp->map, grp->used);
}

static void kern_program_rcvarray(struct tid_rdma_flow *flow)
{
	u32 pset_idx = 0;
	int i;

	flow->npkts = 0;
	flow->tidcnt = 0;
	for (i = 0; i < flow->tnode_cnt; i++)
		kern_program_rcv_group(flow, i, &pset_idx);
	trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
}

/**
 * hfi1_kern_exp_rcv_setup() - setup TID's and flow for one segment of a
 *			       TID RDMA request
 *
 * @req: TID RDMA request for which the segment/flow is being set up
 * @ss: sge state, maintains state across successive segments of a sge
 * @last: set to true after the last sge segment has been processed
 *
 * This function
 * (1) finds a free flow entry in the flow circular buffer
 * (2) finds pages and contiguous physical chunks constituting one segment
 *     of an sge
 * (3) allocates TID group entries for those chunks
 * (4) programs rcvarray entries in the hardware corresponding to those
 *     TID's
 * (5) computes a tidarray with formatted TID entries which can be sent
 *     to the sender
 * (6) Reserves and programs HW flows.
 * (7) It also manages queuing the QP when TID/flow resources are not
 *     available.
 *
 * @req points to struct tid_rdma_request of which the segments are a part. The
 * function uses qp, rcd and seg_len members of @req. In the absence of errors,
 * req->flow_idx is the index of the flow which has been prepared in this
 * invocation of function call. With flow = &req->flows[req->flow_idx],
 * flow->tid_entry contains the TID array which the sender can use for TID RDMA
 * sends and flow->npkts contains number of packets required to send the
 * segment.
 *
 * hfi1_check_sge_align should be called prior to calling this function and if
 * it signals error TID RDMA cannot be used for this sge and this function
 * should not be called.
 *
 * For the queuing, caller must hold the flow->req->qp s_lock from the send
 * engine and the function will acquire the exp_lock.
 *
 * Return:
 * The function returns -EAGAIN if a sufficient number of TID/flow resources to
 * map the segment could not be allocated. In this case the function should be
 * called again with previous arguments to retry the TID allocation. There are
 * no other error returns. The function returns 0 on success.
 */
int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
			    struct rvt_sge_state *ss, bool *last)
	__must_hold(&req->qp->s_lock)
{
	struct tid_rdma_flow *flow = &req->flows[req->setup_head];
	struct hfi1_ctxtdata *rcd = req->rcd;
	struct hfi1_qp_priv *qpriv = req->qp->priv;
	unsigned long flags;
	struct rvt_qp *fqp;
	u16 clear_tail = req->clear_tail;

	lockdep_assert_held(&req->qp->s_lock);
	/*
	 * We return error if either (a) we don't have space in the flow
	 * circular buffer, or (b) we already have max entries in the buffer.
	 * Max entries depend on the type of request we are processing and the
	 * negotiated TID RDMA parameters.
	 */
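	/*
	 * For example, with MAX_FLOWS == 8 (computed from the defaults
	 * above), setup_head == 5 and clear_tail == 2 give CIRC_CNT == 3
	 * segments outstanding; a new segment is set up only while
	 * CIRC_SPACE() is non-zero and CIRC_CNT() is below req->n_flows.
	 */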
1439 | if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) || | |
1440 | CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >= | |
1441 | req->n_flows) | |
1442 | return -EINVAL; | |
1443 | ||
1444 | /* | |
1445 | * Get pages, identify contiguous physical memory chunks for the segment | |
1446 | * If we can not determine a DMA address mapping we will treat it just | |
1447 | * like if we ran out of space above. | |
1448 | */ | |
1449 | if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) { | |
1450 | hfi1_wait_kmem(flow->req->qp); | |
1451 | return -ENOMEM; | |
1452 | } | |
1453 | ||
1454 | spin_lock_irqsave(&rcd->exp_lock, flags); | |
1455 | if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp)) | |
1456 | goto queue; | |
1457 | ||
1458 | /* | |
1459 | * At this point we know the number of pagesets and hence the number of | |
1460 | * TID's to map the segment. Allocate the TID's from the TID groups. If | |
1461 | * we cannot allocate the required number we exit and try again later | |
1462 | */ | |
1463 | if (kern_alloc_tids(flow)) | |
1464 | goto queue; | |
1465 | /* | |
1466 | * Finally program the TID entries with the pagesets, compute the | |
1467 | * tidarray and enable the HW flow | |
1468 | */ | |
1469 | kern_program_rcvarray(flow); | |
1470 | ||
1471 | /* | |
1472 | * Setup the flow state with relevant information. | |
1473 | * This information is used for tracking the sequence of data packets | |
1474 | * for the segment. | |
1475 | * The flow is setup here as this is the most accurate time and place | |
1476 | * to do so. Doing at a later time runs the risk of the flow data in | |
1477 | * qpriv getting out of sync. | |
1478 | */ | |
1479 | memset(&flow->flow_state, 0x0, sizeof(flow->flow_state)); | |
1480 | flow->idx = qpriv->flow_state.index; | |
1481 | flow->flow_state.generation = qpriv->flow_state.generation; | |
1482 | flow->flow_state.spsn = qpriv->flow_state.psn; | |
1483 | flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1; | |
1484 | flow->flow_state.r_next_psn = | |
1485 | full_flow_psn(flow, flow->flow_state.spsn); | |
1486 | qpriv->flow_state.psn += flow->npkts; | |
1487 | ||
1488 | dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp); | |
1489 | /* get head before dropping lock */ | |
1490 | fqp = first_qp(rcd, &rcd->rarr_queue); | |
1491 | spin_unlock_irqrestore(&rcd->exp_lock, flags); | |
1492 | tid_rdma_schedule_tid_wakeup(fqp); | |
1493 | ||
1494 | req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1); | |
1495 | return 0; | |
1496 | queue: | |
1497 | queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp); | |
1498 | spin_unlock_irqrestore(&rcd->exp_lock, flags); | |
1499 | return -EAGAIN; | |
1500 | } | |
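/*
 * Editor's sketch of the typical caller pattern (it mirrors the use in
 * hfi1_build_tid_rdma_read_req() below; this is not a separate API):
 *
 *	if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) {
 *		// QP is now queued on rcd->rarr_queue; mark the
 *		// request and retry after the TID wakeup fires.
 *		req->state = TID_REQUEST_QUEUED;
 *		return;
 *	}
 *	// Success: req->setup_head has advanced past the new flow.
 */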
1501 | ||
1502 | static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow) | |
1503 | { | |
1504 | flow->npagesets = 0; | |
1505 | } | |
1506 | ||
1507 | /* | |
1508 | * This function is called after one segment has been successfully sent to | |
1509 | * release the flow and TID HW/SW resources for that segment. The segments for a | |
1510 | * TID RDMA request are set up and cleared in FIFO order, which is managed using a | |
1511 | * circular buffer. | |
1512 | */ | |
1513 | int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req) | |
1514 | __must_hold(&req->qp->s_lock) | |
1515 | { | |
1516 | struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; | |
1517 | struct hfi1_ctxtdata *rcd = req->rcd; | |
1518 | unsigned long flags; | |
1519 | int i; | |
1520 | struct rvt_qp *fqp; | |
1521 | ||
1522 | lockdep_assert_held(&req->qp->s_lock); | |
1523 | /* Exit if we have nothing in the flow circular buffer */ | |
1524 | if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) | |
1525 | return -EINVAL; | |
1526 | ||
1527 | spin_lock_irqsave(&rcd->exp_lock, flags); | |
1528 | ||
1529 | for (i = 0; i < flow->tnode_cnt; i++) | |
1530 | kern_unprogram_rcv_group(flow, i); | |
1531 | /* To prevent double unprogramming */ | |
1532 | flow->tnode_cnt = 0; | |
1533 | /* get head before dropping lock */ | |
1534 | fqp = first_qp(rcd, &rcd->rarr_queue); | |
1535 | spin_unlock_irqrestore(&rcd->exp_lock, flags); | |
1536 | ||
1537 | dma_unmap_flow(flow); | |
1538 | ||
1539 | hfi1_tid_rdma_reset_flow(flow); | |
1540 | req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1); | |
1541 | ||
1542 | if (fqp == req->qp) { | |
1543 | __trigger_tid_waiter(fqp); | |
1544 | rvt_put_qp(fqp); | |
1545 | } else { | |
1546 | tid_rdma_schedule_tid_wakeup(fqp); | |
1547 | } | |
1548 | ||
1549 | return 0; | |
1550 | } | |
1551 | ||
1552 | /* | |
1553 | * This function is called to release all the tid entries for | |
1554 | * a request. | |
1555 | */ | |
1556 | void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req) | |
1557 | __must_hold(&req->qp->s_lock) | |
1558 | { | |
1559 | /* Use memory barrier for proper ordering */ | |
1560 | while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) { | |
1561 | if (hfi1_kern_exp_rcv_clear(req)) | |
1562 | break; | |
1563 | } | |
1564 | } | |
1565 | ||
1566 | /** | |
1567 | * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information | |
1568 | * @req: the TID RDMA request to be cleaned | |
1569 | */ | |
1570 | static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req) | |
1571 | { | |
1572 | kfree(req->flows); | |
1573 | req->flows = NULL; | |
1574 | } | |
1575 | ||
1576 | /** | |
1577 | * __trdma_clean_swqe - clean up for large sized QPs | |
1578 | * @qp: the queue pair | |
1579 | * @wqe: the send wqe | |
1580 | */ | |
1581 | void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe) | |
1582 | { | |
1583 | struct hfi1_swqe_priv *p = wqe->priv; | |
1584 | ||
1585 | hfi1_kern_exp_rcv_free_flows(&p->tid_req); | |
1586 | } | |
1587 | ||
1588 | /* | |
1589 | * This can be called at QP create time or in the data path. | |
1590 | */ | |
1591 | static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req, | |
1592 | gfp_t gfp) | |
1593 | { | |
1594 | struct tid_rdma_flow *flows; | |
1595 | int i; | |
1596 | ||
1597 | if (likely(req->flows)) | |
1598 | return 0; | |
1599 | flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp, | |
1600 | req->rcd->numa_id); | |
1601 | if (!flows) | |
1602 | return -ENOMEM; | |
1603 | /* mini init */ | |
1604 | for (i = 0; i < MAX_FLOWS; i++) { | |
1605 | flows[i].req = req; | |
1606 | flows[i].npagesets = 0; | |
1607 | flows[i].pagesets[0].mapped = 0; | |
1608 | } | |
1609 | req->flows = flows; | |
1610 | return 0; | |
1611 | } | |
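/*
 * Editor's note: MAX_FLOWS is deliberately a power of two (with this
 * driver's constants it works out to roundup_pow_of_two(6 + 1) == 8),
 * so the ring indices can be advanced with a cheap mask, e.g.
 *
 *	req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
 *
 * as done throughout this file, rather than with a modulo operation.
 */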
1612 | ||
1613 | static void hfi1_init_trdma_req(struct rvt_qp *qp, | |
1614 | struct tid_rdma_request *req) | |
1615 | { | |
1616 | struct hfi1_qp_priv *qpriv = qp->priv; | |
1617 | ||
1618 | /* | |
1619 | * Initialize various TID RDMA request variables. | |
1620 | * These variables are "static", which is why they | |
1621 | * can be pre-initialized here before the WRs have | |
1622 | * even been submitted. | |
1623 | * However, non-NULL values for these variables do not | |
1624 | * imply that this WQE has been enabled for TID RDMA. | |
1625 | * Drivers should check the WQE's opcode to determine | |
1626 | * if a request is a TID RDMA one or not. | |
1627 | */ | |
1628 | req->qp = qp; | |
1629 | req->rcd = qpriv->rcd; | |
1630 | } | |
2f16a696 KW |
1631 | |
1632 | u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry, | |
1633 | void *context, int vl, int mode, u64 data) | |
1634 | { | |
1635 | struct hfi1_devdata *dd = context; | |
1636 | ||
1637 | return dd->verbs_dev.n_tidwait; | |
1638 | } | |
742a3826 | 1639 | |
b126078e KW |
1640 | static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req, |
1641 | u32 psn, u16 *fidx) | |
1642 | { | |
1643 | u16 head, tail; | |
1644 | struct tid_rdma_flow *flow; | |
1645 | ||
1646 | head = req->setup_head; | |
1647 | tail = req->clear_tail; | |
1648 | for ( ; CIRC_CNT(head, tail, MAX_FLOWS); | |
1649 | tail = CIRC_NEXT(tail, MAX_FLOWS)) { | |
1650 | flow = &req->flows[tail]; | |
1651 | if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 && | |
1652 | cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) { | |
1653 | if (fidx) | |
1654 | *fidx = tail; | |
1655 | return flow; | |
1656 | } | |
1657 | } | |
1658 | return NULL; | |
1659 | } | |
1660 | ||
9905bf06 KW |
1661 | static struct tid_rdma_flow * |
1662 | __find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail, | |
1663 | u32 psn, u16 *fidx) | |
1664 | { | |
1665 | for ( ; CIRC_CNT(head, tail, MAX_FLOWS); | |
1666 | tail = CIRC_NEXT(tail, MAX_FLOWS)) { | |
1667 | struct tid_rdma_flow *flow = &req->flows[tail]; | |
1668 | u32 spsn, lpsn; | |
1669 | ||
1670 | spsn = full_flow_psn(flow, flow->flow_state.spsn); | |
1671 | lpsn = full_flow_psn(flow, flow->flow_state.lpsn); | |
1672 | ||
1673 | if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) { | |
1674 | if (fidx) | |
1675 | *fidx = tail; | |
1676 | return flow; | |
1677 | } | |
1678 | } | |
1679 | return NULL; | |
1680 | } | |
1681 | ||
1682 | static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req, | |
1683 | u32 psn, u16 *fidx) | |
1684 | { | |
1685 | return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn, | |
1686 | fidx); | |
1687 | } | |
1688 | ||
742a3826 KW |
1689 | /* TID RDMA READ functions */ |
1690 | u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe, | |
1691 | struct ib_other_headers *ohdr, u32 *bth1, | |
1692 | u32 *bth2, u32 *len) | |
1693 | { | |
1694 | struct tid_rdma_request *req = wqe_to_tid_req(wqe); | |
1695 | struct tid_rdma_flow *flow = &req->flows[req->flow_idx]; | |
1696 | struct rvt_qp *qp = req->qp; | |
1697 | struct hfi1_qp_priv *qpriv = qp->priv; | |
1698 | struct hfi1_swqe_priv *wpriv = wqe->priv; | |
1699 | struct tid_rdma_read_req *rreq = &ohdr->u.tid_rdma.r_req; | |
1700 | struct tid_rdma_params *remote; | |
1701 | u32 req_len = 0; | |
1702 | void *req_addr = NULL; | |
1703 | ||
1704 | /* This is the IB psn used to send the request */ | |
1705 | *bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt); | |
3ce5daa2 | 1706 | trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow); |
742a3826 KW |
1707 | |
1708 | /* TID Entries for TID RDMA READ payload */ | |
1709 | req_addr = &flow->tid_entry[flow->tid_idx]; | |
1710 | req_len = sizeof(*flow->tid_entry) * | |
1711 | (flow->tidcnt - flow->tid_idx); | |
1712 | ||
1713 | memset(&ohdr->u.tid_rdma.r_req, 0, sizeof(ohdr->u.tid_rdma.r_req)); | |
1714 | wpriv->ss.sge.vaddr = req_addr; | |
1715 | wpriv->ss.sge.sge_length = req_len; | |
1716 | wpriv->ss.sge.length = wpriv->ss.sge.sge_length; | |
1717 | /* | |
1718 | * We can safely zero these out. Since the first SGE covers the | |
1719 | * entire packet, nothing else should even look at the MR. | |
1720 | */ | |
1721 | wpriv->ss.sge.mr = NULL; | |
1722 | wpriv->ss.sge.m = 0; | |
1723 | wpriv->ss.sge.n = 0; | |
1724 | ||
1725 | wpriv->ss.sg_list = NULL; | |
1726 | wpriv->ss.total_len = wpriv->ss.sge.sge_length; | |
1727 | wpriv->ss.num_sge = 1; | |
1728 | ||
1729 | /* Construct the TID RDMA READ REQ packet header */ | |
1730 | rcu_read_lock(); | |
1731 | remote = rcu_dereference(qpriv->tid_rdma.remote); | |
1732 | ||
1733 | KDETH_RESET(rreq->kdeth0, KVER, 0x1); | |
1734 | KDETH_RESET(rreq->kdeth1, JKEY, remote->jkey); | |
1735 | rreq->reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr + | |
1736 | req->cur_seg * req->seg_len + flow->sent); | |
1737 | rreq->reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey); | |
1738 | rreq->reth.length = cpu_to_be32(*len); | |
1739 | rreq->tid_flow_psn = | |
1740 | cpu_to_be32((flow->flow_state.generation << | |
1741 | HFI1_KDETH_BTH_SEQ_SHIFT) | | |
1742 | ((flow->flow_state.spsn + flow->pkt) & | |
1743 | HFI1_KDETH_BTH_SEQ_MASK)); | |
1744 | rreq->tid_flow_qp = | |
1745 | cpu_to_be32(qpriv->tid_rdma.local.qp | | |
1746 | ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << | |
1747 | TID_RDMA_DESTQP_FLOW_SHIFT) | | |
1748 | qpriv->rcd->ctxt); | |
1749 | rreq->verbs_qp = cpu_to_be32(qp->remote_qpn); | |
1750 | *bth1 &= ~RVT_QPN_MASK; | |
1751 | *bth1 |= remote->qp; | |
1752 | *bth2 |= IB_BTH_REQ_ACK; | |
1753 | rcu_read_unlock(); | |
1754 | ||
1755 | /* We are done with this segment */ | |
1756 | flow->sent += *len; | |
1757 | req->cur_seg++; | |
1758 | qp->s_state = TID_OP(READ_REQ); | |
1759 | req->ack_pending++; | |
1760 | req->flow_idx = (req->flow_idx + 1) & (MAX_FLOWS - 1); | |
1761 | qpriv->pending_tid_r_segs++; | |
1762 | qp->s_num_rd_atomic++; | |
1763 | ||
1764 | /* Set the TID RDMA READ request payload size */ | |
1765 | *len = req_len; | |
1766 | ||
1767 | return sizeof(ohdr->u.tid_rdma.r_req) / sizeof(u32); | |
1768 | } | |
1769 | ||
1770 | /* | |
1771 | * @len: contains the data length to read upon entry and the read request | |
1772 | * payload length upon exit. | |
1773 | */ | |
1774 | u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe, | |
1775 | struct ib_other_headers *ohdr, u32 *bth1, | |
1776 | u32 *bth2, u32 *len) | |
1777 | __must_hold(&qp->s_lock) | |
1778 | { | |
1779 | struct hfi1_qp_priv *qpriv = qp->priv; | |
1780 | struct tid_rdma_request *req = wqe_to_tid_req(wqe); | |
1781 | struct tid_rdma_flow *flow = NULL; | |
1782 | u32 hdwords = 0; | |
1783 | bool last; | |
1784 | bool retry = true; | |
1785 | u32 npkts = rvt_div_round_up_mtu(qp, *len); | |
1786 | ||
3ce5daa2 KW |
1787 | trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn, |
1788 | wqe->lpsn, req); | |
742a3826 KW |
1789 | /* |
1790 | * Check sync conditions. Make sure that there are no pending | |
1791 | * segments before freeing the flow. | |
1792 | */ | |
1793 | sync_check: | |
1794 | if (req->state == TID_REQUEST_SYNC) { | |
1795 | if (qpriv->pending_tid_r_segs) | |
1796 | goto done; | |
1797 | ||
1798 | hfi1_kern_clear_hw_flow(req->rcd, qp); | |
1799 | req->state = TID_REQUEST_ACTIVE; | |
1800 | } | |
1801 | ||
1802 | /* | |
1803 | * If the request for this segment is resent, the tid resources should | |
1804 | * have been allocated before. In this case, req->flow_idx should | |
1805 | * fall behind req->setup_head. | |
1806 | */ | |
1807 | if (req->flow_idx == req->setup_head) { | |
1808 | retry = false; | |
1809 | if (req->state == TID_REQUEST_RESEND) { | |
1810 | /* | |
1811 | * This is the first new segment for a request whose | |
1812 | * earlier segments have been re-sent. We need to | |
1813 | * set up the sge pointer correctly. | |
1814 | */ | |
1815 | restart_sge(&qp->s_sge, wqe, req->s_next_psn, | |
1816 | qp->pmtu); | |
1817 | req->isge = 0; | |
1818 | req->state = TID_REQUEST_ACTIVE; | |
1819 | } | |
1820 | ||
1821 | /* | |
1822 | * Check sync. The last PSN of each generation is reserved for | |
1823 | * RESYNC. | |
1824 | */ | |
1825 | if ((qpriv->flow_state.psn + npkts) > MAX_TID_FLOW_PSN - 1) { | |
1826 | req->state = TID_REQUEST_SYNC; | |
1827 | goto sync_check; | |
1828 | } | |
1829 | ||
1830 | /* Allocate the HW flow if it has not been allocated yet */ | |
1831 | if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp)) | |
1832 | goto done; | |
1833 | ||
1834 | /* | |
1835 | * The following call will advance req->setup_head after | |
1836 | * allocating the tid entries. | |
1837 | */ | |
1838 | if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) { | |
1839 | req->state = TID_REQUEST_QUEUED; | |
1840 | ||
1841 | /* | |
1842 | * We don't have resources for this segment. The QP has | |
1843 | * already been queued. | |
1844 | */ | |
1845 | goto done; | |
1846 | } | |
1847 | } | |
1848 | ||
1849 | /* req->flow_idx should only be one slot behind req->setup_head */ | |
1850 | flow = &req->flows[req->flow_idx]; | |
1851 | flow->pkt = 0; | |
1852 | flow->tid_idx = 0; | |
1853 | flow->sent = 0; | |
1854 | if (!retry) { | |
1855 | /* Set the first and last IB PSN for the flow in use. */ | |
1856 | flow->flow_state.ib_spsn = req->s_next_psn; | |
1857 | flow->flow_state.ib_lpsn = | |
1858 | flow->flow_state.ib_spsn + flow->npkts - 1; | |
1859 | } | |
1860 | ||
1861 | /* Calculate the next segment start psn. */ | |
1862 | req->s_next_psn += flow->npkts; | |
1863 | ||
1864 | /* Build the packet header */ | |
1865 | hdwords = hfi1_build_tid_rdma_read_packet(wqe, ohdr, bth1, bth2, len); | |
1866 | done: | |
1867 | return hdwords; | |
1868 | } | |
d0d564a1 KW |
1869 | |
1870 | /* | |
1871 | * Validate and accept the TID RDMA READ request parameters. | |
1872 | * Return 0 if the request is accepted successfully; | |
1873 | * Return 1 otherwise. | |
1874 | */ | |
1875 | static int tid_rdma_rcv_read_request(struct rvt_qp *qp, | |
1876 | struct rvt_ack_entry *e, | |
1877 | struct hfi1_packet *packet, | |
1878 | struct ib_other_headers *ohdr, | |
1879 | u32 bth0, u32 psn, u64 vaddr, u32 len) | |
1880 | { | |
1881 | struct hfi1_qp_priv *qpriv = qp->priv; | |
1882 | struct tid_rdma_request *req; | |
1883 | struct tid_rdma_flow *flow; | |
1884 | u32 flow_psn, i, tidlen = 0, pktlen, tlen; | |
1885 | ||
1886 | req = ack_to_tid_req(e); | |
1887 | ||
1888 | /* Validate the payload first */ | |
1889 | flow = &req->flows[req->setup_head]; | |
1890 | ||
1891 | /* payload length = packet length - (header length + ICRC length) */ | |
1892 | pktlen = packet->tlen - (packet->hlen + 4); | |
1893 | if (pktlen > sizeof(flow->tid_entry)) | |
1894 | return 1; | |
1895 | memcpy(flow->tid_entry, packet->ebuf, pktlen); | |
1896 | flow->tidcnt = pktlen / sizeof(*flow->tid_entry); | |
1897 | ||
1898 | /* | |
1899 | * Walk the TID_ENTRY list to make sure we have enough space for a | |
1900 | * complete segment. Also calculate the number of required packets. | |
1901 | */ | |
1902 | flow->npkts = rvt_div_round_up_mtu(qp, len); | |
1903 | for (i = 0; i < flow->tidcnt; i++) { | |
3ce5daa2 KW |
1904 | trace_hfi1_tid_entry_rcv_read_req(qp, i, |
1905 | flow->tid_entry[i]); | |
d0d564a1 KW |
1906 | tlen = EXP_TID_GET(flow->tid_entry[i], LEN); |
1907 | if (!tlen) | |
1908 | return 1; | |
1909 | ||
1910 | /* | |
1911 | * For tid pair (tidctr == 3), the buffer size of the pair | |
1912 | * should be the sum of the buffer size described by each | |
1913 | * tid entry. However, only the first entry needs to be | |
1914 | * specified in the request (see WFR HAS Section 8.5.7.1). | |
1915 | */ | |
1916 | tidlen += tlen; | |
1917 | } | |
1918 | if (tidlen * PAGE_SIZE < len) | |
1919 | return 1; | |
1920 | ||
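/*
 * Editor's worked example (illustrative): for a 256 KB segment with
 * 4 KB pages and a 4 KB pMTU, the check above requires the TID entries
 * to describe at least 64 pages, and flow->npkts comes out as
 * 256K / 4K == 64 response packets.  A shorter TID list makes the
 * request invalid (return 1).
 */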
1921 | /* Empty the flow array */ | |
1922 | req->clear_tail = req->setup_head; | |
1923 | flow->pkt = 0; | |
1924 | flow->tid_idx = 0; | |
1925 | flow->tid_offset = 0; | |
1926 | flow->sent = 0; | |
1927 | flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp); | |
1928 | flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) & | |
1929 | TID_RDMA_DESTQP_FLOW_MASK; | |
1930 | flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_psn)); | |
1931 | flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT; | |
1932 | flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK; | |
1933 | flow->length = len; | |
1934 | ||
1935 | flow->flow_state.lpsn = flow->flow_state.spsn + | |
1936 | flow->npkts - 1; | |
1937 | flow->flow_state.ib_spsn = psn; | |
1938 | flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1; | |
1939 | ||
3ce5daa2 | 1940 | trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow); |
d0d564a1 KW |
1941 | /* Set the initial flow index to the current flow. */ |
1942 | req->flow_idx = req->setup_head; | |
1943 | ||
1944 | /* advance circular buffer head */ | |
1945 | req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1); | |
1946 | ||
1947 | /* | |
1948 | * Compute last PSN for request. | |
1949 | */ | |
1950 | e->opcode = (bth0 >> 24) & 0xff; | |
1951 | e->psn = psn; | |
1952 | e->lpsn = psn + flow->npkts - 1; | |
1953 | e->sent = 0; | |
1954 | ||
1955 | req->n_flows = qpriv->tid_rdma.local.max_read; | |
1956 | req->state = TID_REQUEST_ACTIVE; | |
1957 | req->cur_seg = 0; | |
1958 | req->comp_seg = 0; | |
1959 | req->ack_seg = 0; | |
1960 | req->isge = 0; | |
1961 | req->seg_len = qpriv->tid_rdma.local.max_len; | |
1962 | req->total_len = len; | |
1963 | req->total_segs = 1; | |
1964 | req->r_flow_psn = e->psn; | |
1965 | ||
3ce5daa2 KW |
1966 | trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn, |
1967 | req); | |
d0d564a1 KW |
1968 | return 0; |
1969 | } | |
1970 | ||
1971 | static int tid_rdma_rcv_error(struct hfi1_packet *packet, | |
1972 | struct ib_other_headers *ohdr, | |
1973 | struct rvt_qp *qp, u32 psn, int diff) | |
1974 | { | |
1975 | struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | |
1976 | struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd; | |
07b92370 KW |
1977 | struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); |
1978 | struct hfi1_qp_priv *qpriv = qp->priv; | |
d0d564a1 KW |
1979 | struct rvt_ack_entry *e; |
1980 | struct tid_rdma_request *req; | |
1981 | unsigned long flags; | |
1982 | u8 prev; | |
1983 | bool old_req; | |
1984 | ||
3ce5daa2 KW |
1985 | trace_hfi1_rsp_tid_rcv_error(qp, psn); |
1986 | trace_hfi1_tid_rdma_rcv_err(qp, 0, psn, diff); | |
d0d564a1 KW |
1987 | if (diff > 0) { |
1988 | /* sequence error */ | |
1989 | if (!qp->r_nak_state) { | |
1990 | ibp->rvp.n_rc_seqnak++; | |
1991 | qp->r_nak_state = IB_NAK_PSN_ERROR; | |
1992 | qp->r_ack_psn = qp->r_psn; | |
1993 | rc_defered_ack(rcd, qp); | |
1994 | } | |
1995 | goto done; | |
1996 | } | |
1997 | ||
1998 | ibp->rvp.n_rc_dupreq++; | |
1999 | ||
2000 | spin_lock_irqsave(&qp->s_lock, flags); | |
2001 | e = find_prev_entry(qp, psn, &prev, NULL, &old_req); | |
07b92370 KW |
2002 | if (!e || (e->opcode != TID_OP(READ_REQ) && |
2003 | e->opcode != TID_OP(WRITE_REQ))) | |
d0d564a1 KW |
2004 | goto unlock; |
2005 | ||
2006 | req = ack_to_tid_req(e); | |
2007 | req->r_flow_psn = psn; | |
3ce5daa2 | 2008 | trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req); |
d0d564a1 KW |
2009 | if (e->opcode == TID_OP(READ_REQ)) { |
2010 | struct ib_reth *reth; | |
2011 | u32 offset; | |
2012 | u32 len; | |
2013 | u32 rkey; | |
2014 | u64 vaddr; | |
2015 | int ok; | |
2016 | u32 bth0; | |
2017 | ||
2018 | reth = &ohdr->u.tid_rdma.r_req.reth; | |
2019 | /* | |
2020 | * The requester always restarts from the start of the original | |
2021 | * request. | |
2022 | */ | |
2023 | offset = delta_psn(psn, e->psn) * qp->pmtu; | |
2024 | len = be32_to_cpu(reth->length); | |
2025 | if (psn != e->psn || len != req->total_len) | |
2026 | goto unlock; | |
2027 | ||
2028 | if (e->rdma_sge.mr) { | |
2029 | rvt_put_mr(e->rdma_sge.mr); | |
2030 | e->rdma_sge.mr = NULL; | |
2031 | } | |
2032 | ||
2033 | rkey = be32_to_cpu(reth->rkey); | |
2034 | vaddr = get_ib_reth_vaddr(reth); | |
2035 | ||
2036 | qp->r_len = len; | |
2037 | ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, | |
2038 | IB_ACCESS_REMOTE_READ); | |
2039 | if (unlikely(!ok)) | |
2040 | goto unlock; | |
2041 | ||
2042 | /* | |
2043 | * If all the response packets for the current request have | |
2044 | * been sent out and this request is complete (old_request | |
2045 | * == false), the TID flow may be unusable (the | |
2046 | * req->clear_tail is advanced). However, when an earlier | |
2047 | * request is received, this request will no longer be | |
2048 | * complete (qp->s_tail_ack_queue is moved back, see below). | |
2049 | * Consequently, we need to update the TID flow info every | |
2050 | * time a duplicate request is received. | |
2051 | */ | |
2052 | bth0 = be32_to_cpu(ohdr->bth[0]); | |
2053 | if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, | |
2054 | vaddr, len)) | |
2055 | goto unlock; | |
2056 | ||
2057 | /* | |
2058 | * True if the request is already scheduled (between | |
2059 | * qp->s_tail_ack_queue and qp->r_head_ack_queue); | |
2060 | */ | |
2061 | if (old_req) | |
2062 | goto unlock; | |
07b92370 KW |
2063 | } else { |
2064 | struct flow_state *fstate; | |
2065 | bool schedule = false; | |
2066 | u8 i; | |
2067 | ||
2068 | if (req->state == TID_REQUEST_RESEND) { | |
2069 | req->state = TID_REQUEST_RESEND_ACTIVE; | |
2070 | } else if (req->state == TID_REQUEST_INIT_RESEND) { | |
2071 | req->state = TID_REQUEST_INIT; | |
2072 | schedule = true; | |
2073 | } | |
2074 | ||
2075 | /* | |
2076 | * True if the request is already scheduled (between | |
2077 | * qp->s_tail_ack_queue and qp->r_head_ack_queue). | |
2078 | * Also, don't change requests, which are at the SYNC | |
2079 | * point and haven't generated any responses yet. | |
2080 | * There is nothing to retransmit for them yet. | |
2081 | */ | |
2082 | if (old_req || req->state == TID_REQUEST_INIT || | |
2083 | (req->state == TID_REQUEST_SYNC && !req->cur_seg)) { | |
2084 | for (i = prev + 1; ; i++) { | |
2085 | if (i > rvt_size_atomic(&dev->rdi)) | |
2086 | i = 0; | |
2087 | if (i == qp->r_head_ack_queue) | |
2088 | break; | |
2089 | e = &qp->s_ack_queue[i]; | |
2090 | req = ack_to_tid_req(e); | |
2091 | if (e->opcode == TID_OP(WRITE_REQ) && | |
2092 | req->state == TID_REQUEST_INIT) | |
2093 | req->state = TID_REQUEST_INIT_RESEND; | |
2094 | } | |
2095 | /* | |
2096 | * If the state of the request has been changed, | |
2097 | * the first leg needs to get scheduled in order to | |
2098 | * pick up the change. Otherwise, normal response | |
2099 | * processing should take care of it. | |
2100 | */ | |
2101 | if (!schedule) | |
2102 | goto unlock; | |
2103 | } | |
2104 | ||
2105 | /* | |
2106 | * If there is no more allocated segment, just schedule the qp | |
2107 | * without changing any state. | |
2108 | */ | |
2109 | if (req->clear_tail == req->setup_head) | |
2110 | goto schedule; | |
2111 | /* | |
2112 | * If this request has sent responses for segments which have | |
2113 | * not received data yet (flow_idx != clear_tail), the flow_idx | |
2114 | * pointer needs to be adjusted so the same responses can be | |
2115 | * re-sent. | |
2116 | */ | |
2117 | if (CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS)) { | |
2118 | fstate = &req->flows[req->clear_tail].flow_state; | |
2119 | qpriv->pending_tid_w_segs -= | |
2120 | CIRC_CNT(req->flow_idx, req->clear_tail, | |
2121 | MAX_FLOWS); | |
2122 | req->flow_idx = | |
2123 | CIRC_ADD(req->clear_tail, | |
2124 | delta_psn(psn, fstate->resp_ib_psn), | |
2125 | MAX_FLOWS); | |
2126 | qpriv->pending_tid_w_segs += | |
2127 | delta_psn(psn, fstate->resp_ib_psn); | |
2128 | /* | |
2129 | * When flow_idx == setup_head, we've gotten a duplicate | |
2130 | * request for a segment, which has not been allocated | |
2131 | * yet. In that case, don't adjust this request. | |
2132 | * However, we still want to go through the loop below | |
2133 | * to adjust all subsequent requests. | |
2134 | */ | |
2135 | if (CIRC_CNT(req->setup_head, req->flow_idx, | |
2136 | MAX_FLOWS)) { | |
2137 | req->cur_seg = delta_psn(psn, e->psn); | |
2138 | req->state = TID_REQUEST_RESEND_ACTIVE; | |
2139 | } | |
2140 | } | |
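/*
 * Editor's worked example (illustrative) for the adjustment above:
 * assume MAX_FLOWS == 8, clear_tail == 2, flow_idx == 5, and the flow
 * at clear_tail has resp_ib_psn == 100.  A duplicate request with
 * psn == 101 gives delta_psn(psn, resp_ib_psn) == 1, so
 *
 *	flow_idx = CIRC_ADD(2, 1, 8) == 3;
 *
 * (CIRC_ADD() advancing an index modulo the ring size), i.e. responses
 * from segment 101 onward will be regenerated, while
 * pending_tid_w_segs is rebalanced from the 3 previously outstanding
 * segments down to the 1 that now precedes flow_idx.
 */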
2141 | ||
2142 | for (i = prev + 1; ; i++) { | |
2143 | /* | |
2144 | * Look at everything up to and including | |
2145 | * s_tail_ack_queue | |
2146 | */ | |
2147 | if (i > rvt_size_atomic(&dev->rdi)) | |
2148 | i = 0; | |
2149 | if (i == qp->r_head_ack_queue) | |
2150 | break; | |
2151 | e = &qp->s_ack_queue[i]; | |
2152 | req = ack_to_tid_req(e); | |
2153 | trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, | |
2154 | e->lpsn, req); | |
2155 | if (e->opcode != TID_OP(WRITE_REQ) || | |
2156 | req->cur_seg == req->comp_seg || | |
2157 | req->state == TID_REQUEST_INIT || | |
2158 | req->state == TID_REQUEST_INIT_RESEND) { | |
2159 | if (req->state == TID_REQUEST_INIT) | |
2160 | req->state = TID_REQUEST_INIT_RESEND; | |
2161 | continue; | |
2162 | } | |
2163 | qpriv->pending_tid_w_segs -= | |
2164 | CIRC_CNT(req->flow_idx, | |
2165 | req->clear_tail, | |
2166 | MAX_FLOWS); | |
2167 | req->flow_idx = req->clear_tail; | |
2168 | req->state = TID_REQUEST_RESEND; | |
2169 | req->cur_seg = req->comp_seg; | |
2170 | } | |
d0d564a1 KW |
2171 | } |
2172 | /* Re-process old requests. */ | |
4f9264d1 KW |
2173 | if (qp->s_acked_ack_queue == qp->s_tail_ack_queue) |
2174 | qp->s_acked_ack_queue = prev; | |
d0d564a1 KW |
2175 | qp->s_tail_ack_queue = prev; |
2176 | /* | |
2177 | * Since the qp->s_tail_ack_queue is modified, the | |
2178 | * qp->s_ack_state must be changed to re-initialize | |
2179 | * qp->s_ack_rdma_sge; Otherwise, we will end up in | |
2180 | * wrong memory region. | |
2181 | */ | |
2182 | qp->s_ack_state = OP(ACKNOWLEDGE); | |
07b92370 KW |
2183 | schedule: |
2184 | /* | |
2185 | * It's possible to receive a retry psn that is earlier than an RNRNAK | |
2186 | * psn. In this case, the rnrnak state should be cleared. | |
2187 | */ | |
2188 | if (qpriv->rnr_nak_state) { | |
2189 | qp->s_nak_state = 0; | |
2190 | qpriv->rnr_nak_state = TID_RNR_NAK_INIT; | |
2191 | qp->r_psn = e->lpsn + 1; | |
2192 | hfi1_tid_write_alloc_resources(qp, true); | |
2193 | } | |
2194 | ||
d0d564a1 KW |
2195 | qp->r_state = e->opcode; |
2196 | qp->r_nak_state = 0; | |
2197 | qp->s_flags |= RVT_S_RESP_PENDING; | |
2198 | hfi1_schedule_send(qp); | |
2199 | unlock: | |
2200 | spin_unlock_irqrestore(&qp->s_lock, flags); | |
2201 | done: | |
2202 | return 1; | |
2203 | } | |
2204 | ||
2205 | void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet) | |
2206 | { | |
2207 | /* HANDLER FOR TID RDMA READ REQUEST packet (Responder side) */ | |
2208 | ||
2209 | /* | |
2210 | * 1. Verify TID RDMA READ REQ as per IB_OPCODE_RC_RDMA_READ | |
2211 | * (see hfi1_rc_rcv()) | |
2212 | * 2. Put TID RDMA READ REQ into the response queue (s_ack_queue) | |
2213 | * - Setup struct tid_rdma_req with request info | |
2214 | * - Initialize struct tid_rdma_flow info; | |
2215 | * - Copy TID entries; | |
2216 | * 3. Set the qp->s_ack_state. | |
2217 | * 4. Set RVT_S_RESP_PENDING in s_flags. | |
2218 | * 5. Kick the send engine (hfi1_schedule_send()) | |
2219 | */ | |
2220 | struct hfi1_ctxtdata *rcd = packet->rcd; | |
2221 | struct rvt_qp *qp = packet->qp; | |
2222 | struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | |
2223 | struct ib_other_headers *ohdr = packet->ohdr; | |
2224 | struct rvt_ack_entry *e; | |
2225 | unsigned long flags; | |
2226 | struct ib_reth *reth; | |
2227 | struct hfi1_qp_priv *qpriv = qp->priv; | |
2228 | u32 bth0, psn, len, rkey; | |
2229 | bool is_fecn; | |
2230 | u8 next; | |
2231 | u64 vaddr; | |
2232 | int diff; | |
2233 | u8 nack_state = IB_NAK_INVALID_REQUEST; | |
2234 | ||
2235 | bth0 = be32_to_cpu(ohdr->bth[0]); | |
2236 | if (hfi1_ruc_check_hdr(ibp, packet)) | |
2237 | return; | |
2238 | ||
2239 | is_fecn = process_ecn(qp, packet); | |
2240 | psn = mask_psn(be32_to_cpu(ohdr->bth[2])); | |
3ce5daa2 | 2241 | trace_hfi1_rsp_rcv_tid_read_req(qp, psn); |
d0d564a1 KW |
2242 | |
2243 | if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) | |
2244 | rvt_comm_est(qp); | |
2245 | ||
2246 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) | |
2247 | goto nack_inv; | |
2248 | ||
2249 | reth = &ohdr->u.tid_rdma.r_req.reth; | |
2250 | vaddr = be64_to_cpu(reth->vaddr); | |
2251 | len = be32_to_cpu(reth->length); | |
2252 | /* The length needs to be in multiples of PAGE_SIZE */ | |
2253 | if (!len || len & ~PAGE_MASK || len > qpriv->tid_rdma.local.max_len) | |
2254 | goto nack_inv; | |
2255 | ||
2256 | diff = delta_psn(psn, qp->r_psn); | |
2257 | if (unlikely(diff)) { | |
2258 | if (tid_rdma_rcv_error(packet, ohdr, qp, psn, diff)) | |
2259 | return; | |
2260 | goto send_ack; | |
2261 | } | |
2262 | ||
2263 | /* We've verified the request, insert it into the ack queue. */ | |
2264 | next = qp->r_head_ack_queue + 1; | |
2265 | if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) | |
2266 | next = 0; | |
2267 | spin_lock_irqsave(&qp->s_lock, flags); | |
2268 | if (unlikely(next == qp->s_tail_ack_queue)) { | |
2269 | if (!qp->s_ack_queue[next].sent) { | |
2270 | nack_state = IB_NAK_REMOTE_OPERATIONAL_ERROR; | |
2271 | goto nack_inv_unlock; | |
2272 | } | |
2273 | update_ack_queue(qp, next); | |
2274 | } | |
2275 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; | |
2276 | if (e->rdma_sge.mr) { | |
2277 | rvt_put_mr(e->rdma_sge.mr); | |
2278 | e->rdma_sge.mr = NULL; | |
2279 | } | |
2280 | ||
2281 | rkey = be32_to_cpu(reth->rkey); | |
2282 | qp->r_len = len; | |
2283 | ||
2284 | if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr, | |
2285 | rkey, IB_ACCESS_REMOTE_READ))) | |
2286 | goto nack_acc; | |
2287 | ||
2288 | /* Accept the request parameters */ | |
2289 | if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr, | |
2290 | len)) | |
2291 | goto nack_inv_unlock; | |
2292 | ||
2293 | qp->r_state = e->opcode; | |
2294 | qp->r_nak_state = 0; | |
2295 | /* | |
2296 | * We need to increment the MSN here instead of when we | |
2297 | * finish sending the result since a duplicate request would | |
2298 | * increment it more than once. | |
2299 | */ | |
2300 | qp->r_msn++; | |
2301 | qp->r_psn += e->lpsn - e->psn + 1; | |
2302 | ||
2303 | qp->r_head_ack_queue = next; | |
2304 | ||
07b92370 KW |
2305 | /* |
2306 | * For all requests other than TID WRITE which are added to the ack | |
2307 | * queue, qpriv->r_tid_alloc follows qp->r_head_ack_queue. It is ok to | |
2308 | * do this because of interlocks between these and TID WRITE | |
2309 | * requests. The same change has also been made in hfi1_rc_rcv(). | |
2310 | */ | |
2311 | qpriv->r_tid_alloc = qp->r_head_ack_queue; | |
2312 | ||
d0d564a1 KW |
2313 | /* Schedule the send tasklet. */ |
2314 | qp->s_flags |= RVT_S_RESP_PENDING; | |
2315 | hfi1_schedule_send(qp); | |
2316 | ||
2317 | spin_unlock_irqrestore(&qp->s_lock, flags); | |
2318 | if (is_fecn) | |
2319 | goto send_ack; | |
2320 | return; | |
2321 | ||
2322 | nack_inv_unlock: | |
2323 | spin_unlock_irqrestore(&qp->s_lock, flags); | |
2324 | nack_inv: | |
2325 | rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | |
2326 | qp->r_nak_state = nack_state; | |
2327 | qp->r_ack_psn = qp->r_psn; | |
2328 | /* Queue NAK for later */ | |
2329 | rc_defered_ack(rcd, qp); | |
2330 | return; | |
2331 | nack_acc: | |
2332 | spin_unlock_irqrestore(&qp->s_lock, flags); | |
2333 | rvt_rc_error(qp, IB_WC_LOC_PROT_ERR); | |
2334 | qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; | |
2335 | qp->r_ack_psn = qp->r_psn; | |
2336 | send_ack: | |
2337 | hfi1_send_rc_ack(packet, is_fecn); | |
2338 | } | |
1db21b50 KW |
2339 | |
2340 | u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e, | |
2341 | struct ib_other_headers *ohdr, u32 *bth0, | |
2342 | u32 *bth1, u32 *bth2, u32 *len, bool *last) | |
2343 | { | |
2344 | struct hfi1_ack_priv *epriv = e->priv; | |
2345 | struct tid_rdma_request *req = &epriv->tid_req; | |
2346 | struct hfi1_qp_priv *qpriv = qp->priv; | |
2347 | struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; | |
2348 | u32 tidentry = flow->tid_entry[flow->tid_idx]; | |
2349 | u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT; | |
2350 | struct tid_rdma_read_resp *resp = &ohdr->u.tid_rdma.r_rsp; | |
2351 | u32 next_offset, om = KDETH_OM_LARGE; | |
2352 | bool last_pkt; | |
2353 | u32 hdwords = 0; | |
2354 | struct tid_rdma_params *remote; | |
2355 | ||
2356 | *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset); | |
2357 | flow->sent += *len; | |
2358 | next_offset = flow->tid_offset + *len; | |
2359 | last_pkt = (flow->sent >= flow->length); | |
2360 | ||
3ce5daa2 KW |
2361 | trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry); |
2362 | trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow); | |
2363 | ||
1db21b50 KW |
2364 | rcu_read_lock(); |
2365 | remote = rcu_dereference(qpriv->tid_rdma.remote); | |
2366 | if (!remote) { | |
2367 | rcu_read_unlock(); | |
2368 | goto done; | |
2369 | } | |
2370 | KDETH_RESET(resp->kdeth0, KVER, 0x1); | |
2371 | KDETH_SET(resp->kdeth0, SH, !last_pkt); | |
2372 | KDETH_SET(resp->kdeth0, INTR, !!(!last_pkt && remote->urg)); | |
2373 | KDETH_SET(resp->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL)); | |
2374 | KDETH_SET(resp->kdeth0, TID, EXP_TID_GET(tidentry, IDX)); | |
2375 | KDETH_SET(resp->kdeth0, OM, om == KDETH_OM_LARGE); | |
2376 | KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om); | |
2377 | KDETH_RESET(resp->kdeth1, JKEY, remote->jkey); | |
2378 | resp->verbs_qp = cpu_to_be32(qp->remote_qpn); | |
2379 | rcu_read_unlock(); | |
2380 | ||
2381 | resp->aeth = rvt_compute_aeth(qp); | |
2382 | resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn + | |
2383 | flow->pkt)); | |
2384 | ||
2385 | *bth0 = TID_OP(READ_RESP) << 24; | |
2386 | *bth1 = flow->tid_qpn; | |
2387 | *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) & | |
2388 | HFI1_KDETH_BTH_SEQ_MASK) | | |
2389 | (flow->flow_state.generation << | |
2390 | HFI1_KDETH_BTH_SEQ_SHIFT)); | |
2391 | *last = last_pkt; | |
2392 | if (last_pkt) | |
2393 | /* Advance to next flow */ | |
2394 | req->clear_tail = (req->clear_tail + 1) & | |
2395 | (MAX_FLOWS - 1); | |
2396 | ||
2397 | if (next_offset >= tidlen) { | |
2398 | flow->tid_offset = 0; | |
2399 | flow->tid_idx++; | |
2400 | } else { | |
2401 | flow->tid_offset = next_offset; | |
2402 | } | |
2403 | ||
2404 | hdwords = sizeof(ohdr->u.tid_rdma.r_rsp) / sizeof(u32); | |
2405 | ||
2406 | done: | |
2407 | return hdwords; | |
2408 | } | |
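/*
 * Editor's note (illustrative): on the responder side each READ RESP
 * packet consumes one flow PSN (flow->pkt++ feeds bth2 above), and a
 * flow slot is retired (req->clear_tail advances) only once
 * flow->sent reaches flow->length.  This is the responder-side
 * counterpart of the requester's hfi1_kern_exp_rcv_clear() FIFO, so
 * both ends walk the segment ring in the same order.
 */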
9905bf06 KW |
2409 | |
2410 | static inline struct tid_rdma_request * | |
2411 | find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode) | |
2412 | __must_hold(&qp->s_lock) | |
2413 | { | |
2414 | struct rvt_swqe *wqe; | |
2415 | struct tid_rdma_request *req = NULL; | |
2416 | u32 i, end; | |
2417 | ||
2418 | end = qp->s_cur + 1; | |
2419 | if (end == qp->s_size) | |
2420 | end = 0; | |
2421 | for (i = qp->s_acked; i != end;) { | |
2422 | wqe = rvt_get_swqe_ptr(qp, i); | |
2423 | if (cmp_psn(psn, wqe->psn) >= 0 && | |
2424 | cmp_psn(psn, wqe->lpsn) <= 0) { | |
2425 | if (wqe->wr.opcode == opcode) | |
2426 | req = wqe_to_tid_req(wqe); | |
2427 | break; | |
2428 | } | |
2429 | if (++i == qp->s_size) | |
2430 | i = 0; | |
2431 | } | |
2432 | ||
2433 | return req; | |
2434 | } | |
2435 | ||
2436 | void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet) | |
2437 | { | |
2438 | /* HANDLER FOR TID RDMA READ RESPONSE packet (Requestor side) */ | |
2439 | ||
2440 | /* | |
2441 | * 1. Find matching SWQE | |
2442 | * 2. Check that the entire segment has been read. | |
2443 | * 3. Remove HFI1_S_WAIT_TID_RESP from s_flags. | |
2444 | * 4. Free the TID flow resources. | |
2445 | * 5. Kick the send engine (hfi1_schedule_send()) | |
2446 | */ | |
2447 | struct ib_other_headers *ohdr = packet->ohdr; | |
2448 | struct rvt_qp *qp = packet->qp; | |
2449 | struct hfi1_qp_priv *priv = qp->priv; | |
2450 | struct hfi1_ctxtdata *rcd = packet->rcd; | |
2451 | struct tid_rdma_request *req; | |
2452 | struct tid_rdma_flow *flow; | |
2453 | u32 opcode, aeth; | |
2454 | bool is_fecn; | |
2455 | unsigned long flags; | |
2456 | u32 kpsn, ipsn; | |
2457 | ||
3ce5daa2 | 2458 | trace_hfi1_sender_rcv_tid_read_resp(qp); |
9905bf06 KW |
2459 | is_fecn = process_ecn(qp, packet); |
2460 | kpsn = mask_psn(be32_to_cpu(ohdr->bth[2])); | |
2461 | aeth = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.aeth); | |
2462 | opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; | |
2463 | ||
2464 | spin_lock_irqsave(&qp->s_lock, flags); | |
2465 | ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn)); | |
2466 | req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ); | |
2467 | if (unlikely(!req)) | |
2468 | goto ack_op_err; | |
2469 | ||
2470 | flow = &req->flows[req->clear_tail]; | |
2471 | /* When header suppression is disabled */ | |
2472 | if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) | |
2473 | goto ack_done; | |
2474 | req->ack_pending--; | |
2475 | priv->pending_tid_r_segs--; | |
2476 | qp->s_num_rd_atomic--; | |
2477 | if ((qp->s_flags & RVT_S_WAIT_FENCE) && | |
2478 | !qp->s_num_rd_atomic) { | |
2479 | qp->s_flags &= ~(RVT_S_WAIT_FENCE | | |
2480 | RVT_S_WAIT_ACK); | |
2481 | hfi1_schedule_send(qp); | |
2482 | } | |
2483 | if (qp->s_flags & RVT_S_WAIT_RDMAR) { | |
2484 | qp->s_flags &= ~(RVT_S_WAIT_RDMAR | RVT_S_WAIT_ACK); | |
2485 | hfi1_schedule_send(qp); | |
2486 | } | |
2487 | ||
3ce5daa2 KW |
2488 | trace_hfi1_ack(qp, ipsn); |
2489 | trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode, | |
2490 | req->e.swqe->psn, req->e.swqe->lpsn, | |
2491 | req); | |
2492 | trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow); | |
2493 | ||
9905bf06 KW |
2494 | /* Release the tid resources */ |
2495 | hfi1_kern_exp_rcv_clear(req); | |
2496 | ||
2497 | if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd)) | |
2498 | goto ack_done; | |
2499 | ||
2500 | /* If not done yet, build next read request */ | |
2501 | if (++req->comp_seg >= req->total_segs) { | |
2502 | priv->tid_r_comp++; | |
2503 | req->state = TID_REQUEST_COMPLETE; | |
2504 | } | |
2505 | ||
2506 | /* | |
2507 | * Clear the hw flow under two conditions: | |
2508 | * 1. This request is a sync point and it is complete; | |
2509 | * 2. Current request is completed and there are no more requests. | |
2510 | */ | |
2511 | if ((req->state == TID_REQUEST_SYNC && | |
2512 | req->comp_seg == req->cur_seg) || | |
2513 | priv->tid_r_comp == priv->tid_r_reqs) { | |
2514 | hfi1_kern_clear_hw_flow(priv->rcd, qp); | |
2515 | if (req->state == TID_REQUEST_SYNC) | |
2516 | req->state = TID_REQUEST_ACTIVE; | |
2517 | } | |
2518 | ||
2519 | hfi1_schedule_send(qp); | |
2520 | goto ack_done; | |
2521 | ||
2522 | ack_op_err: | |
2523 | /* | |
2524 | * The test indicates that the send engine has finished its cleanup | |
2525 | * after sending the request and it's now safe to put the QP into error | |
2526 | * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail | |
2527 | * == qp->s_head), it would be unsafe to complete the wqe pointed by | |
2528 | * qp->s_acked here. Putting the qp into error state will safely flush | |
2529 | * all remaining requests. | |
2530 | */ | |
2531 | if (qp->s_last == qp->s_acked) | |
2532 | rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); | |
2533 | ||
2534 | ack_done: | |
2535 | spin_unlock_irqrestore(&qp->s_lock, flags); | |
2536 | if (is_fecn) | |
2537 | hfi1_send_rc_ack(packet, is_fecn); | |
2538 | } | |
2539 | ||
2540 | void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp) | |
2541 | __must_hold(&qp->s_lock) | |
2542 | { | |
2543 | u32 n = qp->s_acked; | |
2544 | struct rvt_swqe *wqe; | |
2545 | struct tid_rdma_request *req; | |
2546 | struct hfi1_qp_priv *priv = qp->priv; | |
2547 | ||
2548 | lockdep_assert_held(&qp->s_lock); | |
2549 | /* Free any TID entries */ | |
2550 | while (n != qp->s_tail) { | |
2551 | wqe = rvt_get_swqe_ptr(qp, n); | |
2552 | if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { | |
2553 | req = wqe_to_tid_req(wqe); | |
2554 | hfi1_kern_exp_rcv_clear_all(req); | |
2555 | } | |
2556 | ||
2557 | if (++n == qp->s_size) | |
2558 | n = 0; | |
2559 | } | |
2560 | /* Free flow */ | |
2561 | hfi1_kern_clear_hw_flow(priv->rcd, qp); | |
2562 | } | |
2563 | ||
2564 | static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd, | |
2565 | struct hfi1_packet *packet, u8 rcv_type, | |
2566 | u8 opcode) | |
2567 | { | |
2568 | struct rvt_qp *qp = packet->qp; | |
d72fe7d5 | 2569 | struct hfi1_qp_priv *qpriv = qp->priv; |
9905bf06 KW |
2570 | u32 ipsn; |
2571 | struct ib_other_headers *ohdr = packet->ohdr; | |
d72fe7d5 KW |
2572 | struct rvt_ack_entry *e; |
2573 | struct tid_rdma_request *req; | |
2574 | struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); | |
2575 | u32 i; | |
9905bf06 KW |
2576 | |
2577 | if (rcv_type >= RHF_RCV_TYPE_IB) | |
2578 | goto done; | |
2579 | ||
2580 | spin_lock(&qp->s_lock); | |
d72fe7d5 KW |
2581 | |
2582 | /* | |
2583 | * We've run out of space in the eager buffer. | |
2584 | * Eagerly received KDETH packets which require space in the | |
2585 | * eager buffer (packets that have a payload) are TID RDMA WRITE | |
2586 | * response packets. In this case, we have to re-transmit the | |
2587 | * TID RDMA WRITE request. | |
2588 | */ | |
2589 | if (rcv_type == RHF_RCV_TYPE_EAGER) { | |
2590 | hfi1_restart_rc(qp, qp->s_last_psn + 1, 1); | |
2591 | hfi1_schedule_send(qp); | |
2592 | goto done_unlock; | |
2593 | } | |
2594 | ||
9905bf06 KW |
2595 | /* |
2596 | * For a TID READ response, error out the QP after freeing the tid | |
2597 | * resources. | |
2598 | */ | |
2599 | if (opcode == TID_OP(READ_RESP)) { | |
2600 | ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn)); | |
2601 | if (cmp_psn(ipsn, qp->s_last_psn) > 0 && | |
2602 | cmp_psn(ipsn, qp->s_psn) < 0) { | |
2603 | hfi1_kern_read_tid_flow_free(qp); | |
2604 | spin_unlock(&qp->s_lock); | |
2605 | rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | |
2606 | goto done; | |
2607 | } | |
d72fe7d5 KW |
2608 | goto done_unlock; |
2609 | } | |
2610 | ||
2611 | /* | |
2612 | * Error out the qp for TID RDMA WRITE | |
2613 | */ | |
2614 | hfi1_kern_clear_hw_flow(qpriv->rcd, qp); | |
2615 | for (i = 0; i < rvt_max_atomic(rdi); i++) { | |
2616 | e = &qp->s_ack_queue[i]; | |
2617 | if (e->opcode == TID_OP(WRITE_REQ)) { | |
2618 | req = ack_to_tid_req(e); | |
2619 | hfi1_kern_exp_rcv_clear_all(req); | |
2620 | } | |
9905bf06 | 2621 | } |
d72fe7d5 KW |
2622 | spin_unlock(&qp->s_lock); |
2623 | rvt_rc_error(qp, IB_WC_LOC_LEN_ERR); | |
2624 | goto done; | |
9905bf06 | 2625 | |
d72fe7d5 | 2626 | done_unlock: |
9905bf06 KW |
2627 | spin_unlock(&qp->s_lock); |
2628 | done: | |
2629 | return true; | |
2630 | } | |
2631 | ||
2632 | static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd, | |
2633 | struct rvt_qp *qp, struct rvt_swqe *wqe) | |
2634 | { | |
2635 | struct tid_rdma_request *req; | |
2636 | struct tid_rdma_flow *flow; | |
2637 | ||
2638 | /* Start from the right segment */ | |
2639 | qp->r_flags |= RVT_R_RDMAR_SEQ; | |
2640 | req = wqe_to_tid_req(wqe); | |
2641 | flow = &req->flows[req->clear_tail]; | |
2642 | hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0); | |
2643 | if (list_empty(&qp->rspwait)) { | |
2644 | qp->r_flags |= RVT_R_RSP_SEND; | |
2645 | rvt_get_qp(qp); | |
2646 | list_add_tail(&qp->rspwait, &rcd->qp_wait_list); | |
2647 | } | |
2648 | } | |
2649 | ||
2650 | /* | |
2651 | * Handle the KDETH eflags for TID RDMA READ response. | |
2652 | * | |
2653 | * Return false if the last packet for a segment has been received and it | |
2654 | * is time to process the response normally; otherwise, return true. | |
2655 | * | |
2656 | * The caller must hold the packet->qp->r_lock and the rcu_read_lock. | |
2657 | */ | |
2658 | static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, | |
2659 | struct hfi1_packet *packet, u8 rcv_type, | |
2660 | u8 rte, u32 psn, u32 ibpsn) | |
2661 | __must_hold(&packet->qp->r_lock) __must_hold(RCU) | |
2662 | { | |
2663 | struct hfi1_pportdata *ppd = rcd->ppd; | |
2664 | struct hfi1_devdata *dd = ppd->dd; | |
2665 | struct hfi1_ibport *ibp; | |
2666 | struct rvt_swqe *wqe; | |
2667 | struct tid_rdma_request *req; | |
2668 | struct tid_rdma_flow *flow; | |
2669 | u32 ack_psn; | |
2670 | struct rvt_qp *qp = packet->qp; | |
2671 | struct hfi1_qp_priv *priv = qp->priv; | |
2672 | bool ret = true; | |
2673 | int diff = 0; | |
2674 | u32 fpsn; | |
2675 | ||
2676 | lockdep_assert_held(&qp->r_lock); | |
2677 | /* If the psn is out of valid range, drop the packet */ | |
2678 | if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || | |
2679 | cmp_psn(ibpsn, qp->s_psn) > 0) | |
2680 | return ret; | |
2681 | ||
2682 | spin_lock(&qp->s_lock); | |
2683 | /* | |
2684 | * Note that NAKs implicitly ACK outstanding SEND and RDMA write | |
2685 | * requests and implicitly NAK RDMA read and atomic requests issued | |
2686 | * before the NAK'ed request. | |
2687 | */ | |
2688 | ack_psn = ibpsn - 1; | |
2689 | wqe = rvt_get_swqe_ptr(qp, qp->s_acked); | |
2690 | ibp = to_iport(qp->ibqp.device, qp->port_num); | |
2691 | ||
2692 | /* Complete WQEs that the PSN finishes. */ | |
2693 | while ((int)delta_psn(ack_psn, wqe->lpsn) >= 0) { | |
2694 | /* | |
2695 | * If this request is an RDMA read or atomic, and the NACK is | |
2696 | * for a later operation, this NACK NAKs the RDMA read or | |
2697 | * atomic. | |
2698 | */ | |
2699 | if (wqe->wr.opcode == IB_WR_RDMA_READ || | |
2700 | wqe->wr.opcode == IB_WR_TID_RDMA_READ || | |
2701 | wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || | |
2702 | wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { | |
2703 | /* Retry this request. */ | |
2704 | if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) { | |
2705 | qp->r_flags |= RVT_R_RDMAR_SEQ; | |
2706 | if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { | |
2707 | restart_tid_rdma_read_req(rcd, qp, | |
2708 | wqe); | |
2709 | } else { | |
2710 | hfi1_restart_rc(qp, qp->s_last_psn + 1, | |
2711 | 0); | |
2712 | if (list_empty(&qp->rspwait)) { | |
2713 | qp->r_flags |= RVT_R_RSP_SEND; | |
2714 | rvt_get_qp(qp); | |
2715 | list_add_tail(/* wait */ | |
2716 | &qp->rspwait, | |
2717 | &rcd->qp_wait_list); | |
2718 | } | |
2719 | } | |
2720 | } | |
2721 | /* | |
2722 | * No need to process the NAK since we are | |
2723 | * restarting an earlier request. | |
2724 | */ | |
2725 | break; | |
2726 | } | |
2727 | ||
2728 | wqe = do_rc_completion(qp, wqe, ibp); | |
2729 | if (qp->s_acked == qp->s_tail) | |
2730 | break; | |
2731 | } | |
2732 | ||
2733 | /* Handle the eflags for the request */ | |
2734 | if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) | |
2735 | goto s_unlock; | |
2736 | ||
2737 | req = wqe_to_tid_req(wqe); | |
2738 | switch (rcv_type) { | |
2739 | case RHF_RCV_TYPE_EXPECTED: | |
2740 | switch (rte) { | |
2741 | case RHF_RTE_EXPECTED_FLOW_SEQ_ERR: | |
2742 | /* | |
2743 | * On the first occurrence of a Flow Sequence error, | |
2744 | * the flag TID_FLOW_SW_PSN is set. | |
2745 | * | |
2746 | * After that, the flow is *not* reprogrammed and the | |
2747 | * protocol falls back to SW PSN checking. This is done | |
2748 | * to prevent continuous Flow Sequence errors for any | |
2749 | * packets that could be still in the fabric. | |
2750 | */ | |
2751 | flow = find_flow(req, psn, NULL); | |
2752 | if (!flow) { | |
2753 | /* | |
2754 | * We can't find the IB PSN matching the | |
2755 | * received KDETH PSN. The only thing we can | |
2756 | * do at this point is report the error to | |
2757 | * the QP. | |
2758 | */ | |
2759 | hfi1_kern_read_tid_flow_free(qp); | |
2760 | spin_unlock(&qp->s_lock); | |
2761 | rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | |
2762 | return ret; | |
2763 | } | |
2764 | if (priv->flow_state.flags & TID_FLOW_SW_PSN) { | |
2765 | diff = cmp_psn(psn, | |
2766 | priv->flow_state.r_next_psn); | |
2767 | if (diff > 0) { | |
2768 | if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) | |
2769 | restart_tid_rdma_read_req(rcd, | |
2770 | qp, | |
2771 | wqe); | |
2772 | ||
2773 | /* Drop the packet.*/ | |
2774 | goto s_unlock; | |
2775 | } else if (diff < 0) { | |
2776 | /* | |
2777 | * If a response packet for a restarted | |
2778 | * request has come back, reset the | |
2779 | * restart flag. | |
2780 | */ | |
2781 | if (qp->r_flags & RVT_R_RDMAR_SEQ) | |
2782 | qp->r_flags &= | |
2783 | ~RVT_R_RDMAR_SEQ; | |
2784 | ||
2785 | /* Drop the packet.*/ | |
2786 | goto s_unlock; | |
2787 | } | |
2788 | ||
2789 | /* | |
2790 | * If SW PSN verification is successful and | |
2791 | * this is the last packet in the segment, tell | |
2792 | * the caller to process it as a normal packet. | |
2793 | */ | |
2794 | fpsn = full_flow_psn(flow, | |
2795 | flow->flow_state.lpsn); | |
2796 | if (cmp_psn(fpsn, psn) == 0) { | |
2797 | ret = false; | |
2798 | if (qp->r_flags & RVT_R_RDMAR_SEQ) | |
2799 | qp->r_flags &= | |
2800 | ~RVT_R_RDMAR_SEQ; | |
2801 | } | |
2802 | priv->flow_state.r_next_psn++; | |
2803 | } else { | |
2804 | u64 reg; | |
2805 | u32 last_psn; | |
2806 | ||
2807 | /* | |
2808 | * The only sane way to get the amount of | |
2809 | * progress is to read the HW flow state. | |
2810 | */ | |
2811 | reg = read_uctxt_csr(dd, rcd->ctxt, | |
2812 | RCV_TID_FLOW_TABLE + | |
2813 | (8 * flow->idx)); | |
2814 | last_psn = mask_psn(reg); | |
2815 | ||
2816 | priv->flow_state.r_next_psn = last_psn; | |
2817 | priv->flow_state.flags |= TID_FLOW_SW_PSN; | |
2818 | /* | |
2819 | * If no request has been restarted yet, | |
2820 | * restart the current one. | |
2821 | */ | |
2822 | if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) | |
2823 | restart_tid_rdma_read_req(rcd, qp, | |
2824 | wqe); | |
2825 | } | |
2826 | ||
2827 | break; | |
2828 | ||
2829 | case RHF_RTE_EXPECTED_FLOW_GEN_ERR: | |
2830 | /* | |
2831 | * Since the TID flow is able to ride through | |
2832 | * generation mismatch, drop this stale packet. | |
2833 | */ | |
2834 | break; | |
2835 | ||
2836 | default: | |
2837 | break; | |
2838 | } | |
2839 | break; | |
2840 | ||
2841 | case RHF_RCV_TYPE_ERROR: | |
2842 | switch (rte) { | |
2843 | case RHF_RTE_ERROR_OP_CODE_ERR: | |
2844 | case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR: | |
2845 | case RHF_RTE_ERROR_KHDR_HCRC_ERR: | |
2846 | case RHF_RTE_ERROR_KHDR_KVER_ERR: | |
2847 | case RHF_RTE_ERROR_CONTEXT_ERR: | |
2848 | case RHF_RTE_ERROR_KHDR_TID_ERR: | |
2849 | default: | |
2850 | break; | |
2851 | } | |
2852 | default: | |
2853 | break; | |
2854 | } | |
2855 | s_unlock: | |
2856 | spin_unlock(&qp->s_lock); | |
2857 | return ret; | |
2858 | } | |
2859 | ||
2860 | bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd, | |
2861 | struct hfi1_pportdata *ppd, | |
2862 | struct hfi1_packet *packet) | |
2863 | { | |
2864 | struct hfi1_ibport *ibp = &ppd->ibport_data; | |
2865 | struct hfi1_devdata *dd = ppd->dd; | |
2866 | struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; | |
2867 | u8 rcv_type = rhf_rcv_type(packet->rhf); | |
2868 | u8 rte = rhf_rcv_type_err(packet->rhf); | |
2869 | struct ib_header *hdr = packet->hdr; | |
2870 | struct ib_other_headers *ohdr = NULL; | |
2871 | int lnh = be16_to_cpu(hdr->lrh[0]) & 3; | |
2872 | u16 lid = be16_to_cpu(hdr->lrh[1]); | |
2873 | u8 opcode; | |
2874 | u32 qp_num, psn, ibpsn; | |
2875 | struct rvt_qp *qp; | |
d72fe7d5 | 2876 | struct hfi1_qp_priv *qpriv; |
9905bf06 KW |
2877 | unsigned long flags; |
2878 | bool ret = true; | |
d72fe7d5 KW |
2879 | struct rvt_ack_entry *e; |
2880 | struct tid_rdma_request *req; | |
2881 | struct tid_rdma_flow *flow; | |
9905bf06 | 2882 | |
3ce5daa2 KW |
2883 | trace_hfi1_msg_handle_kdeth_eflags(NULL, "Kdeth error: rhf ", |
2884 | packet->rhf); | |
9905bf06 KW |
2885 | if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR)) |
2886 | return ret; | |
2887 | ||
2888 | packet->ohdr = &hdr->u.oth; | |
2889 | ohdr = packet->ohdr; | |
2890 | trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); | |
2891 | ||
2892 | /* Get the destination QP number. */ | |
2893 | qp_num = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_qp) & | |
2894 | RVT_QPN_MASK; | |
2895 | if (lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) | |
2896 | goto drop; | |
2897 | ||
2898 | psn = mask_psn(be32_to_cpu(ohdr->bth[2])); | |
2899 | opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; | |
2900 | ||
2901 | rcu_read_lock(); | |
2902 | qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); | |
2903 | if (!qp) | |
2904 | goto rcu_unlock; | |
2905 | ||
2906 | packet->qp = qp; | |
2907 | ||
2908 | /* Check for valid receive state. */ | |
2909 | spin_lock_irqsave(&qp->r_lock, flags); | |
2910 | if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { | |
2911 | ibp->rvp.n_pkt_drops++; | |
2912 | goto r_unlock; | |
2913 | } | |
2914 | ||
2915 | if (packet->rhf & RHF_TID_ERR) { | |
2916 | /* For TIDERR and RC QPs preemptively schedule a NAK */ | |
2917 | u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */ | |
2918 | ||
2919 | /* Sanity check packet */ | |
2920 | if (tlen < 24) | |
2921 | goto r_unlock; | |
2922 | ||
2923 | /* | |
2924 | * Check for GRH. We should never get packets with GRH in this | |
2925 | * path. | |
2926 | */ | |
2927 | if (lnh == HFI1_LRH_GRH) | |
2928 | goto r_unlock; | |
2929 | ||
2930 | if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode)) | |
2931 | goto r_unlock; | |
2932 | } | |
2933 | ||
2934 | /* handle TID RDMA READ */ | |
2935 | if (opcode == TID_OP(READ_RESP)) { | |
2936 | ibpsn = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn); | |
2937 | ibpsn = mask_psn(ibpsn); | |
2938 | ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn, | |
2939 | ibpsn); | |
d72fe7d5 KW |
2940 | goto r_unlock; |
2941 | } | |
2942 | ||
2943 | /* | |
2944 | * qp->s_tail_ack_queue points to the rvt_ack_entry currently being | |
2945 | * processed. These are completed sequentially, so we can be sure that | |
2946 | * the pointer will not change until the entire request has completed. | |
2947 | */ | |
2948 | spin_lock(&qp->s_lock); | |
2949 | qpriv = qp->priv; | |
2950 | e = &qp->s_ack_queue[qpriv->r_tid_tail]; | |
2951 | req = ack_to_tid_req(e); | |
2952 | flow = &req->flows[req->clear_tail]; | |
2953 | ||
2954 | switch (rcv_type) { | |
2955 | case RHF_RCV_TYPE_EXPECTED: | |
2956 | switch (rte) { | |
2957 | case RHF_RTE_EXPECTED_FLOW_SEQ_ERR: | |
2958 | if (!(qpriv->s_flags & HFI1_R_TID_SW_PSN)) { | |
2959 | u64 reg; | |
2960 | ||
2961 | qpriv->s_flags |= HFI1_R_TID_SW_PSN; | |
2962 | /* | |
2963 | * The only sane way to get the amount of | |
2964 | * progress is to read the HW flow state. | |
2965 | */ | |
2966 | reg = read_uctxt_csr(dd, rcd->ctxt, | |
2967 | RCV_TID_FLOW_TABLE + | |
2968 | (8 * flow->idx)); | |
2969 | flow->flow_state.r_next_psn = mask_psn(reg); | |
2970 | qpriv->r_next_psn_kdeth = | |
2971 | flow->flow_state.r_next_psn; | |
2972 | goto nak_psn; | |
2973 | } else { | |
2974 | /* | |
2975 | * If the received PSN does not match the next | |
2976 | * expected PSN, NAK the packet. | |
2977 | * However, only do that if we know that a | |
2978 | * NAK has already been sent. Otherwise, this | |
2979 | * mismatch could be due to packets that were | |
2980 | * already in flight. | |
2981 | */ | |
2982 | if (psn != flow->flow_state.r_next_psn) { | |
2983 | psn = flow->flow_state.r_next_psn; | |
2984 | goto nak_psn; | |
2985 | } | |
2986 | ||
2987 | qpriv->s_nak_state = 0; | |
2988 | /* | |
2989 | * If SW PSN verification is successful and this | |
2990 | * is the last packet in the segment, tell the | |
2991 | * caller to process it as a normal packet. | |
2992 | */ | |
2993 | if (psn == full_flow_psn(flow, | |
2994 | flow->flow_state.lpsn)) | |
2995 | ret = false; | |
2996 | qpriv->r_next_psn_kdeth = | |
2997 | ++flow->flow_state.r_next_psn; | |
2998 | } | |
2999 | break; | |
3000 | ||
3001 | case RHF_RTE_EXPECTED_FLOW_GEN_ERR: | |
3002 | goto nak_psn; | |
3003 | ||
3004 | default: | |
3005 | break; | |
3006 | } | |
3007 | break; | |
3008 | ||
3009 | case RHF_RCV_TYPE_ERROR: | |
3010 | switch (rte) { | |
3011 | case RHF_RTE_ERROR_OP_CODE_ERR: | |
3012 | case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR: | |
3013 | case RHF_RTE_ERROR_KHDR_HCRC_ERR: | |
3014 | case RHF_RTE_ERROR_KHDR_KVER_ERR: | |
3015 | case RHF_RTE_ERROR_CONTEXT_ERR: | |
3016 | case RHF_RTE_ERROR_KHDR_TID_ERR: | |
3017 | default: | |
3018 | break; | |
3019 | } | |
3020 | default: | |
3021 | break; | |
9905bf06 KW |
3022 | } |
3023 | ||
d72fe7d5 KW |
3024 | unlock: |
3025 | spin_unlock(&qp->s_lock); | |
9905bf06 KW |
3026 | r_unlock: |
3027 | spin_unlock_irqrestore(&qp->r_lock, flags); | |
3028 | rcu_unlock: | |
3029 | rcu_read_unlock(); | |
3030 | drop: | |
3031 | return ret; | |
d72fe7d5 KW |
3032 | nak_psn: |
3033 | ibp->rvp.n_rc_seqnak++; | |
3034 | if (!qpriv->s_nak_state) { | |
3035 | qpriv->s_nak_state = IB_NAK_PSN_ERROR; | |
3036 | /* We are NAK'ing the next expected PSN */ | |
3037 | qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn); | |
3038 | qpriv->s_flags |= RVT_S_ACK_PENDING; | |
3039 | if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID) | |
3040 | qpriv->r_tid_ack = qpriv->r_tid_tail; | |
3041 | } | |
3042 | goto unlock; | |
9905bf06 | 3043 | } |
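/*
 * A minimal sketch of the "full" KDETH PSN used throughout the NAK
 * handling above, assuming the generation sits directly above an 11-bit
 * sequence field (i.e. HFI1_KDETH_BTH_SEQ_SHIFT == 11). The helper name
 * is hypothetical; the driver's own full_flow_psn() composes the value
 * from the flow state.
 */
static inline u32 example_full_kdeth_psn(u32 generation, u32 seq)
{
	/* generation in the upper bits, per-flow sequence in the lower */
	return mask_psn((generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
			(seq & HFI1_KDETH_BTH_SEQ_MASK));
}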
b126078e KW |
3044 | |
3045 | /* | |
3046 | * "Rewind" the TID request information. | |
3047 | * This means that we reset the state back to ACTIVE, | |
3048 | * find the proper flow, set the flow index to that flow, | |
3049 | * and reset the flow information. | |
3050 | */ | |
3051 | void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, | |
3052 | u32 *bth2) | |
3053 | { | |
3054 | struct tid_rdma_request *req = wqe_to_tid_req(wqe); | |
3055 | struct tid_rdma_flow *flow; | |
3056 | int diff; | |
3057 | u32 tididx = 0; | |
3058 | u16 fidx; | |
3059 | ||
3060 | if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { | |
3061 | *bth2 = mask_psn(qp->s_psn); | |
3062 | flow = find_flow_ib(req, *bth2, &fidx); | |
3ce5daa2 KW |
3063 | if (!flow) { |
3064 | trace_hfi1_msg_tid_restart_req(/* msg */ | |
3065 | qp, "!!!!!! Could not find flow to restart: bth2 ", | |
3066 | (u64)*bth2); | |
3067 | trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, | |
3068 | wqe->psn, wqe->lpsn, | |
3069 | req); | |
b126078e | 3070 | return; |
3ce5daa2 | 3071 | } |
b126078e KW |
3072 | } else { |
3073 | return; | |
3074 | } | |
3075 | ||
3ce5daa2 | 3076 | trace_hfi1_tid_flow_restart_req(qp, fidx, flow); |
b126078e KW |
3077 | diff = delta_psn(*bth2, flow->flow_state.ib_spsn); |
3078 | ||
3079 | flow->sent = 0; | |
3080 | flow->pkt = 0; | |
3081 | flow->tid_idx = 0; | |
3082 | flow->tid_offset = 0; | |
3083 | if (diff) { | |
3084 | for (tididx = 0; tididx < flow->tidcnt; tididx++) { | |
3085 | u32 tidentry = flow->tid_entry[tididx], tidlen, | |
3086 | tidnpkts, npkts; | |
3087 | ||
3088 | flow->tid_offset = 0; | |
3089 | tidlen = EXP_TID_GET(tidentry, LEN) * PAGE_SIZE; | |
3090 | tidnpkts = rvt_div_round_up_mtu(qp, tidlen); | |
3091 | npkts = min_t(u32, diff, tidnpkts); | |
3092 | flow->pkt += npkts; | |
3093 | flow->sent += (npkts == tidnpkts ? tidlen : | |
3094 | npkts * qp->pmtu); | |
3095 | flow->tid_offset += npkts * qp->pmtu; | |
3096 | diff -= npkts; | |
3097 | if (!diff) | |
3098 | break; | |
3099 | } | |
3100 | } | |
3101 | ||
3102 | if (flow->tid_offset == | |
3103 | EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) { | |
3104 | tididx++; | |
3105 | flow->tid_offset = 0; | |
3106 | } | |
3107 | flow->tid_idx = tididx; | |
3108 | /* Move flow_idx to correct index */ | |
3109 | req->flow_idx = fidx; | |
3110 | ||
3ce5daa2 KW |
3111 | trace_hfi1_tid_flow_restart_req(qp, fidx, flow); |
3112 | trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn, | |
3113 | wqe->lpsn, req); | |
b126078e KW |
3114 | req->state = TID_REQUEST_ACTIVE; |
3115 | } | |
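/*
 * A worked example of the rewind arithmetic above, with illustrative
 * values: diff == 5 packets, qp->pmtu == 4096, and a flow holding two
 * TID entries of 8 KiB and 16 KiB. The first entry spans 2 packets, so
 * npkts = min(5, 2) = 2 and flow->sent += 8192, leaving diff == 3. The
 * second entry spans 4 packets, so npkts = min(3, 4) = 3, flow->sent +=
 * 3 * 4096, flow->tid_offset == 12288, and diff reaches 0. Because
 * 12288 is short of the 16 KiB entry length, tididx stays at 1 and the
 * retransmission resumes mid-entry at that offset.
 */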
24b11923 KW |
3116 | |
3117 | void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp) | |
3118 | { | |
3119 | int i, ret; | |
3120 | struct hfi1_qp_priv *qpriv = qp->priv; | |
3121 | struct tid_flow_state *fs; | |
3122 | ||
3123 | if (qp->ibqp.qp_type != IB_QPT_RC || !HFI1_CAP_IS_KSET(TID_RDMA)) | |
3124 | return; | |
3125 | ||
3126 | /* | |
3127 | * First, clear the flow to help prevent any delayed packets from | |
3128 | * being delivered. | |
3129 | */ | |
3130 | fs = &qpriv->flow_state; | |
3131 | if (fs->index != RXE_NUM_TID_FLOWS) | |
3132 | hfi1_kern_clear_hw_flow(qpriv->rcd, qp); | |
3133 | ||
3134 | for (i = qp->s_acked; i != qp->s_head;) { | |
3135 | struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i); | |
3136 | ||
3137 | if (++i == qp->s_size) | |
3138 | i = 0; | |
3139 | /* Free only locally allocated TID entries */ | |
3140 | if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) | |
3141 | continue; | |
3142 | do { | |
3143 | struct hfi1_swqe_priv *priv = wqe->priv; | |
3144 | ||
3145 | ret = hfi1_kern_exp_rcv_clear(&priv->tid_req); | |
3146 | } while (!ret); | |
3147 | } | |
3148 | } | |
a0b34f75 KW |
3149 | |
3150 | bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe) | |
3151 | { | |
3152 | struct rvt_swqe *prev; | |
3153 | struct hfi1_qp_priv *priv = qp->priv; | |
3154 | u32 s_prev; | |
3155 | ||
3156 | s_prev = (qp->s_cur == 0 ? qp->s_size : qp->s_cur) - 1; | |
3157 | prev = rvt_get_swqe_ptr(qp, s_prev); | |
3158 | ||
3159 | switch (wqe->wr.opcode) { | |
3160 | case IB_WR_SEND: | |
3161 | case IB_WR_SEND_WITH_IMM: | |
3162 | case IB_WR_SEND_WITH_INV: | |
3163 | case IB_WR_ATOMIC_CMP_AND_SWP: | |
3164 | case IB_WR_ATOMIC_FETCH_AND_ADD: | |
3165 | case IB_WR_RDMA_WRITE: | |
3166 | case IB_WR_RDMA_READ: | |
3167 | break; | |
3168 | case IB_WR_TID_RDMA_READ: | |
3169 | switch (prev->wr.opcode) { | |
3170 | case IB_WR_RDMA_READ: | |
3171 | if (qp->s_acked != qp->s_cur) | |
3172 | goto interlock; | |
3173 | break; | |
3174 | default: | |
3175 | break; | |
3176 | } | |
3177 | default: | |
3178 | break; | |
3179 | } | |
3180 | return false; | |
3181 | ||
3182 | interlock: | |
3183 | priv->s_flags |= HFI1_S_TID_WAIT_INTERLCK; | |
3184 | return true; | |
3185 | } | |
f1ab4efa KW |
3186 | |
3187 | /* Does @sge meet the alignment requirements for tid rdma? */ | |
3ce5daa2 KW |
3188 | static inline bool hfi1_check_sge_align(struct rvt_qp *qp, |
3189 | struct rvt_sge *sge, int num_sge) | |
f1ab4efa KW |
3190 | { |
3191 | int i; | |
3192 | ||
3ce5daa2 KW |
3193 | for (i = 0; i < num_sge; i++, sge++) { |
3194 | trace_hfi1_sge_check_align(qp, i, sge); | |
f1ab4efa KW |
3195 | if ((u64)sge->vaddr & ~PAGE_MASK || |
3196 | sge->sge_length & ~PAGE_MASK) | |
3197 | return false; | |
3ce5daa2 | 3198 | } |
f1ab4efa KW |
3199 | return true; |
3200 | } | |
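/*
 * Illustrative values only: with 4 KiB pages, an SGE at vaddr 0x200000
 * with length 0x40000 passes the check above, while vaddr 0x200800
 * (not page aligned) or length 0x40800 (not a page multiple) fails,
 * since either leaves low bits set under ~PAGE_MASK.
 */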
3201 | ||
3202 | void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe) | |
3203 | { | |
3204 | struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv; | |
3205 | struct hfi1_swqe_priv *priv = wqe->priv; | |
3206 | struct tid_rdma_params *remote; | |
3207 | enum ib_wr_opcode new_opcode; | |
3208 | bool do_tid_rdma = false; | |
3209 | struct hfi1_pportdata *ppd = qpriv->rcd->ppd; | |
3210 | ||
3211 | if ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ppd->lmc) - 1)) == | |
3212 | ppd->lid) | |
3213 | return; | |
3214 | if (qpriv->hdr_type != HFI1_PKT_TYPE_9B) | |
3215 | return; | |
3216 | ||
3217 | rcu_read_lock(); | |
3218 | remote = rcu_dereference(qpriv->tid_rdma.remote); | |
3219 | /* | |
3220 | * If TID RDMA is disabled by the negotiation, don't | |
3221 | * use it. | |
3222 | */ | |
3223 | if (!remote) | |
3224 | goto exit; | |
3225 | ||
3226 | if (wqe->wr.opcode == IB_WR_RDMA_READ) { | |
3ce5daa2 KW |
3227 | if (hfi1_check_sge_align(qp, &wqe->sg_list[0], |
3228 | wqe->wr.num_sge)) { | |
f1ab4efa KW |
3229 | new_opcode = IB_WR_TID_RDMA_READ; |
3230 | do_tid_rdma = true; | |
3231 | } | |
3232 | } | |
3233 | ||
3234 | if (do_tid_rdma) { | |
3235 | if (hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req, GFP_ATOMIC)) | |
3236 | goto exit; | |
3237 | wqe->wr.opcode = new_opcode; | |
3238 | priv->tid_req.seg_len = | |
3239 | min_t(u32, remote->max_len, wqe->length); | |
3240 | priv->tid_req.total_segs = | |
3241 | DIV_ROUND_UP(wqe->length, priv->tid_req.seg_len); | |
3242 | /* Compute the last PSN of the request */ | |
3243 | wqe->lpsn = wqe->psn; | |
3244 | if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { | |
3245 | priv->tid_req.n_flows = remote->max_read; | |
3246 | qpriv->tid_r_reqs++; | |
3247 | wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1; | |
3248 | } | |
3249 | ||
3250 | priv->tid_req.cur_seg = 0; | |
3251 | priv->tid_req.comp_seg = 0; | |
3252 | priv->tid_req.ack_seg = 0; | |
3253 | priv->tid_req.state = TID_REQUEST_INACTIVE; | |
3ce5daa2 KW |
3254 | trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode, |
3255 | wqe->psn, wqe->lpsn, | |
3256 | &priv->tid_req); | |
f1ab4efa KW |
3257 | } |
3258 | exit: | |
3259 | rcu_read_unlock(); | |
3260 | } | |
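/*
 * Illustrative arithmetic for the setup above (values assumed): a
 * 1 MiB RDMA READ with remote->max_len == 256 KiB and a 4096-byte PMTU
 * yields seg_len = 262144, total_segs = DIV_ROUND_UP(1048576, 262144)
 * = 4, and rvt_div_round_up_mtu() = 256 packets, so wqe->lpsn ends up
 * at wqe->psn + 255.
 */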
c098bbb0 KW |
3261 | |
3262 | /* TID RDMA WRITE functions */ | |
3263 | ||
3264 | u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe, | |
3265 | struct ib_other_headers *ohdr, | |
3266 | u32 *bth1, u32 *bth2, u32 *len) | |
3267 | { | |
3268 | struct hfi1_qp_priv *qpriv = qp->priv; | |
3269 | struct tid_rdma_request *req = wqe_to_tid_req(wqe); | |
3270 | struct tid_rdma_params *remote; | |
3271 | ||
3272 | rcu_read_lock(); | |
3273 | remote = rcu_dereference(qpriv->tid_rdma.remote); | |
3274 | /* | |
3275 | * Set the number of flows to be used based on the negotiated | |
3276 | * parameters. | |
3277 | */ | |
3278 | req->n_flows = remote->max_write; | |
3279 | req->state = TID_REQUEST_ACTIVE; | |
3280 | ||
3281 | KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth0, KVER, 0x1); | |
3282 | KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth1, JKEY, remote->jkey); | |
3283 | ohdr->u.tid_rdma.w_req.reth.vaddr = | |
3284 | cpu_to_be64(wqe->rdma_wr.remote_addr + (wqe->length - *len)); | |
3285 | ohdr->u.tid_rdma.w_req.reth.rkey = | |
3286 | cpu_to_be32(wqe->rdma_wr.rkey); | |
3287 | ohdr->u.tid_rdma.w_req.reth.length = cpu_to_be32(*len); | |
3288 | ohdr->u.tid_rdma.w_req.verbs_qp = cpu_to_be32(qp->remote_qpn); | |
3289 | *bth1 &= ~RVT_QPN_MASK; | |
3290 | *bth1 |= remote->qp; | |
3291 | qp->s_state = TID_OP(WRITE_REQ); | |
3292 | qp->s_flags |= HFI1_S_WAIT_TID_RESP; | |
3293 | *bth2 |= IB_BTH_REQ_ACK; | |
3294 | *len = 0; | |
3295 | ||
3296 | rcu_read_unlock(); | |
3297 | return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32); | |
3298 | } | |
07b92370 KW |
3299 | |
3300 | void hfi1_compute_tid_rdma_flow_wt(void) | |
3301 | { | |
3302 | /* | |
3303 | * Heuristic for computing the RNR timeout when waiting on the flow | |
3304 | * queue. Rather than a computationally expensive exact estimate of when | |
3305 | * a flow will be available, we assume that if a QP is at position N in | |
3306 | * the flow queue it has to wait approximately (N + 1) * (number of | |
3307 | * segments between two sync points), assuming PMTU of 4K. The rationale | |
3308 | * for this is that flows are released and recycled at each sync point. | |
3309 | */ | |
3310 | tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) / | |
3311 | TID_RDMA_MAX_SEGMENT_SIZE; | |
3312 | } | |
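/*
 * Illustrative numbers for the heuristic above (constants assumed):
 * with an 11-bit KDETH sequence field, MAX_TID_FLOW_PSN == 2048
 * packets per generation, which at a 4096-byte MTU is 8 MiB of data;
 * with a 256 KiB TID_RDMA_MAX_SEGMENT_SIZE that gives
 * tid_rdma_flow_wt = 2048 * 4096 / 262144 = 32 segments per queue
 * position.
 */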
3313 | ||
3314 | static u32 position_in_queue(struct hfi1_qp_priv *qpriv, | |
3315 | struct tid_queue *queue) | |
3316 | { | |
3317 | return qpriv->tid_enqueue - queue->dequeue; | |
3318 | } | |
3319 | ||
3320 | /* | |
3321 | * @qp: points to rvt_qp context. | |
3322 | * @to_seg: desired RNR timeout in segments. | |
3323 | * Return: index of the next highest timeout in the ib_hfi1_rnr_table[] | |
3324 | */ | |
3325 | static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg) | |
3326 | { | |
3327 | struct hfi1_qp_priv *qpriv = qp->priv; | |
3328 | u64 timeout; | |
3329 | u32 bytes_per_us; | |
3330 | u8 i; | |
3331 | ||
3332 | bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8; | |
3333 | timeout = (to_seg * TID_RDMA_MAX_SEGMENT_SIZE) / bytes_per_us; | |
3334 | /* | |
3335 | * Find the next highest value in the RNR table to the required | |
3336 | * timeout. This gives the responder some padding. | |
3337 | */ | |
3338 | for (i = 1; i <= IB_AETH_CREDIT_MASK; i++) | |
3339 | if (rvt_rnr_tbl_to_usec(i) >= timeout) | |
3340 | return i; | |
3341 | return 0; | |
3342 | } | |
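/*
 * Illustrative only, assuming active_egress_rate() reports Mbit/s and
 * a 256 KiB segment size: on a 100 Gb/s port, bytes_per_us =
 * 100000 / 8 = 12500; waiting for to_seg = 32 segments costs roughly
 * 32 * 262144 / 12500 ~= 671 us, and the loop returns the smallest
 * RNR table index whose timeout is at least that long.
 */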
3343 | ||
3344 | /** | |
3345 | * Central place for resource allocation at the TID write responder. | |
3346 | * It is called from the write_req and write_data interrupt handlers | |
3347 | * as well as from the send thread when a queued QP is scheduled for | |
3348 | * resource allocation. | |
3349 | * | |
3350 | * Iterates over (a) segments of a request and then (b) queued requests | |
3351 | * themselves to allocate resources for up to local->max_write | |
3352 | * segments across multiple requests. Allocation stops when a | |
3353 | * sync point is hit and resumes once the data packets at the | |
3354 | * sync point have been received. | |
3355 | * | |
3356 | * Resource allocation and sending of responses is decoupled. The | |
3357 | * request/segment which are being allocated and sent are as follows. | |
3358 | * Resources are allocated for: | |
3359 | * [request: qpriv->r_tid_alloc, segment: req->alloc_seg] | |
3360 | * The send thread sends: | |
3361 | * [request: qp->s_tail_ack_queue, segment: req->cur_seg] | |
3362 | */ | |
3363 | static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx) | |
3364 | { | |
3365 | struct tid_rdma_request *req; | |
3366 | struct hfi1_qp_priv *qpriv = qp->priv; | |
3367 | struct hfi1_ctxtdata *rcd = qpriv->rcd; | |
3368 | struct tid_rdma_params *local = &qpriv->tid_rdma.local; | |
3369 | struct rvt_ack_entry *e; | |
3370 | u32 npkts, to_seg; | |
3371 | bool last; | |
3372 | int ret = 0; | |
3373 | ||
3374 | lockdep_assert_held(&qp->s_lock); | |
3375 | ||
3376 | while (1) { | |
3377 | /* | |
3378 | * Don't allocate more segments if a RNR NAK has already been | |
3379 | * scheduled to avoid messing up qp->r_psn: the RNR NAK will | |
3380 | * be sent only when all allocated segments have been sent. | |
3381 | * However, if more segments are allocated before that, TID RDMA | |
3382 | * WRITE RESP packets will be sent out for these new segments | |
3383 | * before the RNR NAK packet. When the requester receives the | |
3384 | * RNR NAK packet, it will restart with qp->s_last_psn + 1, | |
3385 | * which does not match qp->r_psn and will be dropped. | |
3386 | * Consequently, the requester will exhaust its retries and | |
3387 | * put the qp into error state. | |
3388 | */ | |
3389 | if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND) | |
3390 | break; | |
3391 | ||
3392 | /* No requests left to process */ | |
3393 | if (qpriv->r_tid_alloc == qpriv->r_tid_head) { | |
3394 | /* If all data has been received, clear the flow */ | |
3395 | if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS && | |
3396 | !qpriv->alloc_w_segs) | |
3397 | hfi1_kern_clear_hw_flow(rcd, qp); | |
3398 | break; | |
3399 | } | |
3400 | ||
3401 | e = &qp->s_ack_queue[qpriv->r_tid_alloc]; | |
3402 | if (e->opcode != TID_OP(WRITE_REQ)) | |
3403 | goto next_req; | |
3404 | req = ack_to_tid_req(e); | |
3405 | /* Finished allocating for all segments of this request */ | |
3406 | if (req->alloc_seg >= req->total_segs) | |
3407 | goto next_req; | |
3408 | ||
3409 | /* Can allocate only a maximum of local->max_write for a QP */ | |
3410 | if (qpriv->alloc_w_segs >= local->max_write) | |
3411 | break; | |
3412 | ||
3413 | /* Don't allocate at a sync point with data packets pending */ | |
3414 | if (qpriv->sync_pt && qpriv->alloc_w_segs) | |
3415 | break; | |
3416 | ||
3417 | /* All data received at the sync point, continue */ | |
3418 | if (qpriv->sync_pt && !qpriv->alloc_w_segs) { | |
3419 | hfi1_kern_clear_hw_flow(rcd, qp); | |
3420 | qpriv->sync_pt = false; | |
3421 | if (qpriv->s_flags & HFI1_R_TID_SW_PSN) | |
3422 | qpriv->s_flags &= ~HFI1_R_TID_SW_PSN; | |
3423 | } | |
3424 | ||
3425 | /* Allocate flow if we don't have one */ | |
3426 | if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) { | |
3427 | ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp); | |
3428 | if (ret) { | |
3429 | to_seg = tid_rdma_flow_wt * | |
3430 | position_in_queue(qpriv, | |
3431 | &rcd->flow_queue); | |
3432 | break; | |
3433 | } | |
3434 | } | |
3435 | ||
3436 | npkts = rvt_div_round_up_mtu(qp, req->seg_len); | |
3437 | ||
3438 | /* | |
3439 | * We are at a sync point if we run out of KDETH PSN space. | |
3440 | * Last PSN of every generation is reserved for RESYNC. | |
3441 | */ | |
3442 | if (qpriv->flow_state.psn + npkts > MAX_TID_FLOW_PSN - 1) { | |
3443 | qpriv->sync_pt = true; | |
3444 | break; | |
3445 | } | |
3446 | ||
3447 | /* | |
3448 | * If overtaking req->acked_tail, send an RNR NAK. Because the | |
3449 | * QP is not queued in this case, and the issue can only be | |
3450 | * caused by a delay in scheduling the second leg, which we | |
3451 | * cannot estimate, we use a rather arbitrary RNR timeout of | |
3452 | * (MAX_FLOWS / 2) segments. | |
3453 | */ | |
3454 | if (!CIRC_SPACE(req->setup_head, req->acked_tail, | |
3455 | MAX_FLOWS)) { | |
3456 | ret = -EAGAIN; | |
3457 | to_seg = MAX_FLOWS >> 1; | |
3458 | qpriv->s_flags |= RVT_S_ACK_PENDING; | |
3459 | break; | |
3460 | } | |
3461 | ||
3462 | /* Try to allocate rcv array / TID entries */ | |
3463 | ret = hfi1_kern_exp_rcv_setup(req, &req->ss, &last); | |
3464 | if (ret == -EAGAIN) | |
3465 | to_seg = position_in_queue(qpriv, &rcd->rarr_queue); | |
3466 | if (ret) | |
3467 | break; | |
3468 | ||
3469 | qpriv->alloc_w_segs++; | |
3470 | req->alloc_seg++; | |
3471 | continue; | |
3472 | next_req: | |
3473 | /* Begin processing the next request */ | |
3474 | if (++qpriv->r_tid_alloc > | |
3475 | rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) | |
3476 | qpriv->r_tid_alloc = 0; | |
3477 | } | |
3478 | ||
3479 | /* | |
3480 | * Schedule an RNR NAK to be sent if (a) flow or rcv array allocation | |
3481 | * has failed, (b) we are called from the rcv handler interrupt context, | |
3482 | * and (c) an RNR NAK has not already been scheduled. | |
3483 | */ | |
3484 | if (ret == -EAGAIN && intr_ctx && !qp->r_nak_state) | |
3485 | goto send_rnr_nak; | |
3486 | ||
3487 | return; | |
3488 | ||
3489 | send_rnr_nak: | |
3490 | lockdep_assert_held(&qp->r_lock); | |
3491 | ||
3492 | /* Set r_nak_state to prevent unrelated events from generating NAKs */ | |
3493 | qp->r_nak_state = hfi1_compute_tid_rnr_timeout(qp, to_seg) | IB_RNR_NAK; | |
3494 | ||
3495 | /* Pull back r_psn to the segment being RNR NAK'd */ | |
3496 | qp->r_psn = e->psn + req->alloc_seg; | |
3497 | qp->r_ack_psn = qp->r_psn; | |
3498 | /* | |
3499 | * Pull back r_head_ack_queue to the ack entry following the request | |
3500 | * being RNR NAK'd. This allows resources to be allocated to the request | |
3501 | * if the queued QP is scheduled. | |
3502 | */ | |
3503 | qp->r_head_ack_queue = qpriv->r_tid_alloc + 1; | |
3504 | if (qp->r_head_ack_queue > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) | |
3505 | qp->r_head_ack_queue = 0; | |
3506 | qpriv->r_tid_head = qp->r_head_ack_queue; | |
3507 | /* | |
3508 | * These send side fields are used in make_rc_ack(). They are set in | |
3509 | * hfi1_send_rc_ack() but must be set here before dropping qp->s_lock | |
3510 | * for consistency | |
3511 | */ | |
3512 | qp->s_nak_state = qp->r_nak_state; | |
3513 | qp->s_ack_psn = qp->r_ack_psn; | |
3514 | /* | |
3515 | * Clear the ACK PENDING flag to prevent unwanted ACK because we | |
3516 | * have modified qp->s_ack_psn here. | |
3517 | */ | |
3518 | qp->s_flags &= ~(RVT_S_ACK_PENDING); | |
3519 | ||
3520 | /* | |
3521 | * qpriv->rnr_nak_state is used to determine when the scheduled RNR NAK | |
3522 | * has actually been sent. The qp->s_flags RVT_S_ACK_PENDING bit cannot | |
3523 | * be used for this because qp->s_lock is dropped before calling | |
3524 | * hfi1_send_rc_ack(), leading to inconsistency between the receive | |
3525 | * interrupt handlers and the send thread in make_rc_ack() | |
3526 | */ | |
3527 | qpriv->rnr_nak_state = TID_RNR_NAK_SEND; | |
3528 | ||
3529 | /* | |
3530 | * Schedule RNR NAK to be sent. RNR NAK's are scheduled from the receive | |
3531 | * interrupt handlers but will be sent from the send engine behind any | |
3532 | * previous responses that may have been scheduled | |
3533 | */ | |
3534 | rc_defered_ack(rcd, qp); | |
3535 | } | |
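/*
 * A hypothetical snapshot of the decoupling described in the comment
 * above this function: with local->max_write == 4, the allocator may
 * sit at [qpriv->r_tid_alloc, req->alloc_seg == 3] while the send
 * engine is still emitting [qp->s_tail_ack_queue, req->cur_seg == 1];
 * the two advance independently, bounded by the sync-point and
 * CIRC_SPACE() checks in the loop above.
 */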
3536 | ||
3537 | void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet) | |
3538 | { | |
3539 | /* HANDLER FOR TID RDMA WRITE REQUEST packet (Responder side) */ | |
3540 | ||
3541 | /* | |
3542 | * 1. Verify TID RDMA WRITE REQ as per IB_OPCODE_RC_RDMA_WRITE_FIRST | |
3543 | * (see hfi1_rc_rcv()) | |
3544 | * - Don't allow 0-length requests. | |
3545 | * 2. Put TID RDMA WRITE REQ into the response queue (s_ack_queue) | |
3546 | * - Setup struct tid_rdma_req with request info | |
3547 | * - Prepare struct tid_rdma_flow array? | |
3548 | * 3. Set the qp->s_ack_state as state diagram in design doc. | |
3549 | * 4. Set RVT_S_RESP_PENDING in s_flags. | |
3550 | * 5. Kick the send engine (hfi1_schedule_send()) | |
3551 | */ | |
3552 | struct hfi1_ctxtdata *rcd = packet->rcd; | |
3553 | struct rvt_qp *qp = packet->qp; | |
3554 | struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | |
3555 | struct ib_other_headers *ohdr = packet->ohdr; | |
3556 | struct rvt_ack_entry *e; | |
3557 | unsigned long flags; | |
3558 | struct ib_reth *reth; | |
3559 | struct hfi1_qp_priv *qpriv = qp->priv; | |
3560 | struct tid_rdma_request *req; | |
3561 | u32 bth0, psn, len, rkey, num_segs; | |
3562 | bool is_fecn; | |
3563 | u8 next; | |
3564 | u64 vaddr; | |
3565 | int diff; | |
3566 | ||
3567 | bth0 = be32_to_cpu(ohdr->bth[0]); | |
3568 | if (hfi1_ruc_check_hdr(ibp, packet)) | |
3569 | return; | |
3570 | ||
3571 | is_fecn = process_ecn(qp, packet); | |
3572 | psn = mask_psn(be32_to_cpu(ohdr->bth[2])); | |
3573 | ||
3574 | if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) | |
3575 | rvt_comm_est(qp); | |
3576 | ||
3577 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) | |
3578 | goto nack_inv; | |
3579 | ||
3580 | reth = &ohdr->u.tid_rdma.w_req.reth; | |
3581 | vaddr = be64_to_cpu(reth->vaddr); | |
3582 | len = be32_to_cpu(reth->length); | |
3583 | ||
3584 | num_segs = DIV_ROUND_UP(len, qpriv->tid_rdma.local.max_len); | |
3585 | diff = delta_psn(psn, qp->r_psn); | |
3586 | if (unlikely(diff)) { | |
3587 | if (tid_rdma_rcv_error(packet, ohdr, qp, psn, diff)) | |
3588 | return; | |
3589 | goto send_ack; | |
3590 | } | |
3591 | ||
3592 | /* | |
3593 | * The resent request which was previously RNR NAK'd is inserted at the | |
3594 | * location of the original request, which is one entry behind | |
3595 | * r_head_ack_queue | |
3596 | */ | |
3597 | if (qpriv->rnr_nak_state) | |
3598 | qp->r_head_ack_queue = qp->r_head_ack_queue ? | |
3599 | qp->r_head_ack_queue - 1 : | |
3600 | rvt_size_atomic(ib_to_rvt(qp->ibqp.device)); | |
3601 | ||
3602 | /* We've verified the request, insert it into the ack queue. */ | |
3603 | next = qp->r_head_ack_queue + 1; | |
3604 | if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) | |
3605 | next = 0; | |
3606 | spin_lock_irqsave(&qp->s_lock, flags); | |
3607 | if (unlikely(next == qp->s_acked_ack_queue)) { | |
3608 | if (!qp->s_ack_queue[next].sent) | |
3609 | goto nack_inv_unlock; | |
3610 | update_ack_queue(qp, next); | |
3611 | } | |
3612 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; | |
3613 | req = ack_to_tid_req(e); | |
3614 | ||
3615 | /* Bring previously RNR NAK'd request back to life */ | |
3616 | if (qpriv->rnr_nak_state) { | |
3617 | qp->r_nak_state = 0; | |
3618 | qp->s_nak_state = 0; | |
3619 | qpriv->rnr_nak_state = TID_RNR_NAK_INIT; | |
3620 | qp->r_psn = e->lpsn + 1; | |
3621 | req->state = TID_REQUEST_INIT; | |
3622 | goto update_head; | |
3623 | } | |
3624 | ||
3625 | if (e->rdma_sge.mr) { | |
3626 | rvt_put_mr(e->rdma_sge.mr); | |
3627 | e->rdma_sge.mr = NULL; | |
3628 | } | |
3629 | ||
3630 | /* The length needs to be a multiple of PAGE_SIZE */ | |
3631 | if (!len || len & ~PAGE_MASK) | |
3632 | goto nack_inv_unlock; | |
3633 | ||
3634 | rkey = be32_to_cpu(reth->rkey); | |
3635 | qp->r_len = len; | |
3636 | ||
3637 | if (e->opcode == TID_OP(WRITE_REQ) && | |
3638 | (req->setup_head != req->clear_tail || | |
3639 | req->clear_tail != req->acked_tail)) | |
3640 | goto nack_inv_unlock; | |
3641 | ||
3642 | if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr, | |
3643 | rkey, IB_ACCESS_REMOTE_WRITE))) | |
3644 | goto nack_acc; | |
3645 | ||
3646 | qp->r_psn += num_segs - 1; | |
3647 | ||
3648 | e->opcode = (bth0 >> 24) & 0xff; | |
3649 | e->psn = psn; | |
3650 | e->lpsn = qp->r_psn; | |
3651 | e->sent = 0; | |
3652 | ||
3653 | req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write); | |
3654 | req->state = TID_REQUEST_INIT; | |
3655 | req->cur_seg = 0; | |
3656 | req->comp_seg = 0; | |
3657 | req->ack_seg = 0; | |
3658 | req->alloc_seg = 0; | |
3659 | req->isge = 0; | |
3660 | req->seg_len = qpriv->tid_rdma.local.max_len; | |
3661 | req->total_len = len; | |
3662 | req->total_segs = num_segs; | |
3663 | req->r_flow_psn = e->psn; | |
3664 | req->ss.sge = e->rdma_sge; | |
3665 | req->ss.num_sge = 1; | |
3666 | ||
3667 | req->flow_idx = req->setup_head; | |
3668 | req->clear_tail = req->setup_head; | |
3669 | req->acked_tail = req->setup_head; | |
3670 | ||
3671 | qp->r_state = e->opcode; | |
3672 | qp->r_nak_state = 0; | |
3673 | /* | |
3674 | * We need to increment the MSN here instead of when we | |
3675 | * finish sending the result since a duplicate request would | |
3676 | * increment it more than once. | |
3677 | */ | |
3678 | qp->r_msn++; | |
3679 | qp->r_psn++; | |
3680 | ||
3681 | if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID) { | |
3682 | qpriv->r_tid_tail = qp->r_head_ack_queue; | |
3683 | } else if (qpriv->r_tid_tail == qpriv->r_tid_head) { | |
3684 | struct tid_rdma_request *ptr; | |
3685 | ||
3686 | e = &qp->s_ack_queue[qpriv->r_tid_tail]; | |
3687 | ptr = ack_to_tid_req(e); | |
3688 | ||
3689 | if (e->opcode != TID_OP(WRITE_REQ) || | |
3690 | ptr->comp_seg == ptr->total_segs) { | |
3691 | if (qpriv->r_tid_tail == qpriv->r_tid_ack) | |
3692 | qpriv->r_tid_ack = qp->r_head_ack_queue; | |
3693 | qpriv->r_tid_tail = qp->r_head_ack_queue; | |
3694 | } | |
3695 | } | |
3696 | update_head: | |
3697 | qp->r_head_ack_queue = next; | |
3698 | qpriv->r_tid_head = qp->r_head_ack_queue; | |
3699 | ||
3700 | hfi1_tid_write_alloc_resources(qp, true); | |
3701 | ||
3702 | /* Schedule the send tasklet. */ | |
3703 | qp->s_flags |= RVT_S_RESP_PENDING; | |
3704 | hfi1_schedule_send(qp); | |
3705 | ||
3706 | spin_unlock_irqrestore(&qp->s_lock, flags); | |
3707 | if (is_fecn) | |
3708 | goto send_ack; | |
3709 | return; | |
3710 | ||
3711 | nack_inv_unlock: | |
3712 | spin_unlock_irqrestore(&qp->s_lock, flags); | |
3713 | nack_inv: | |
3714 | rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | |
3715 | qp->r_nak_state = IB_NAK_INVALID_REQUEST; | |
3716 | qp->r_ack_psn = qp->r_psn; | |
3717 | /* Queue NAK for later */ | |
3718 | rc_defered_ack(rcd, qp); | |
3719 | return; | |
3720 | nack_acc: | |
3721 | spin_unlock_irqrestore(&qp->s_lock, flags); | |
3722 | rvt_rc_error(qp, IB_WC_LOC_PROT_ERR); | |
3723 | qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; | |
3724 | qp->r_ack_psn = qp->r_psn; | |
3725 | send_ack: | |
3726 | hfi1_send_rc_ack(packet, is_fecn); | |
3727 | } | |
38d46d36 KW |
3728 | |
3729 | u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e, | |
3730 | struct ib_other_headers *ohdr, u32 *bth1, | |
3731 | u32 bth2, u32 *len, | |
3732 | struct rvt_sge_state **ss) | |
3733 | { | |
3734 | struct hfi1_ack_priv *epriv = e->priv; | |
3735 | struct tid_rdma_request *req = &epriv->tid_req; | |
3736 | struct hfi1_qp_priv *qpriv = qp->priv; | |
3737 | struct tid_rdma_flow *flow = NULL; | |
3738 | u32 resp_len = 0, hdwords = 0; | |
3739 | void *resp_addr = NULL; | |
3740 | struct tid_rdma_params *remote; | |
3741 | ||
3742 | flow = &req->flows[req->flow_idx]; | |
3743 | switch (req->state) { | |
3744 | default: | |
3745 | /* | |
3746 | * Try to allocate resources here in case QP was queued and was | |
3747 | * later scheduled when resources became available | |
3748 | */ | |
3749 | hfi1_tid_write_alloc_resources(qp, false); | |
3750 | ||
3751 | /* We've already sent everything which is ready */ | |
3752 | if (req->cur_seg >= req->alloc_seg) | |
3753 | goto done; | |
3754 | ||
3755 | /* | |
3756 | * Resources can be assigned but responses cannot be sent in | |
3757 | * rnr_nak state, till the resent request is received | |
3758 | */ | |
3759 | if (qpriv->rnr_nak_state == TID_RNR_NAK_SENT) | |
3760 | goto done; | |
3761 | ||
3762 | req->state = TID_REQUEST_ACTIVE; | |
3763 | req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS); | |
3c759e00 | 3764 | hfi1_add_tid_reap_timer(qp); |
38d46d36 KW |
3765 | break; |
3766 | ||
3767 | case TID_REQUEST_RESEND_ACTIVE: | |
3768 | case TID_REQUEST_RESEND: | |
3769 | req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS); | |
3770 | if (!CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS)) | |
3771 | req->state = TID_REQUEST_ACTIVE; | |
3772 | ||
3c759e00 | 3773 | hfi1_mod_tid_reap_timer(qp); |
38d46d36 KW |
3774 | break; |
3775 | } | |
3776 | flow->flow_state.resp_ib_psn = bth2; | |
3777 | resp_addr = (void *)flow->tid_entry; | |
3778 | resp_len = sizeof(*flow->tid_entry) * flow->tidcnt; | |
3779 | req->cur_seg++; | |
3780 | ||
3781 | memset(&ohdr->u.tid_rdma.w_rsp, 0, sizeof(ohdr->u.tid_rdma.w_rsp)); | |
3782 | epriv->ss.sge.vaddr = resp_addr; | |
3783 | epriv->ss.sge.sge_length = resp_len; | |
3784 | epriv->ss.sge.length = epriv->ss.sge.sge_length; | |
3785 | /* | |
3786 | * We can safely zero these out. Since the first SGE covers the | |
3787 | * entire packet, nothing else should even look at the MR. | |
3788 | */ | |
3789 | epriv->ss.sge.mr = NULL; | |
3790 | epriv->ss.sge.m = 0; | |
3791 | epriv->ss.sge.n = 0; | |
3792 | ||
3793 | epriv->ss.sg_list = NULL; | |
3794 | epriv->ss.total_len = epriv->ss.sge.sge_length; | |
3795 | epriv->ss.num_sge = 1; | |
3796 | ||
3797 | *ss = &epriv->ss; | |
3798 | *len = epriv->ss.total_len; | |
3799 | ||
3800 | /* Construct the TID RDMA WRITE RESP packet header */ | |
3801 | rcu_read_lock(); | |
3802 | remote = rcu_dereference(qpriv->tid_rdma.remote); | |
3803 | ||
3804 | KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth0, KVER, 0x1); | |
3805 | KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth1, JKEY, remote->jkey); | |
3806 | ohdr->u.tid_rdma.w_rsp.aeth = rvt_compute_aeth(qp); | |
3807 | ohdr->u.tid_rdma.w_rsp.tid_flow_psn = | |
3808 | cpu_to_be32((flow->flow_state.generation << | |
3809 | HFI1_KDETH_BTH_SEQ_SHIFT) | | |
3810 | (flow->flow_state.spsn & | |
3811 | HFI1_KDETH_BTH_SEQ_MASK)); | |
3812 | ohdr->u.tid_rdma.w_rsp.tid_flow_qp = | |
3813 | cpu_to_be32(qpriv->tid_rdma.local.qp | | |
3814 | ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) << | |
3815 | TID_RDMA_DESTQP_FLOW_SHIFT) | | |
3816 | qpriv->rcd->ctxt); | |
3817 | ohdr->u.tid_rdma.w_rsp.verbs_qp = cpu_to_be32(qp->remote_qpn); | |
3818 | *bth1 = remote->qp; | |
3819 | rcu_read_unlock(); | |
3820 | hdwords = sizeof(ohdr->u.tid_rdma.w_rsp) / sizeof(u32); | |
3821 | qpriv->pending_tid_w_segs++; | |
3822 | done: | |
3823 | return hdwords; | |
3824 | } | |
3c759e00 KW |
3825 | |
3826 | static void hfi1_add_tid_reap_timer(struct rvt_qp *qp) | |
3827 | { | |
3828 | struct hfi1_qp_priv *qpriv = qp->priv; | |
3829 | ||
3830 | lockdep_assert_held(&qp->s_lock); | |
3831 | if (!(qpriv->s_flags & HFI1_R_TID_RSC_TIMER)) { | |
3832 | qpriv->s_flags |= HFI1_R_TID_RSC_TIMER; | |
3833 | qpriv->s_tid_timer.expires = jiffies + | |
3834 | qpriv->tid_timer_timeout_jiffies; | |
3835 | add_timer(&qpriv->s_tid_timer); | |
3836 | } | |
3837 | } | |
3838 | ||
3839 | static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp) | |
3840 | { | |
3841 | struct hfi1_qp_priv *qpriv = qp->priv; | |
3842 | ||
3843 | lockdep_assert_held(&qp->s_lock); | |
3844 | qpriv->s_flags |= HFI1_R_TID_RSC_TIMER; | |
3845 | mod_timer(&qpriv->s_tid_timer, jiffies + | |
3846 | qpriv->tid_timer_timeout_jiffies); | |
3847 | } | |
3848 | ||
3849 | static int hfi1_stop_tid_reap_timer(struct rvt_qp *qp) | |
3850 | { | |
3851 | struct hfi1_qp_priv *qpriv = qp->priv; | |
3852 | int rval = 0; | |
3853 | ||
3854 | lockdep_assert_held(&qp->s_lock); | |
3855 | if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) { | |
3856 | rval = del_timer(&qpriv->s_tid_timer); | |
3857 | qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER; | |
3858 | } | |
3859 | return rval; | |
3860 | } | |
3861 | ||
3862 | void hfi1_del_tid_reap_timer(struct rvt_qp *qp) | |
3863 | { | |
3864 | struct hfi1_qp_priv *qpriv = qp->priv; | |
3865 | ||
3866 | del_timer_sync(&qpriv->s_tid_timer); | |
3867 | qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER; | |
3868 | } | |
3869 | ||
3870 | static void hfi1_tid_timeout(struct timer_list *t) | |
3871 | { | |
3872 | struct hfi1_qp_priv *qpriv = from_timer(qpriv, t, s_tid_timer); | |
3873 | struct rvt_qp *qp = qpriv->owner; | |
3874 | struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); | |
3875 | unsigned long flags; | |
3876 | u32 i; | |
3877 | ||
3878 | spin_lock_irqsave(&qp->r_lock, flags); | |
3879 | spin_lock(&qp->s_lock); | |
3880 | if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) { | |
3881 | dd_dev_warn(dd_from_ibdev(qp->ibqp.device), "[QP%u] %s %d\n", | |
3882 | qp->ibqp.qp_num, __func__, __LINE__); | |
3883 | hfi1_stop_tid_reap_timer(qp); | |
3884 | /* | |
3885 | * Go through the entire ack queue and clear any outstanding | |
3886 | * HW flow and RcvArray resources. | |
3887 | */ | |
3888 | hfi1_kern_clear_hw_flow(qpriv->rcd, qp); | |
3889 | for (i = 0; i < rvt_max_atomic(rdi); i++) { | |
3890 | struct tid_rdma_request *req = | |
3891 | ack_to_tid_req(&qp->s_ack_queue[i]); | |
3892 | ||
3893 | hfi1_kern_exp_rcv_clear_all(req); | |
3894 | } | |
3895 | spin_unlock(&qp->s_lock); | |
3896 | if (qp->ibqp.event_handler) { | |
3897 | struct ib_event ev; | |
3898 | ||
3899 | ev.device = qp->ibqp.device; | |
3900 | ev.element.qp = &qp->ibqp; | |
3901 | ev.event = IB_EVENT_QP_FATAL; | |
3902 | qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); | |
3903 | } | |
3904 | rvt_rc_error(qp, IB_WC_RESP_TIMEOUT_ERR); | |
3905 | goto unlock_r_lock; | |
3906 | } | |
3907 | spin_unlock(&qp->s_lock); | |
3908 | unlock_r_lock: | |
3909 | spin_unlock_irqrestore(&qp->r_lock, flags); | |
3910 | } | |
72a0ea99 KW |
3911 | |
3912 | void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet) | |
3913 | { | |
3914 | /* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requester side) */ | |
3915 | ||
3916 | /* | |
3917 | * 1. Find matching SWQE | |
3918 | * 2. Check that TIDENTRY array has enough space for a complete | |
3919 | * segment. If not, put QP in error state. | |
3920 | * 3. Save response data in struct tid_rdma_req and struct tid_rdma_flow | |
3921 | * 4. Remove HFI1_S_WAIT_TID_RESP from s_flags. | |
3922 | * 5. Set qp->s_state | |
3923 | * 6. Kick the send engine (hfi1_schedule_send()) | |
3924 | */ | |
3925 | struct ib_other_headers *ohdr = packet->ohdr; | |
3926 | struct rvt_qp *qp = packet->qp; | |
3927 | struct hfi1_qp_priv *qpriv = qp->priv; | |
3928 | struct hfi1_ctxtdata *rcd = packet->rcd; | |
3929 | struct rvt_swqe *wqe; | |
3930 | struct tid_rdma_request *req; | |
3931 | struct tid_rdma_flow *flow; | |
3932 | enum ib_wc_status status; | |
3933 | u32 opcode, aeth, psn, flow_psn, i, tidlen = 0, pktlen; | |
3934 | bool is_fecn; | |
3935 | unsigned long flags; | |
3936 | ||
3937 | is_fecn = process_ecn(qp, packet); | |
3938 | psn = mask_psn(be32_to_cpu(ohdr->bth[2])); | |
3939 | aeth = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.aeth); | |
3940 | opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; | |
3941 | ||
3942 | spin_lock_irqsave(&qp->s_lock, flags); | |
3943 | ||
3944 | /* Ignore invalid responses */ | |
3945 | if (cmp_psn(psn, qp->s_next_psn) >= 0) | |
3946 | goto ack_done; | |
3947 | ||
3948 | /* Ignore duplicate responses. */ | |
3949 | if (unlikely(cmp_psn(psn, qp->s_last_psn) <= 0)) | |
3950 | goto ack_done; | |
3951 | ||
3952 | if (unlikely(qp->s_acked == qp->s_tail)) | |
3953 | goto ack_done; | |
3954 | ||
3955 | /* | |
3956 | * If we are waiting for a particular packet sequence number | |
3957 | * due to a request being resent, check for it. Otherwise, | |
3958 | * ensure that we haven't missed anything. | |
3959 | */ | |
3960 | if (qp->r_flags & RVT_R_RDMAR_SEQ) { | |
3961 | if (cmp_psn(psn, qp->s_last_psn + 1) != 0) | |
3962 | goto ack_done; | |
3963 | qp->r_flags &= ~RVT_R_RDMAR_SEQ; | |
3964 | } | |
3965 | ||
3966 | wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur); | |
3967 | if (unlikely(wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)) | |
3968 | goto ack_op_err; | |
3969 | ||
3970 | req = wqe_to_tid_req(wqe); | |
3971 | /* | |
3972 | * If we've lost ACKs and our acked_tail pointer is too far | |
3973 | * behind, don't overwrite segments. Just drop the packet and | |
3974 | * let the reliability protocol take care of it. | |
3975 | */ | |
3976 | if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS)) | |
3977 | goto ack_done; | |
3978 | ||
3979 | /* | |
3980 | * The call to do_rc_ack() should be last in the chain of | |
3981 | * packet checks because it will end up updating the QP state. | |
3982 | * Therefore, anything that would prevent the packet from | |
3983 | * being accepted as a successful response should be prior | |
3984 | * to it. | |
3985 | */ | |
3986 | if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) | |
3987 | goto ack_done; | |
3988 | ||
3989 | flow = &req->flows[req->setup_head]; | |
3990 | flow->pkt = 0; | |
3991 | flow->tid_idx = 0; | |
3992 | flow->tid_offset = 0; | |
3993 | flow->sent = 0; | |
3994 | flow->resync_npkts = 0; | |
3995 | flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp); | |
3996 | flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) & | |
3997 | TID_RDMA_DESTQP_FLOW_MASK; | |
3998 | flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_psn)); | |
3999 | flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT; | |
4000 | flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK; | |
4001 | flow->flow_state.resp_ib_psn = psn; | |
4002 | flow->length = min_t(u32, req->seg_len, | |
4003 | (wqe->length - (req->comp_seg * req->seg_len))); | |
4004 | ||
4005 | flow->npkts = rvt_div_round_up_mtu(qp, flow->length); | |
4006 | flow->flow_state.lpsn = flow->flow_state.spsn + | |
4007 | flow->npkts - 1; | |
4008 | /* payload length = packet length - (header length + ICRC length) */ | |
4009 | pktlen = packet->tlen - (packet->hlen + 4); | |
4010 | if (pktlen > sizeof(flow->tid_entry)) { | |
4011 | status = IB_WC_LOC_LEN_ERR; | |
4012 | goto ack_err; | |
4013 | } | |
4014 | memcpy(flow->tid_entry, packet->ebuf, pktlen); | |
4015 | flow->tidcnt = pktlen / sizeof(*flow->tid_entry); | |
4016 | ||
4017 | req->comp_seg++; | |
4018 | /* | |
4019 | * Walk the TID_ENTRY list to make sure we have enough space for a | |
4020 | * complete segment. | |
4021 | */ | |
4022 | for (i = 0; i < flow->tidcnt; i++) { | |
4023 | if (!EXP_TID_GET(flow->tid_entry[i], LEN)) { | |
4024 | status = IB_WC_LOC_LEN_ERR; | |
4025 | goto ack_err; | |
4026 | } | |
4027 | tidlen += EXP_TID_GET(flow->tid_entry[i], LEN); | |
4028 | } | |
4029 | if (tidlen * PAGE_SIZE < flow->length) { | |
4030 | status = IB_WC_LOC_LEN_ERR; | |
4031 | goto ack_err; | |
4032 | } | |
4033 | ||
4034 | /* | |
4035 | * If this is the first response for this request, set the initial | |
4036 | * flow index to the current flow. | |
4037 | */ | |
4038 | if (!cmp_psn(psn, wqe->psn)) { | |
4039 | req->r_last_acked = mask_psn(wqe->psn - 1); | |
4040 | /* Set acked flow index to head index */ | |
4041 | req->acked_tail = req->setup_head; | |
4042 | } | |
4043 | ||
4044 | /* advance circular buffer head */ | |
4045 | req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS); | |
4046 | req->state = TID_REQUEST_ACTIVE; | |
4047 | ||
4048 | /* | |
4049 | * If all responses for this TID RDMA WRITE request have been received, | |
4050 | * advance the pointer to the next one. | |
4051 | * Since TID RDMA requests could be mixed in with regular IB requests, | |
4052 | * they might not appear sequentially in the queue. Therefore, the | |
4053 | * next request needs to be "found". | |
4054 | */ | |
4055 | if (qpriv->s_tid_cur != qpriv->s_tid_head && | |
4056 | req->comp_seg == req->total_segs) { | |
4057 | for (i = qpriv->s_tid_cur + 1; ; i++) { | |
4058 | if (i == qp->s_size) | |
4059 | i = 0; | |
4060 | wqe = rvt_get_swqe_ptr(qp, i); | |
4061 | if (i == qpriv->s_tid_head) | |
4062 | break; | |
4063 | if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) | |
4064 | break; | |
4065 | } | |
4066 | qpriv->s_tid_cur = i; | |
4067 | } | |
4068 | qp->s_flags &= ~HFI1_S_WAIT_TID_RESP; | |
4069 | ||
4070 | goto ack_done; | |
4071 | ||
4072 | ack_op_err: | |
4073 | status = IB_WC_LOC_QP_OP_ERR; | |
4074 | ack_err: | |
4075 | rvt_error_qp(qp, status); | |
4076 | ack_done: | |
4077 | spin_unlock_irqrestore(&qp->s_lock, flags); | |
4078 | if (is_fecn) | |
4079 | hfi1_send_rc_ack(packet, is_fecn); | |
4080 | } | |
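/*
 * Payload sizing in the handler above, with assumed numbers: for a
 * WRITE RESP whose packet->tlen is 88 bytes with a 36-byte header,
 * pktlen = 88 - (36 + 4) = 48 bytes of TID entries; assuming 4-byte
 * entries, flow->tidcnt = 48 / 4 = 12.
 */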
539e1908 KW |
4081 | |
4082 | bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe, | |
4083 | struct ib_other_headers *ohdr, | |
4084 | u32 *bth1, u32 *bth2, u32 *len) | |
4085 | { | |
4086 | struct tid_rdma_request *req = wqe_to_tid_req(wqe); | |
4087 | struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; | |
4088 | struct tid_rdma_params *remote; | |
4089 | struct rvt_qp *qp = req->qp; | |
4090 | struct hfi1_qp_priv *qpriv = qp->priv; | |
4091 | u32 tidentry = flow->tid_entry[flow->tid_idx]; | |
4092 | u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT; | |
4093 | struct tid_rdma_write_data *wd = &ohdr->u.tid_rdma.w_data; | |
4094 | u32 next_offset, om = KDETH_OM_LARGE; | |
4095 | bool last_pkt; | |
4096 | ||
4097 | if (!tidlen) { | |
4098 | hfi1_trdma_send_complete(qp, wqe, IB_WC_REM_INV_RD_REQ_ERR); | |
4099 | rvt_error_qp(qp, IB_WC_REM_INV_RD_REQ_ERR); | |
4100 | } | |
4101 | ||
4102 | *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset); | |
4103 | flow->sent += *len; | |
4104 | next_offset = flow->tid_offset + *len; | |
4105 | last_pkt = (flow->tid_idx == (flow->tidcnt - 1) && | |
4106 | next_offset >= tidlen) || (flow->sent >= flow->length); | |
4107 | ||
4108 | rcu_read_lock(); | |
4109 | remote = rcu_dereference(qpriv->tid_rdma.remote); | |
4110 | KDETH_RESET(wd->kdeth0, KVER, 0x1); | |
4111 | KDETH_SET(wd->kdeth0, SH, !last_pkt); | |
4112 | KDETH_SET(wd->kdeth0, INTR, !!(!last_pkt && remote->urg)); | |
4113 | KDETH_SET(wd->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL)); | |
4114 | KDETH_SET(wd->kdeth0, TID, EXP_TID_GET(tidentry, IDX)); | |
4115 | KDETH_SET(wd->kdeth0, OM, om == KDETH_OM_LARGE); | |
4116 | KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om); | |
4117 | KDETH_RESET(wd->kdeth1, JKEY, remote->jkey); | |
4118 | wd->verbs_qp = cpu_to_be32(qp->remote_qpn); | |
4119 | rcu_read_unlock(); | |
4120 | ||
4121 | *bth1 = flow->tid_qpn; | |
4122 | *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) & | |
4123 | HFI1_KDETH_BTH_SEQ_MASK) | | |
4124 | (flow->flow_state.generation << | |
4125 | HFI1_KDETH_BTH_SEQ_SHIFT)); | |
4126 | if (last_pkt) { | |
4127 | /* PSNs are zero-based, so +1 to count number of packets */ | |
4128 | if (flow->flow_state.lpsn + 1 + | |
4129 | rvt_div_round_up_mtu(qp, req->seg_len) > | |
4130 | MAX_TID_FLOW_PSN) | |
4131 | req->state = TID_REQUEST_SYNC; | |
4132 | *bth2 |= IB_BTH_REQ_ACK; | |
4133 | } | |
4134 | ||
4135 | if (next_offset >= tidlen) { | |
4136 | flow->tid_offset = 0; | |
4137 | flow->tid_idx++; | |
4138 | } else { | |
4139 | flow->tid_offset = next_offset; | |
4140 | } | |
4141 | return last_pkt; | |
4142 | } | |
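/*
 * Illustrative only, assuming HFI1_KDETH_BTH_SEQ_SHIFT == 11: the
 * fifth data packet (flow->pkt == 4) of a flow at generation 3 with
 * spsn == 100 gets *bth2 = (3 << 11) | ((100 + 4) & 0x7ff) = 0x1868,
 * i.e. generation in the upper bits and the running flow sequence in
 * the lower 11.
 */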
d72fe7d5 KW |
4143 | |
4144 | void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet) | |
4145 | { | |
4146 | struct rvt_qp *qp = packet->qp; | |
4147 | struct hfi1_qp_priv *priv = qp->priv; | |
4148 | struct hfi1_ctxtdata *rcd = priv->rcd; | |
4149 | struct ib_other_headers *ohdr = packet->ohdr; | |
4150 | struct rvt_ack_entry *e; | |
4151 | struct tid_rdma_request *req; | |
4152 | struct tid_rdma_flow *flow; | |
4153 | struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); | |
4154 | unsigned long flags; | |
4155 | u32 psn, next; | |
4156 | u8 opcode; | |
4157 | ||
4158 | psn = mask_psn(be32_to_cpu(ohdr->bth[2])); | |
4159 | opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; | |
4160 | ||
4161 | /* | |
4162 | * All error handling should be done by now. If we are here, the packet | |
4163 | * is either good or been accepted by the error handler. | |
4164 | */ | |
4165 | spin_lock_irqsave(&qp->s_lock, flags); | |
4166 | e = &qp->s_ack_queue[priv->r_tid_tail]; | |
4167 | req = ack_to_tid_req(e); | |
4168 | flow = &req->flows[req->clear_tail]; | |
4169 | if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) { | |
4170 | if (cmp_psn(psn, flow->flow_state.r_next_psn)) | |
4171 | goto send_nak; | |
4172 | flow->flow_state.r_next_psn++; | |
4173 | goto exit; | |
4174 | } | |
4175 | flow->flow_state.r_next_psn = mask_psn(psn + 1); | |
4176 | hfi1_kern_exp_rcv_clear(req); | |
4177 | priv->alloc_w_segs--; | |
4178 | rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK; | |
4179 | req->comp_seg++; | |
4180 | priv->s_nak_state = 0; | |
4181 | ||
4182 | /* | |
4183 | * Release the flow if one of the following conditions has been met: | |
4184 | * - The request has reached a sync point AND all outstanding | |
4185 | * segments have been completed, or | |
4186 | * - The entire request is complete and there are no more requests | |
4187 | * (of any kind) in the queue. | |
4188 | */ | |
4189 | if (priv->r_tid_ack == HFI1_QP_WQE_INVALID) | |
4190 | priv->r_tid_ack = priv->r_tid_tail; | |
4191 | ||
4192 | if (opcode == TID_OP(WRITE_DATA_LAST)) { | |
4193 | for (next = priv->r_tid_tail + 1; ; next++) { | |
4194 | if (next > rvt_size_atomic(&dev->rdi)) | |
4195 | next = 0; | |
4196 | if (next == priv->r_tid_head) | |
4197 | break; | |
4198 | e = &qp->s_ack_queue[next]; | |
4199 | if (e->opcode == TID_OP(WRITE_REQ)) | |
4200 | break; | |
4201 | } | |
4202 | priv->r_tid_tail = next; | |
4203 | if (++qp->s_acked_ack_queue > rvt_size_atomic(&dev->rdi)) | |
4204 | qp->s_acked_ack_queue = 0; | |
4205 | } | |
4206 | ||
4207 | hfi1_tid_write_alloc_resources(qp, true); | |
4208 | ||
4209 | /* | |
4210 | * If we need to generate more responses, schedule the | |
4211 | * send engine. | |
4212 | */ | |
4213 | if (req->cur_seg < req->total_segs || | |
4214 | qp->s_tail_ack_queue != qp->r_head_ack_queue) { | |
4215 | qp->s_flags |= RVT_S_RESP_PENDING; | |
4216 | hfi1_schedule_send(qp); | |
4217 | } | |
4218 | ||
4219 | priv->pending_tid_w_segs--; | |
4220 | if (priv->s_flags & HFI1_R_TID_RSC_TIMER) { | |
4221 | if (priv->pending_tid_w_segs) | |
4222 | hfi1_mod_tid_reap_timer(req->qp); | |
4223 | else | |
4224 | hfi1_stop_tid_reap_timer(req->qp); | |
4225 | } | |
4226 | ||
4227 | done: | |
4228 | priv->s_flags |= RVT_S_ACK_PENDING; | |
4229 | exit: | |
4230 | priv->r_next_psn_kdeth = flow->flow_state.r_next_psn; | |
4231 | spin_unlock_irqrestore(&qp->s_lock, flags); | |
4232 | return; | |
4233 | ||
4234 | send_nak: | |
4235 | if (!priv->s_nak_state) { | |
4236 | priv->s_nak_state = IB_NAK_PSN_ERROR; | |
4237 | priv->s_nak_psn = flow->flow_state.r_next_psn; | |
4238 | priv->s_flags |= RVT_S_ACK_PENDING; | |
4239 | if (priv->r_tid_ack == HFI1_QP_WQE_INVALID) | |
4240 | priv->r_tid_ack = priv->r_tid_tail; | |
4241 | } | |
4242 | goto done; | |
4243 | } |