Commit | Line | Data |
---|---|---|
5190f052 MM |
1 | // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) |
2 | /* | |
3 | * Copyright(c) 2018 Intel Corporation. | |
4 | * | |
5 | */ | |
6 | ||
7 | #include "hfi.h" | |
37356e78 | 8 | #include "qp.h" |
5190f052 MM |
9 | #include "verbs.h" |
10 | #include "tid_rdma.h" | |
838b6fd2 | 11 | #include "exp_rcv.h" |
a131d164 | 12 | #include "trace.h" |
5190f052 | 13 | |
37356e78 KW |
14 | #define RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK BIT_ULL(32) |
15 | #define RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK BIT_ULL(33) | |
16 | #define RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK BIT_ULL(34) | |
17 | #define RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK BIT_ULL(35) | |
18 | #define RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK BIT_ULL(37) | |
19 | #define RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK BIT_ULL(38) | |
20 | ||
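/*
 * The flow generation is 20 bits wide; it is programmed above the KDETH
 * sequence number in the flow PSN (see the HFI1_KDETH_BTH_SEQ_SHIFT use
 * in kern_set_hw_flow() below).
 */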
21 | #define GENERATION_MASK 0xFFFFF | |
22 | ||
23 | static u32 mask_generation(u32 a) | |
24 | { | |
25 | return a & GENERATION_MASK; | |
26 | } | |
27 | ||
28 | /* Reserved generation value to set to unused flows for kernel contexts */ | |
29 | #define KERN_GENERATION_RESERVED mask_generation(U32_MAX) | |
30 | ||
d22a207d KW |
31 | /* |
32 | * J_KEY for kernel contexts when TID RDMA is used. | |
33 | * See generate_jkey() in hfi.h for more information. | |
34 | */ | |
35 | #define TID_RDMA_JKEY 32 | |
36 | #define HFI1_KERNEL_MIN_JKEY HFI1_ADMIN_JKEY_RANGE | |
37 | #define HFI1_KERNEL_MAX_JKEY (2 * HFI1_ADMIN_JKEY_RANGE - 1) | |
38 | ||
838b6fd2 | 39 | /* Maximum number of segments in flight per QP request. */ |
d22a207d KW |
40 | #define TID_RDMA_MAX_READ_SEGS_PER_REQ 6 |
41 | #define TID_RDMA_MAX_WRITE_SEGS_PER_REQ 4 | |
838b6fd2 KW |
42 | #define MAX_REQ max_t(u16, TID_RDMA_MAX_READ_SEGS_PER_REQ, \ |
43 | TID_RDMA_MAX_WRITE_SEGS_PER_REQ) | |
44 | #define MAX_FLOWS roundup_pow_of_two(MAX_REQ + 1) | |
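/*
 * With the values above, MAX_REQ is 6 and MAX_FLOWS rounds up to 8.
 * MAX_FLOWS must be a power of two because req->setup_head/clear_tail
 * are managed with the CIRC_CNT()/CIRC_SPACE() macros.
 */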
45 | ||
46 | #define MAX_EXPECTED_PAGES (MAX_EXPECTED_BUFFER / PAGE_SIZE) | |
d22a207d KW |
47 | |
48 | #define TID_OPFN_QP_CTXT_MASK 0xff | |
49 | #define TID_OPFN_QP_CTXT_SHIFT 56 | |
50 | #define TID_OPFN_QP_KDETH_MASK 0xff | |
51 | #define TID_OPFN_QP_KDETH_SHIFT 48 | |
52 | #define TID_OPFN_MAX_LEN_MASK 0x7ff | |
53 | #define TID_OPFN_MAX_LEN_SHIFT 37 | |
54 | #define TID_OPFN_TIMEOUT_MASK 0x1f | |
55 | #define TID_OPFN_TIMEOUT_SHIFT 32 | |
56 | #define TID_OPFN_RESERVED_MASK 0x3f | |
57 | #define TID_OPFN_RESERVED_SHIFT 26 | |
58 | #define TID_OPFN_URG_MASK 0x1 | |
59 | #define TID_OPFN_URG_SHIFT 25 | |
60 | #define TID_OPFN_VER_MASK 0x7 | |
61 | #define TID_OPFN_VER_SHIFT 22 | |
62 | #define TID_OPFN_JKEY_MASK 0x3f | |
63 | #define TID_OPFN_JKEY_SHIFT 16 | |
64 | #define TID_OPFN_MAX_READ_MASK 0x3f | |
65 | #define TID_OPFN_MAX_READ_SHIFT 10 | |
66 | #define TID_OPFN_MAX_WRITE_MASK 0x3f | |
67 | #define TID_OPFN_MAX_WRITE_SHIFT 4 | |
68 | ||
69 | /* | |
70 | * OPFN TID layout | |
71 | * | |
72 | * 63 47 31 15 | |
73 | * NNNNNNNNKKKKKKKK MMMMMMMMMMMTTTTT DDDDDDUVVVJJJJJJ RRRRRRWWWWWWCCCC | |
74 | * 3210987654321098 7654321098765432 1098765432109876 5432109876543210 | |
75 | * N - the context Number | |
76 | * K - the Kdeth_qp | |
77 | * M - Max_len | |
78 | * T - Timeout | |
79 | * D - reserveD | |
80 | * V - version | |
81 | * U - Urg capable | |
82 | * J - Jkey | |
83 | * R - max_Read | |
84 | * W - max_Write | |
85 | * C - Capcode | |
86 | */ | |
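/*
 * Worked example of the packing above: with TID_OPFN_JKEY_SHIFT = 16 and
 * TID_OPFN_JKEY_MASK = 0x3f, a jkey of 32 (TID_RDMA_JKEY) is encoded as
 * ((u64)32 & 0x3f) << 16 = 0x200000, and recovered in
 * tid_rdma_opfn_decode() with (data >> 16) & 0x3f.
 */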
87 | ||
37356e78 | 88 | static void tid_rdma_trigger_resume(struct work_struct *work); |
838b6fd2 KW |
89 | static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req); |
90 | static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req, | |
91 | gfp_t gfp); | |
92 | static void hfi1_init_trdma_req(struct rvt_qp *qp, | |
93 | struct tid_rdma_request *req); | |
37356e78 | 94 | |
d22a207d KW |
95 | static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p) |
96 | { | |
97 | return | |
98 | (((u64)p->qp & TID_OPFN_QP_CTXT_MASK) << | |
99 | TID_OPFN_QP_CTXT_SHIFT) | | |
100 | ((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) << | |
101 | TID_OPFN_QP_KDETH_SHIFT) | | |
102 | (((u64)((p->max_len >> PAGE_SHIFT) - 1) & | |
103 | TID_OPFN_MAX_LEN_MASK) << TID_OPFN_MAX_LEN_SHIFT) | | |
104 | (((u64)p->timeout & TID_OPFN_TIMEOUT_MASK) << | |
105 | TID_OPFN_TIMEOUT_SHIFT) | | |
106 | (((u64)p->urg & TID_OPFN_URG_MASK) << TID_OPFN_URG_SHIFT) | | |
107 | (((u64)p->jkey & TID_OPFN_JKEY_MASK) << TID_OPFN_JKEY_SHIFT) | | |
108 | (((u64)p->max_read & TID_OPFN_MAX_READ_MASK) << | |
109 | TID_OPFN_MAX_READ_SHIFT) | | |
110 | (((u64)p->max_write & TID_OPFN_MAX_WRITE_MASK) << | |
111 | TID_OPFN_MAX_WRITE_SHIFT); | |
112 | } | |
113 | ||
114 | static void tid_rdma_opfn_decode(struct tid_rdma_params *p, u64 data) | |
115 | { | |
116 | p->max_len = (((data >> TID_OPFN_MAX_LEN_SHIFT) & | |
117 | TID_OPFN_MAX_LEN_MASK) + 1) << PAGE_SHIFT; | |
118 | p->jkey = (data >> TID_OPFN_JKEY_SHIFT) & TID_OPFN_JKEY_MASK; | |
119 | p->max_write = (data >> TID_OPFN_MAX_WRITE_SHIFT) & | |
120 | TID_OPFN_MAX_WRITE_MASK; | |
121 | p->max_read = (data >> TID_OPFN_MAX_READ_SHIFT) & | |
122 | TID_OPFN_MAX_READ_MASK; | |
123 | p->qp = | |
124 | ((((data >> TID_OPFN_QP_KDETH_SHIFT) & TID_OPFN_QP_KDETH_MASK) | |
125 | << 16) | | |
126 | ((data >> TID_OPFN_QP_CTXT_SHIFT) & TID_OPFN_QP_CTXT_MASK)); | |
127 | p->urg = (data >> TID_OPFN_URG_SHIFT) & TID_OPFN_URG_MASK; | |
128 | p->timeout = (data >> TID_OPFN_TIMEOUT_SHIFT) & TID_OPFN_TIMEOUT_MASK; | |
129 | } | |
130 | ||
131 | void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p) | |
132 | { | |
133 | struct hfi1_qp_priv *priv = qp->priv; | |
134 | ||
135 | p->qp = (kdeth_qp << 16) | priv->rcd->ctxt; | |
136 | p->max_len = TID_RDMA_MAX_SEGMENT_SIZE; | |
137 | p->jkey = priv->rcd->jkey; | |
138 | p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ; | |
139 | p->max_write = TID_RDMA_MAX_WRITE_SEGS_PER_REQ; | |
140 | p->timeout = qp->timeout; | |
141 | p->urg = is_urg_masked(priv->rcd); | |
142 | } | |
143 | ||
144 | bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data) | |
145 | { | |
146 | struct hfi1_qp_priv *priv = qp->priv; | |
147 | ||
148 | *data = tid_rdma_opfn_encode(&priv->tid_rdma.local); | |
149 | return true; | |
150 | } | |
151 | ||
152 | bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data) | |
153 | { | |
154 | struct hfi1_qp_priv *priv = qp->priv; | |
155 | struct tid_rdma_params *remote, *old; | |
156 | bool ret = true; | |
157 | ||
158 | old = rcu_dereference_protected(priv->tid_rdma.remote, | |
159 | lockdep_is_held(&priv->opfn.lock)); | |
160 | data &= ~0xfULL; | |
161 | /* | |
162 | * If data passed in is zero, return true so as not to continue the | |
163 | * negotiation process | |
164 | */ | |
165 | if (!data || !HFI1_CAP_IS_KSET(TID_RDMA)) | |
166 | goto null; | |
167 | /* | |
168 | * If kzalloc fails, return false. This will result in: | |
169 | * * at the requester a new OPFN request being generated to retry | |
170 | * the negotiation | |
171 | * * at the responder, 0 being returned to the requester so as to | |
172 | * disable TID RDMA at both the requester and the responder | |
173 | */ | |
174 | remote = kzalloc(sizeof(*remote), GFP_ATOMIC); | |
175 | if (!remote) { | |
176 | ret = false; | |
177 | goto null; | |
178 | } | |
179 | ||
180 | tid_rdma_opfn_decode(remote, data); | |
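/*
 * The remote timeout appears to be the standard IB exponent:
 * 4.096 usec * 2^timeout. The expression below converts that to
 * microseconds and scales it by 8 * 7 = 56 before converting to
 * jiffies for the TID retry timer.
 */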
181 | priv->tid_timer_timeout_jiffies = | |
182 | usecs_to_jiffies((((4096UL * (1UL << remote->timeout)) / | |
183 | 1000UL) << 3) * 7); | |
a131d164 KW |
184 | trace_hfi1_opfn_param(qp, 0, &priv->tid_rdma.local); |
185 | trace_hfi1_opfn_param(qp, 1, remote); | |
d22a207d KW |
186 | rcu_assign_pointer(priv->tid_rdma.remote, remote); |
187 | /* | |
188 | * A TID RDMA READ request's segment size is not equal to | |
189 | * remote->max_len only when the request's data length is smaller | |
190 | * than remote->max_len. In that case, there will be only one segment. | |
191 | * Therefore, when priv->pkts_ps is used to calculate req->cur_seg | |
192 | * during retry, it will lead to req->cur_seg = 0, which is exactly | |
193 | * what is expected. | |
194 | */ | |
195 | priv->pkts_ps = (u16)rvt_div_mtu(qp, remote->max_len); | |
196 | priv->timeout_shift = ilog2(priv->pkts_ps - 1) + 1; | |
197 | goto free; | |
198 | null: | |
199 | RCU_INIT_POINTER(priv->tid_rdma.remote, NULL); | |
200 | priv->timeout_shift = 0; | |
201 | free: | |
202 | if (old) | |
203 | kfree_rcu(old, rcu_head); | |
204 | return ret; | |
205 | } | |
206 | ||
207 | bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data) | |
208 | { | |
209 | bool ret; | |
210 | ||
211 | ret = tid_rdma_conn_reply(qp, *data); | |
212 | *data = 0; | |
213 | /* | |
214 | * If tid_rdma_conn_reply() returns error, set *data as 0 to indicate | |
215 | * TID RDMA could not be enabled. This will result in TID RDMA being | |
216 | * disabled at the requester too. | |
217 | */ | |
218 | if (ret) | |
219 | (void)tid_rdma_conn_req(qp, data); | |
220 | return ret; | |
221 | } | |
222 | ||
223 | void tid_rdma_conn_error(struct rvt_qp *qp) | |
224 | { | |
225 | struct hfi1_qp_priv *priv = qp->priv; | |
226 | struct tid_rdma_params *old; | |
227 | ||
228 | old = rcu_dereference_protected(priv->tid_rdma.remote, | |
229 | lockdep_is_held(&priv->opfn.lock)); | |
230 | RCU_INIT_POINTER(priv->tid_rdma.remote, NULL); | |
231 | if (old) | |
232 | kfree_rcu(old, rcu_head); | |
233 | } | |
234 | ||
235 | /* This is called at context initialization time */ | |
236 | int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit) | |
237 | { | |
238 | if (reinit) | |
239 | return 0; | |
240 | ||
241 | BUILD_BUG_ON(TID_RDMA_JKEY < HFI1_KERNEL_MIN_JKEY); | |
242 | BUILD_BUG_ON(TID_RDMA_JKEY > HFI1_KERNEL_MAX_JKEY); | |
243 | rcd->jkey = TID_RDMA_JKEY; | |
244 | hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey); | |
838b6fd2 | 245 | return hfi1_alloc_ctxt_rcv_groups(rcd); |
d22a207d KW |
246 | } |
247 | ||
5190f052 MM |
248 | /** |
249 | * qp_to_rcd - determine the receive context used by a qp | |
250 | * @qp - the qp | |
251 | * | |
252 | * This routine returns the receive context associated | |
253 | * with a qp's qpn. | |
254 | * | |
255 | * Returns the context. | |
256 | */ | |
257 | static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi, | |
258 | struct rvt_qp *qp) | |
259 | { | |
260 | struct hfi1_ibdev *verbs_dev = container_of(rdi, | |
261 | struct hfi1_ibdev, | |
262 | rdi); | |
263 | struct hfi1_devdata *dd = container_of(verbs_dev, | |
264 | struct hfi1_devdata, | |
265 | verbs_dev); | |
266 | unsigned int ctxt; | |
267 | ||
268 | if (qp->ibqp.qp_num == 0) | |
269 | ctxt = 0; | |
270 | else | |
271 | ctxt = ((qp->ibqp.qp_num >> dd->qos_shift) % | |
272 | (dd->n_krcv_queues - 1)) + 1; | |
273 | ||
274 | return dd->rcd[ctxt]; | |
275 | } | |
276 | ||
277 | int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp, | |
278 | struct ib_qp_init_attr *init_attr) | |
279 | { | |
280 | struct hfi1_qp_priv *qpriv = qp->priv; | |
838b6fd2 | 281 | int i, ret; |
5190f052 MM |
282 | |
283 | qpriv->rcd = qp_to_rcd(rdi, qp); | |
284 | ||
48a615dc KW |
285 | spin_lock_init(&qpriv->opfn.lock); |
286 | INIT_WORK(&qpriv->opfn.opfn_work, opfn_send_conn_request); | |
37356e78 KW |
287 | INIT_WORK(&qpriv->tid_rdma.trigger_work, tid_rdma_trigger_resume); |
288 | qpriv->flow_state.psn = 0; | |
289 | qpriv->flow_state.index = RXE_NUM_TID_FLOWS; | |
290 | qpriv->flow_state.last_index = RXE_NUM_TID_FLOWS; | |
291 | qpriv->flow_state.generation = KERN_GENERATION_RESERVED; | |
292 | INIT_LIST_HEAD(&qpriv->tid_wait); | |
48a615dc | 293 | |
838b6fd2 KW |
294 | if (init_attr->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) { |
295 | struct hfi1_devdata *dd = qpriv->rcd->dd; | |
296 | ||
297 | qpriv->pages = kzalloc_node(TID_RDMA_MAX_PAGES * | |
298 | sizeof(*qpriv->pages), | |
299 | GFP_KERNEL, dd->node); | |
300 | if (!qpriv->pages) | |
301 | return -ENOMEM; | |
302 | for (i = 0; i < qp->s_size; i++) { | |
303 | struct hfi1_swqe_priv *priv; | |
304 | struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i); | |
305 | ||
306 | priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, | |
307 | dd->node); | |
308 | if (!priv) | |
309 | return -ENOMEM; | |
310 | ||
311 | hfi1_init_trdma_req(qp, &priv->tid_req); | |
312 | priv->tid_req.e.swqe = wqe; | |
313 | wqe->priv = priv; | |
314 | } | |
315 | for (i = 0; i < rvt_max_atomic(rdi); i++) { | |
316 | struct hfi1_ack_priv *priv; | |
317 | ||
318 | priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, | |
319 | dd->node); | |
320 | if (!priv) | |
321 | return -ENOMEM; | |
322 | ||
323 | hfi1_init_trdma_req(qp, &priv->tid_req); | |
324 | priv->tid_req.e.ack = &qp->s_ack_queue[i]; | |
325 | ||
326 | ret = hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req, | |
327 | GFP_KERNEL); | |
328 | if (ret) { | |
329 | kfree(priv); | |
330 | return ret; | |
331 | } | |
332 | qp->s_ack_queue[i].priv = priv; | |
333 | } | |
334 | } | |
335 | ||
5190f052 MM |
336 | return 0; |
337 | } | |
48a615dc KW |
338 | |
339 | void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp) | |
340 | { | |
838b6fd2 KW |
341 | struct hfi1_qp_priv *qpriv = qp->priv; |
342 | struct rvt_swqe *wqe; | |
343 | u32 i; | |
344 | ||
345 | if (qp->ibqp.qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) { | |
346 | for (i = 0; i < qp->s_size; i++) { | |
347 | wqe = rvt_get_swqe_ptr(qp, i); | |
348 | kfree(wqe->priv); | |
349 | wqe->priv = NULL; | |
350 | } | |
351 | for (i = 0; i < rvt_max_atomic(rdi); i++) { | |
352 | struct hfi1_ack_priv *priv = qp->s_ack_queue[i].priv; | |
353 | ||
354 | if (priv) | |
355 | hfi1_kern_exp_rcv_free_flows(&priv->tid_req); | |
356 | kfree(priv); | |
357 | qp->s_ack_queue[i].priv = NULL; | |
358 | } | |
359 | cancel_work_sync(&qpriv->opfn.opfn_work); | |
360 | kfree(qpriv->pages); | |
361 | qpriv->pages = NULL; | |
362 | } | |
48a615dc | 363 | } |
37356e78 KW |
364 | |
365 | /* Flow and tid waiter functions */ | |
366 | /** | |
367 | * DOC: lock ordering | |
368 | * | |
369 | * There are two locks involved with the queuing | |
370 | * routines: the qp s_lock and the exp_lock. | |
371 | * | |
372 | * Since the tid space allocation is called from | |
373 | * the send engine, the qp s_lock is already held. | |
374 | * | |
375 | * The allocation routines will get the exp_lock. | |
376 | * | |
377 | * The first_qp() call is provided to allow the head of | |
378 | * the rcd wait queue to be fetched under the exp_lock and | |
379 | * followed by a drop of the exp_lock. | |
380 | * | |
381 | * Any qp in the wait list will have the qp reference count held | |
382 | * to hold the qp in memory. | |
383 | */ | |
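/*
 * A sketch of the resulting call pattern (illustrative, not a literal
 * code path):
 *
 *   caller holds qp->s_lock (send engine)
 *     spin_lock(&rcd->exp_lock)
 *       ... reserve flow / TID resources, dequeue_tid_waiter() ...
 *       fqp = first_qp(rcd, queue);        // takes a qp reference
 *     spin_unlock(&rcd->exp_lock)
 *   tid_rdma_schedule_tid_wakeup(fqp);     // consumes that reference
 */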
384 | ||
385 | /* | |
386 | * return head of rcd wait list | |
387 | * | |
388 | * Must hold the exp_lock. | |
389 | * | |
390 | * Get a reference to the QP to hold the QP in memory. | |
391 | * | |
392 | * The caller must release the reference when the local | |
393 | * pointer is no longer being used. | |
394 | */ | |
395 | static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd, | |
396 | struct tid_queue *queue) | |
397 | __must_hold(&rcd->exp_lock) | |
398 | { | |
399 | struct hfi1_qp_priv *priv; | |
400 | ||
401 | lockdep_assert_held(&rcd->exp_lock); | |
402 | priv = list_first_entry_or_null(&queue->queue_head, | |
403 | struct hfi1_qp_priv, | |
404 | tid_wait); | |
405 | if (!priv) | |
406 | return NULL; | |
407 | rvt_get_qp(priv->owner); | |
408 | return priv->owner; | |
409 | } | |
410 | ||
411 | /** | |
412 | * kernel_tid_waiters - determine rcd wait | |
413 | * @rcd: the receive context | |
414 | * @qp: the head of the qp being processed | |
415 | * | |
416 | * This routine will return false IFF | |
417 | * the list is NULL or the head of the | |
418 | * list is the indicated qp. | |
419 | * | |
420 | * Must hold the qp s_lock and the exp_lock. | |
421 | * | |
422 | * Return: | |
423 | * false if either of the conditions below is satisfied: | |
424 | * 1. The list is empty or | |
425 | * 2. The indicated qp is at the head of the list and the | |
426 | * HFI1_S_WAIT_TID_SPACE bit is set in qp->s_flags. | |
427 | * true is returned otherwise. | |
428 | */ | |
429 | static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd, | |
430 | struct tid_queue *queue, struct rvt_qp *qp) | |
431 | __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) | |
432 | { | |
433 | struct rvt_qp *fqp; | |
434 | bool ret = true; | |
435 | ||
436 | lockdep_assert_held(&qp->s_lock); | |
437 | lockdep_assert_held(&rcd->exp_lock); | |
438 | fqp = first_qp(rcd, queue); | |
439 | if (!fqp || (fqp == qp && (qp->s_flags & HFI1_S_WAIT_TID_SPACE))) | |
440 | ret = false; | |
441 | rvt_put_qp(fqp); | |
442 | return ret; | |
443 | } | |
444 | ||
445 | /** | |
446 | * dequeue_tid_waiter - dequeue the qp from the list | |
447 | * @qp - the qp to remove from the wait list | |
448 | * | |
449 | * This routine removes the indicated qp from the | |
450 | * wait list if it is there. | |
451 | * | |
452 | * This should be done after the hardware flow and | |
453 | * tid array resources have been allocated. | |
454 | * | |
455 | * Must hold the qp s_lock and the rcd exp_lock. | |
456 | * | |
457 | * It assumes the s_lock to protect the s_flags | |
458 | * field and to reliably test the HFI1_S_WAIT_TID_SPACE flag. | |
459 | */ | |
460 | static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd, | |
461 | struct tid_queue *queue, struct rvt_qp *qp) | |
462 | __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) | |
463 | { | |
464 | struct hfi1_qp_priv *priv = qp->priv; | |
465 | ||
466 | lockdep_assert_held(&qp->s_lock); | |
467 | lockdep_assert_held(&rcd->exp_lock); | |
468 | if (list_empty(&priv->tid_wait)) | |
469 | return; | |
470 | list_del_init(&priv->tid_wait); | |
471 | qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE; | |
472 | queue->dequeue++; | |
473 | rvt_put_qp(qp); | |
474 | } | |
475 | ||
476 | /** | |
477 | * queue_qp_for_tid_wait - suspend QP on tid space | |
478 | * @rcd: the receive context | |
479 | * @qp: the qp | |
480 | * | |
481 | * The qp is inserted at the tail of the rcd | |
482 | * wait queue and the HFI1_S_WAIT_TID_SPACE s_flag is set. | |
483 | * | |
484 | * Must hold the qp s_lock and the exp_lock. | |
485 | */ | |
486 | static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd, | |
487 | struct tid_queue *queue, struct rvt_qp *qp) | |
488 | __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) | |
489 | { | |
490 | struct hfi1_qp_priv *priv = qp->priv; | |
491 | ||
492 | lockdep_assert_held(&qp->s_lock); | |
493 | lockdep_assert_held(&rcd->exp_lock); | |
494 | if (list_empty(&priv->tid_wait)) { | |
495 | qp->s_flags |= HFI1_S_WAIT_TID_SPACE; | |
496 | list_add_tail(&priv->tid_wait, &queue->queue_head); | |
497 | priv->tid_enqueue = ++queue->enqueue; | |
2f16a696 | 498 | rcd->dd->verbs_dev.n_tidwait++; |
37356e78 KW |
499 | trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TID_SPACE); |
500 | rvt_get_qp(qp); | |
501 | } | |
502 | } | |
503 | ||
504 | /** | |
505 | * __trigger_tid_waiter - trigger tid waiter | |
506 | * @qp: the qp | |
507 | * | |
508 | * This is a private entrance to schedule the qp | |
509 | * assuming the caller is holding the qp->s_lock. | |
510 | */ | |
511 | static void __trigger_tid_waiter(struct rvt_qp *qp) | |
512 | __must_hold(&qp->s_lock) | |
513 | { | |
514 | lockdep_assert_held(&qp->s_lock); | |
515 | if (!(qp->s_flags & HFI1_S_WAIT_TID_SPACE)) | |
516 | return; | |
517 | trace_hfi1_qpwakeup(qp, HFI1_S_WAIT_TID_SPACE); | |
518 | hfi1_schedule_send(qp); | |
519 | } | |
520 | ||
521 | /** | |
522 | * tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp | |
523 | * @qp - the qp | |
524 | * | |
525 | * trigger a schedule of a waiting qp in a deadlock | |
526 | * safe manner. The qp reference is held prior | |
527 | * to this call via first_qp(). | |
528 | * | |
529 | * If the qp trigger was already scheduled (!rval) | |
530 | * the reference is dropped, otherwise the resume | |
531 | * or the destroy cancel will dispatch the reference. | |
532 | */ | |
533 | static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp) | |
534 | { | |
535 | struct hfi1_qp_priv *priv; | |
536 | struct hfi1_ibport *ibp; | |
537 | struct hfi1_pportdata *ppd; | |
538 | struct hfi1_devdata *dd; | |
539 | bool rval; | |
540 | ||
541 | if (!qp) | |
542 | return; | |
543 | ||
544 | priv = qp->priv; | |
545 | ibp = to_iport(qp->ibqp.device, qp->port_num); | |
546 | ppd = ppd_from_ibp(ibp); | |
547 | dd = dd_from_ibdev(qp->ibqp.device); | |
548 | ||
549 | rval = queue_work_on(priv->s_sde ? | |
550 | priv->s_sde->cpu : | |
551 | cpumask_first(cpumask_of_node(dd->node)), | |
552 | ppd->hfi1_wq, | |
553 | &priv->tid_rdma.trigger_work); | |
554 | if (!rval) | |
555 | rvt_put_qp(qp); | |
556 | } | |
557 | ||
558 | /** | |
559 | * tid_rdma_trigger_resume - field a trigger work request | |
560 | * @work - the work item | |
561 | * | |
562 | * Complete the off qp trigger processing by directly | |
563 | * calling the progress routine. | |
564 | */ | |
565 | static void tid_rdma_trigger_resume(struct work_struct *work) | |
566 | { | |
567 | struct tid_rdma_qp_params *tr; | |
568 | struct hfi1_qp_priv *priv; | |
569 | struct rvt_qp *qp; | |
570 | ||
571 | tr = container_of(work, struct tid_rdma_qp_params, trigger_work); | |
572 | priv = container_of(tr, struct hfi1_qp_priv, tid_rdma); | |
573 | qp = priv->owner; | |
574 | spin_lock_irq(&qp->s_lock); | |
575 | if (qp->s_flags & HFI1_S_WAIT_TID_SPACE) { | |
576 | spin_unlock_irq(&qp->s_lock); | |
577 | hfi1_do_send(priv->owner, true); | |
578 | } else { | |
579 | spin_unlock_irq(&qp->s_lock); | |
580 | } | |
581 | rvt_put_qp(qp); | |
582 | } | |
583 | ||
584 | /** | |
585 | * tid_rdma_flush_wait - unwind any tid space wait | |
586 | * | |
587 | * This is called when resetting a qp to | |
588 | * allow a destroy or reset to get rid | |
589 | * of any tid space linkage and reference counts. | |
590 | */ | |
591 | static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue) | |
592 | __must_hold(&qp->s_lock) | |
593 | { | |
594 | struct hfi1_qp_priv *priv; | |
595 | ||
596 | if (!qp) | |
597 | return; | |
598 | lockdep_assert_held(&qp->s_lock); | |
599 | priv = qp->priv; | |
600 | qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE; | |
601 | spin_lock(&priv->rcd->exp_lock); | |
602 | if (!list_empty(&priv->tid_wait)) { | |
603 | list_del_init(&priv->tid_wait); | |
604 | qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE; | |
605 | queue->dequeue++; | |
606 | rvt_put_qp(qp); | |
607 | } | |
608 | spin_unlock(&priv->rcd->exp_lock); | |
609 | } | |
610 | ||
611 | void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp) | |
612 | __must_hold(&qp->s_lock) | |
613 | { | |
614 | struct hfi1_qp_priv *priv = qp->priv; | |
615 | ||
616 | _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue); | |
838b6fd2 | 617 | _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue); |
37356e78 KW |
618 | } |
619 | ||
620 | /* Flow functions */ | |
621 | /** | |
622 | * kern_reserve_flow - allocate a hardware flow | |
623 | * @rcd - the context to use for allocation | |
624 | * @last - the index of the preferred flow. Use RXE_NUM_TID_FLOWS to | |
625 | * signify "don't care". | |
626 | * | |
627 | * Use a bit mask based allocation to reserve a hardware | |
628 | * flow for use in receiving KDETH data packets. If a preferred flow is | |
629 | * specified the function will attempt to reserve that flow again, if | |
630 | * available. | |
631 | * | |
632 | * The exp_lock must be held. | |
633 | * | |
634 | * Return: | |
635 | * On success: a value between 0 and RXE_NUM_TID_FLOWS - 1 | |
636 | * On failure: -EAGAIN | |
637 | */ | |
638 | static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last) | |
639 | __must_hold(&rcd->exp_lock) | |
640 | { | |
641 | int nr; | |
642 | ||
643 | /* Attempt to reserve the preferred flow index */ | |
644 | if (last >= 0 && last < RXE_NUM_TID_FLOWS && | |
645 | !test_and_set_bit(last, &rcd->flow_mask)) | |
646 | return last; | |
647 | ||
648 | nr = ffz(rcd->flow_mask); | |
649 | BUILD_BUG_ON(RXE_NUM_TID_FLOWS >= | |
650 | (sizeof(rcd->flow_mask) * BITS_PER_BYTE)); | |
651 | if (nr > (RXE_NUM_TID_FLOWS - 1)) | |
652 | return -EAGAIN; | |
653 | set_bit(nr, &rcd->flow_mask); | |
654 | return nr; | |
655 | } | |
656 | ||
657 | static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation, | |
658 | u32 flow_idx) | |
659 | { | |
660 | u64 reg; | |
661 | ||
662 | reg = ((u64)generation << HFI1_KDETH_BTH_SEQ_SHIFT) | | |
663 | RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK | | |
664 | RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK | | |
665 | RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK | | |
666 | RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK | | |
667 | RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK; | |
668 | ||
669 | if (generation != KERN_GENERATION_RESERVED) | |
670 | reg |= RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK; | |
671 | ||
672 | write_uctxt_csr(rcd->dd, rcd->ctxt, | |
673 | RCV_TID_FLOW_TABLE + 8 * flow_idx, reg); | |
674 | } | |
675 | ||
676 | static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx) | |
677 | __must_hold(&rcd->exp_lock) | |
678 | { | |
679 | u32 generation = rcd->flows[flow_idx].generation; | |
680 | ||
681 | kern_set_hw_flow(rcd, generation, flow_idx); | |
682 | return generation; | |
683 | } | |
684 | ||
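/*
 * Advance a flow's generation, wrapping within GENERATION_MASK and
 * skipping KERN_GENERATION_RESERVED, which marks an unused flow.
 */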
685 | static u32 kern_flow_generation_next(u32 gen) | |
686 | { | |
687 | u32 generation = mask_generation(gen + 1); | |
688 | ||
689 | if (generation == KERN_GENERATION_RESERVED) | |
690 | generation = mask_generation(generation + 1); | |
691 | return generation; | |
692 | } | |
693 | ||
694 | static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx) | |
695 | __must_hold(&rcd->exp_lock) | |
696 | { | |
697 | rcd->flows[flow_idx].generation = | |
698 | kern_flow_generation_next(rcd->flows[flow_idx].generation); | |
699 | kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx); | |
700 | } | |
701 | ||
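/*
 * Reserve and program a hardware flow for the QP's receive context, or
 * queue the QP on rcd->flow_queue and return -EAGAIN if no flow is
 * currently available. Called with qp->s_lock held (see the lock
 * ordering DOC above).
 */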
702 | int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) | |
703 | { | |
704 | struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv; | |
705 | struct tid_flow_state *fs = &qpriv->flow_state; | |
706 | struct rvt_qp *fqp; | |
707 | unsigned long flags; | |
708 | int ret = 0; | |
709 | ||
710 | /* The QP already has an allocated flow */ | |
711 | if (fs->index != RXE_NUM_TID_FLOWS) | |
712 | return ret; | |
713 | ||
714 | spin_lock_irqsave(&rcd->exp_lock, flags); | |
715 | if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp)) | |
716 | goto queue; | |
717 | ||
718 | ret = kern_reserve_flow(rcd, fs->last_index); | |
719 | if (ret < 0) | |
720 | goto queue; | |
721 | fs->index = ret; | |
722 | fs->last_index = fs->index; | |
723 | ||
724 | /* Generation received in a RESYNC overrides default flow generation */ | |
725 | if (fs->generation != KERN_GENERATION_RESERVED) | |
726 | rcd->flows[fs->index].generation = fs->generation; | |
727 | fs->generation = kern_setup_hw_flow(rcd, fs->index); | |
728 | fs->psn = 0; | |
729 | fs->flags = 0; | |
730 | dequeue_tid_waiter(rcd, &rcd->flow_queue, qp); | |
731 | /* get head before dropping lock */ | |
732 | fqp = first_qp(rcd, &rcd->flow_queue); | |
733 | spin_unlock_irqrestore(&rcd->exp_lock, flags); | |
734 | ||
735 | tid_rdma_schedule_tid_wakeup(fqp); | |
736 | return 0; | |
737 | queue: | |
738 | queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp); | |
739 | spin_unlock_irqrestore(&rcd->exp_lock, flags); | |
740 | return -EAGAIN; | |
741 | } | |
742 | ||
743 | void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) | |
744 | { | |
745 | struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv; | |
746 | struct tid_flow_state *fs = &qpriv->flow_state; | |
747 | struct rvt_qp *fqp; | |
748 | unsigned long flags; | |
749 | ||
750 | if (fs->index >= RXE_NUM_TID_FLOWS) | |
751 | return; | |
752 | spin_lock_irqsave(&rcd->exp_lock, flags); | |
753 | kern_clear_hw_flow(rcd, fs->index); | |
754 | clear_bit(fs->index, &rcd->flow_mask); | |
755 | fs->index = RXE_NUM_TID_FLOWS; | |
756 | fs->psn = 0; | |
757 | fs->generation = KERN_GENERATION_RESERVED; | |
758 | ||
759 | /* get head before dropping lock */ | |
760 | fqp = first_qp(rcd, &rcd->flow_queue); | |
761 | spin_unlock_irqrestore(&rcd->exp_lock, flags); | |
762 | ||
763 | if (fqp == qp) { | |
764 | __trigger_tid_waiter(fqp); | |
765 | rvt_put_qp(fqp); | |
766 | } else { | |
767 | tid_rdma_schedule_tid_wakeup(fqp); | |
768 | } | |
769 | } | |
770 | ||
771 | void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd) | |
772 | { | |
773 | int i; | |
774 | ||
775 | for (i = 0; i < RXE_NUM_TID_FLOWS; i++) { | |
776 | rcd->flows[i].generation = mask_generation(prandom_u32()); | |
777 | kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i); | |
778 | } | |
779 | } | |
838b6fd2 KW |
780 | |
781 | /* TID allocation functions */ | |
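/*
 * Convert a pageset's page count to the "order" encoding passed to
 * hfi1_put_tid(): ilog2(count) + 1, so e.g. a 4-page set yields 3.
 */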
782 | static u8 trdma_pset_order(struct tid_rdma_pageset *s) | |
783 | { | |
784 | u8 count = s->count; | |
785 | ||
786 | return ilog2(count) + 1; | |
787 | } | |
788 | ||
789 | /** | |
790 | * tid_rdma_find_phys_blocks_4k - get groups based on mr info | |
791 | * @npages - number of pages | |
792 | * @pages - pointer to an array of page structs | |
793 | * @list - page set array to return | |
794 | * | |
795 | * This routine returns the number of groups associated with | |
796 | * the current sge information. This implementation is based | |
797 | * on the expected receive find_phys_blocks() adjusted to | |
798 | * use the MR information vs. the pfn. | |
799 | * | |
800 | * Return: | |
801 | * the number of RcvArray entries | |
802 | */ | |
803 | static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow, | |
804 | struct page **pages, | |
805 | u32 npages, | |
806 | struct tid_rdma_pageset *list) | |
807 | { | |
808 | u32 pagecount, pageidx, setcount = 0, i; | |
809 | void *vaddr, *this_vaddr; | |
810 | ||
811 | if (!npages) | |
812 | return 0; | |
813 | ||
814 | /* | |
815 | * Look for sets of physically contiguous pages in the user buffer. | |
816 | * This will allow us to optimize Expected RcvArray entry usage by | |
817 | * using the bigger supported sizes. | |
818 | */ | |
819 | vaddr = page_address(pages[0]); | |
84f4a40d | 820 | trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr); |
838b6fd2 KW |
821 | for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) { |
822 | this_vaddr = i < npages ? page_address(pages[i]) : NULL; | |
84f4a40d KW |
823 | trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0, |
824 | this_vaddr); | |
838b6fd2 KW |
825 | /* |
826 | * If the vaddr's are not sequential, pages are not physically | |
827 | * contiguous. | |
828 | */ | |
829 | if (this_vaddr != (vaddr + PAGE_SIZE)) { | |
830 | /* | |
831 | * At this point we have to loop over the set of | |
832 | * physically contiguous pages and break them down into | |
833 | * sizes supported by the HW. | |
834 | * There are two main constraints: | |
835 | * 1. The max buffer size is MAX_EXPECTED_BUFFER. | |
836 | * If the total set size is bigger than that | |
837 | * program only a MAX_EXPECTED_BUFFER chunk. | |
838 | * 2. The buffer size has to be a power of two. If | |
839 | * it is not, round down to the closest power of | |
840 | * 2 and program that size. | |
841 | */ | |
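/*
 * For example (assuming a 4 KiB PAGE_SIZE), a run of 9 physically
 * contiguous pages would be programmed as one 8-page set followed by
 * one 1-page set, since each set size must be a power of two.
 */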
842 | while (pagecount) { | |
843 | int maxpages = pagecount; | |
844 | u32 bufsize = pagecount * PAGE_SIZE; | |
845 | ||
846 | if (bufsize > MAX_EXPECTED_BUFFER) | |
847 | maxpages = | |
848 | MAX_EXPECTED_BUFFER >> | |
849 | PAGE_SHIFT; | |
850 | else if (!is_power_of_2(bufsize)) | |
851 | maxpages = | |
852 | rounddown_pow_of_two(bufsize) >> | |
853 | PAGE_SHIFT; | |
854 | ||
855 | list[setcount].idx = pageidx; | |
856 | list[setcount].count = maxpages; | |
84f4a40d KW |
857 | trace_hfi1_tid_pageset(flow->req->qp, setcount, |
858 | list[setcount].idx, | |
859 | list[setcount].count); | |
838b6fd2 KW |
860 | pagecount -= maxpages; |
861 | pageidx += maxpages; | |
862 | setcount++; | |
863 | } | |
864 | pageidx = i; | |
865 | pagecount = 1; | |
866 | vaddr = this_vaddr; | |
867 | } else { | |
868 | vaddr += PAGE_SIZE; | |
869 | pagecount++; | |
870 | } | |
871 | } | |
872 | /* ensure we always return an even number of sets */ | |
873 | if (setcount & 1) | |
874 | list[setcount++].count = 0; | |
875 | return setcount; | |
876 | } | |
877 | ||
878 | /** | |
879 | * tid_flush_pages - dump out pages into pagesets | |
880 | * @list - list of pagesets | |
881 | * @idx - pointer to current page index | |
882 | * @pages - number of pages to dump | |
883 | * @sets - current number of pagesets | |
884 | * | |
885 | * This routine flushes out accumulated pages. | |
886 | * | |
887 | * To ensure an even number of sets the | |
888 | * code may add a filler. | |
889 | * | |
890 | * This can happen when pages is not | |
891 | * a power of 2 or pages is a power of 2 | |
892 | * less than the maximum pages. | |
893 | * | |
894 | * Return: | |
895 | * The new number of sets | |
896 | */ | |
897 | ||
898 | static u32 tid_flush_pages(struct tid_rdma_pageset *list, | |
899 | u32 *idx, u32 pages, u32 sets) | |
900 | { | |
901 | while (pages) { | |
902 | u32 maxpages = pages; | |
903 | ||
904 | if (maxpages > MAX_EXPECTED_PAGES) | |
905 | maxpages = MAX_EXPECTED_PAGES; | |
906 | else if (!is_power_of_2(maxpages)) | |
907 | maxpages = rounddown_pow_of_two(maxpages); | |
908 | list[sets].idx = *idx; | |
909 | list[sets++].count = maxpages; | |
910 | *idx += maxpages; | |
911 | pages -= maxpages; | |
912 | } | |
913 | /* might need a filler */ | |
914 | if (sets & 1) | |
915 | list[sets++].count = 0; | |
916 | return sets; | |
917 | } | |
918 | ||
919 | /** | |
920 | * tid_rdma_find_phys_blocks_8k - get groups based on mr info | |
921 | * @pages - pointer to an array of page structs | |
922 | * @npages - number of pages | |
923 | * @list - page set array to return | |
924 | * | |
925 | * This routine parses an array of pages to compute pagesets | |
926 | * in an 8k compatible way. | |
927 | * | |
928 | * pages are tested two at a time, i, i + 1 for contiguous | |
929 | * pages and i - 1 and i contiguous pages. | |
930 | * | |
931 | * If any condition is false, any accumulated pages are flushed and | |
932 | * v0,v1 are emitted as separate PAGE_SIZE pagesets | |
933 | * | |
934 | * Otherwise, the current 8k is totaled for a future flush. | |
935 | * | |
936 | * Return: | |
937 | * The number of pagesets | |
938 | * list set with the returned number of pagesets | |
939 | * | |
940 | */ | |
941 | static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow, | |
942 | struct page **pages, | |
943 | u32 npages, | |
944 | struct tid_rdma_pageset *list) | |
945 | { | |
946 | u32 idx, sets = 0, i; | |
947 | u32 pagecnt = 0; | |
948 | void *v0, *v1, *vm1; | |
949 | ||
950 | if (!npages) | |
951 | return 0; | |
952 | for (idx = 0, i = 0, vm1 = NULL; i < npages; i += 2) { | |
953 | /* get a new v0 */ | |
954 | v0 = page_address(pages[i]); | |
84f4a40d | 955 | trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0); |
838b6fd2 KW |
956 | v1 = i + 1 < npages ? |
957 | page_address(pages[i + 1]) : NULL; | |
84f4a40d | 958 | trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1); |
838b6fd2 KW |
959 | /* compare i, i + 1 vaddr */ |
960 | if (v1 != (v0 + PAGE_SIZE)) { | |
961 | /* flush out pages */ | |
962 | sets = tid_flush_pages(list, &idx, pagecnt, sets); | |
963 | /* output v0,v1 as two pagesets */ | |
964 | list[sets].idx = idx++; | |
965 | list[sets++].count = 1; | |
966 | if (v1) { | |
967 | list[sets].count = 1; | |
968 | list[sets++].idx = idx++; | |
969 | } else { | |
970 | list[sets++].count = 0; | |
971 | } | |
972 | vm1 = NULL; | |
973 | pagecnt = 0; | |
974 | continue; | |
975 | } | |
976 | /* i,i+1 consecutive, look at i-1,i */ | |
977 | if (vm1 && v0 != (vm1 + PAGE_SIZE)) { | |
978 | /* flush out pages */ | |
979 | sets = tid_flush_pages(list, &idx, pagecnt, sets); | |
980 | pagecnt = 0; | |
981 | } | |
982 | /* pages will always be a multiple of 8k */ | |
983 | pagecnt += 2; | |
984 | /* save i-1 */ | |
985 | vm1 = v1; | |
986 | /* move to next pair */ | |
987 | } | |
988 | /* dump residual pages at end */ | |
989 | sets = tid_flush_pages(list, &idx, npages - idx, sets); | |
990 | /* by design cannot be odd sets */ | |
991 | WARN_ON(sets & 1); | |
992 | return sets; | |
993 | } | |
994 | ||
995 | /** | |
996 | * Find pages for one segment of a sge array represented by @ss. The function | |
997 | * does not check the sge, the sge must have been checked for alignment with a | |
998 | * prior call to hfi1_kern_trdma_ok. Other sge checking is done as part of | |
999 | * rvt_lkey_ok and rvt_rkey_ok. Also, the function only modifies the local sge | |
1000 | * copy maintained in @ss->sge, the original sge is not modified. | |
1001 | * | |
1002 | * Unlike IB RDMA WRITE, we can't decrement ss->num_sge here because we are not | |
1003 | * releasing the MR reference count at the same time. Otherwise, we'll "leak" | |
1004 | * references to the MR. This difference requires that we keep track of progress | |
1005 | * into the sg_list. This is done by the cur_seg cursor in the tid_rdma_request | |
1006 | * structure. | |
1007 | */ | |
1008 | static u32 kern_find_pages(struct tid_rdma_flow *flow, | |
1009 | struct page **pages, | |
1010 | struct rvt_sge_state *ss, bool *last) | |
1011 | { | |
1012 | struct tid_rdma_request *req = flow->req; | |
1013 | struct rvt_sge *sge = &ss->sge; | |
1014 | u32 length = flow->req->seg_len; | |
1015 | u32 len = PAGE_SIZE; | |
1016 | u32 i = 0; | |
1017 | ||
1018 | while (length && req->isge < ss->num_sge) { | |
1019 | pages[i++] = virt_to_page(sge->vaddr); | |
1020 | ||
1021 | sge->vaddr += len; | |
1022 | sge->length -= len; | |
1023 | sge->sge_length -= len; | |
1024 | if (!sge->sge_length) { | |
1025 | if (++req->isge < ss->num_sge) | |
1026 | *sge = ss->sg_list[req->isge - 1]; | |
1027 | } else if (sge->length == 0 && sge->mr->lkey) { | |
1028 | if (++sge->n >= RVT_SEGSZ) { | |
1029 | ++sge->m; | |
1030 | sge->n = 0; | |
1031 | } | |
1032 | sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; | |
1033 | sge->length = sge->mr->map[sge->m]->segs[sge->n].length; | |
1034 | } | |
1035 | length -= len; | |
1036 | } | |
1037 | ||
1038 | flow->length = flow->req->seg_len - length; | |
1039 | *last = req->isge == ss->num_sge ? false : true; | |
1040 | return i; | |
1041 | } | |
1042 | ||
1043 | static void dma_unmap_flow(struct tid_rdma_flow *flow) | |
1044 | { | |
1045 | struct hfi1_devdata *dd; | |
1046 | int i; | |
1047 | struct tid_rdma_pageset *pset; | |
1048 | ||
1049 | dd = flow->req->rcd->dd; | |
1050 | for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets; | |
1051 | i++, pset++) { | |
1052 | if (pset->count && pset->addr) { | |
1053 | dma_unmap_page(&dd->pcidev->dev, | |
1054 | pset->addr, | |
1055 | PAGE_SIZE * pset->count, | |
1056 | DMA_FROM_DEVICE); | |
1057 | pset->mapped = 0; | |
1058 | } | |
1059 | } | |
1060 | } | |
1061 | ||
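/*
 * DMA-map each non-empty pageset as one contiguous region of
 * pset->count pages; on any mapping failure, unmap what has been
 * mapped so far and return -ENOMEM.
 */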
1062 | static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages) | |
1063 | { | |
1064 | int i; | |
1065 | struct hfi1_devdata *dd = flow->req->rcd->dd; | |
1066 | struct tid_rdma_pageset *pset; | |
1067 | ||
1068 | for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets; | |
1069 | i++, pset++) { | |
1070 | if (pset->count) { | |
1071 | pset->addr = dma_map_page(&dd->pcidev->dev, | |
1072 | pages[pset->idx], | |
1073 | 0, | |
1074 | PAGE_SIZE * pset->count, | |
1075 | DMA_FROM_DEVICE); | |
1076 | ||
1077 | if (dma_mapping_error(&dd->pcidev->dev, pset->addr)) { | |
1078 | dma_unmap_flow(flow); | |
1079 | return -ENOMEM; | |
1080 | } | |
1081 | pset->mapped = 1; | |
1082 | } | |
1083 | } | |
1084 | return 0; | |
1085 | } | |
1086 | ||
1087 | static inline bool dma_mapped(struct tid_rdma_flow *flow) | |
1088 | { | |
1089 | return !!flow->pagesets[0].mapped; | |
1090 | } | |
1091 | ||
1092 | /* | |
1093 | * Get pages pointers and identify contiguous physical memory chunks for a | |
1094 | * segment. All segments are of length flow->req->seg_len. | |
1095 | */ | |
1096 | static int kern_get_phys_blocks(struct tid_rdma_flow *flow, | |
1097 | struct page **pages, | |
1098 | struct rvt_sge_state *ss, bool *last) | |
1099 | { | |
1100 | u8 npages; | |
1101 | ||
1102 | /* Reuse previously computed pagesets, if any */ | |
1103 | if (flow->npagesets) { | |
84f4a40d KW |
1104 | trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, |
1105 | flow); | |
838b6fd2 KW |
1106 | if (!dma_mapped(flow)) |
1107 | return dma_map_flow(flow, pages); | |
1108 | return 0; | |
1109 | } | |
1110 | ||
1111 | npages = kern_find_pages(flow, pages, ss, last); | |
1112 | ||
1113 | if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096)) | |
1114 | flow->npagesets = | |
1115 | tid_rdma_find_phys_blocks_4k(flow, pages, npages, | |
1116 | flow->pagesets); | |
1117 | else | |
1118 | flow->npagesets = | |
1119 | tid_rdma_find_phys_blocks_8k(flow, pages, npages, | |
1120 | flow->pagesets); | |
1121 | ||
1122 | return dma_map_flow(flow, pages); | |
1123 | } | |
1124 | ||
1125 | static inline void kern_add_tid_node(struct tid_rdma_flow *flow, | |
1126 | struct hfi1_ctxtdata *rcd, char *s, | |
1127 | struct tid_group *grp, u8 cnt) | |
1128 | { | |
1129 | struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++]; | |
1130 | ||
1131 | WARN_ON_ONCE(flow->tnode_cnt >= | |
1132 | (TID_RDMA_MAX_SEGMENT_SIZE >> PAGE_SHIFT)); | |
1133 | if (WARN_ON_ONCE(cnt & 1)) | |
1134 | dd_dev_err(rcd->dd, | |
1135 | "unexpected odd allocation cnt %u map 0x%x used %u", | |
1136 | cnt, grp->map, grp->used); | |
1137 | ||
1138 | node->grp = grp; | |
1139 | node->map = grp->map; | |
1140 | node->cnt = cnt; | |
84f4a40d KW |
1141 | trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1, |
1142 | grp->base, grp->map, grp->used, cnt); | |
838b6fd2 KW |
1143 | } |
1144 | ||
1145 | /* | |
1146 | * Try to allocate pageset_count TID's from TID groups for a context | |
1147 | * | |
1148 | * This function allocates TID's without moving groups between lists or | |
1149 | * modifying grp->map. This is done as follows, being cognizant of the lists | |
1150 | * between which the TID groups will move: | |
1151 | * 1. First allocate complete groups of 8 TID's since this is more efficient, | |
1152 | * these groups will move from group->full without affecting used | |
1153 | * 2. If more TID's are needed allocate from used (will move from used->full or | |
1154 | * stay in used) | |
1155 | * 3. If we still don't have the required number of TID's go back and look again | |
1156 | * at a complete group (will move from group->used) | |
1157 | */ | |
1158 | static int kern_alloc_tids(struct tid_rdma_flow *flow) | |
1159 | { | |
1160 | struct hfi1_ctxtdata *rcd = flow->req->rcd; | |
1161 | struct hfi1_devdata *dd = rcd->dd; | |
1162 | u32 ngroups, pageidx = 0; | |
1163 | struct tid_group *group = NULL, *used; | |
1164 | u8 use; | |
1165 | ||
1166 | flow->tnode_cnt = 0; | |
1167 | ngroups = flow->npagesets / dd->rcv_entries.group_size; | |
1168 | if (!ngroups) | |
1169 | goto used_list; | |
1170 | ||
1171 | /* First look at complete groups */ | |
1172 | list_for_each_entry(group, &rcd->tid_group_list.list, list) { | |
1173 | kern_add_tid_node(flow, rcd, "complete groups", group, | |
1174 | group->size); | |
1175 | ||
1176 | pageidx += group->size; | |
1177 | if (!--ngroups) | |
1178 | break; | |
1179 | } | |
1180 | ||
1181 | if (pageidx >= flow->npagesets) | |
1182 | goto ok; | |
1183 | ||
1184 | used_list: | |
1185 | /* Now look at partially used groups */ | |
1186 | list_for_each_entry(used, &rcd->tid_used_list.list, list) { | |
1187 | use = min_t(u32, flow->npagesets - pageidx, | |
1188 | used->size - used->used); | |
1189 | kern_add_tid_node(flow, rcd, "used groups", used, use); | |
1190 | ||
1191 | pageidx += use; | |
1192 | if (pageidx >= flow->npagesets) | |
1193 | goto ok; | |
1194 | } | |
1195 | ||
1196 | /* | |
1197 | * Look again at a complete group, continuing from where we left. | |
1198 | * However, if we are at the head, we have reached the end of the | |
1199 | * complete groups list from the first loop above | |
1200 | */ | |
1201 | if (group && &group->list == &rcd->tid_group_list.list) | |
1202 | goto bail_eagain; | |
1203 | group = list_prepare_entry(group, &rcd->tid_group_list.list, | |
1204 | list); | |
1205 | if (list_is_last(&group->list, &rcd->tid_group_list.list)) | |
1206 | goto bail_eagain; | |
1207 | group = list_next_entry(group, list); | |
1208 | use = min_t(u32, flow->npagesets - pageidx, group->size); | |
1209 | kern_add_tid_node(flow, rcd, "complete continue", group, use); | |
1210 | pageidx += use; | |
1211 | if (pageidx >= flow->npagesets) | |
1212 | goto ok; | |
1213 | bail_eagain: | |
84f4a40d KW |
1214 | trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ", |
1215 | (u64)flow->npagesets); | |
838b6fd2 KW |
1216 | return -EAGAIN; |
1217 | ok: | |
1218 | return 0; | |
1219 | } | |
1220 | ||
1221 | static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num, | |
1222 | u32 *pset_idx) | |
1223 | { | |
1224 | struct hfi1_ctxtdata *rcd = flow->req->rcd; | |
1225 | struct hfi1_devdata *dd = rcd->dd; | |
1226 | struct kern_tid_node *node = &flow->tnode[grp_num]; | |
1227 | struct tid_group *grp = node->grp; | |
1228 | struct tid_rdma_pageset *pset; | |
1229 | u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT; | |
1230 | u32 rcventry, npages = 0, pair = 0, tidctrl; | |
1231 | u8 i, cnt = 0; | |
1232 | ||
1233 | for (i = 0; i < grp->size; i++) { | |
1234 | rcventry = grp->base + i; | |
1235 | ||
1236 | if (node->map & BIT(i) || cnt >= node->cnt) { | |
1237 | rcv_array_wc_fill(dd, rcventry); | |
1238 | continue; | |
1239 | } | |
1240 | pset = &flow->pagesets[(*pset_idx)++]; | |
1241 | if (pset->count) { | |
1242 | hfi1_put_tid(dd, rcventry, PT_EXPECTED, | |
1243 | pset->addr, trdma_pset_order(pset)); | |
1244 | } else { | |
1245 | hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0); | |
1246 | } | |
1247 | npages += pset->count; | |
1248 | ||
1249 | rcventry -= rcd->expected_base; | |
1250 | tidctrl = pair ? 0x3 : rcventry & 0x1 ? 0x2 : 0x1; | |
1251 | /* | |
1252 | * A single TID entry will be used to cover a rcvarray pair (with | |
1253 | * tidctrl 0x3), if ALL these are true (a) the bit pos is even | |
1254 | * (b) the group map shows current and the next bits as free | |
1255 | * indicating two consecutive rcvarry entries are available (c) | |
1256 | * we actually need 2 more entries | |
1257 | */ | |
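/*
 * tidctrl values: 0x1 addresses the even entry of a RcvArray pair,
 * 0x2 the odd entry, and 0x3 both entries; the pair index itself is
 * rcventry >> 1 in the EXP_TID_SET() encoding below.
 */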
1258 | pair = !(i & 0x1) && !((node->map >> i) & 0x3) && | |
1259 | node->cnt >= cnt + 2; | |
1260 | if (!pair) { | |
1261 | if (!pset->count) | |
1262 | tidctrl = 0x1; | |
1263 | flow->tid_entry[flow->tidcnt++] = | |
1264 | EXP_TID_SET(IDX, rcventry >> 1) | | |
1265 | EXP_TID_SET(CTRL, tidctrl) | | |
1266 | EXP_TID_SET(LEN, npages); | |
84f4a40d KW |
1267 | trace_hfi1_tid_entry_alloc(/* entry */ |
1268 | flow->req->qp, flow->tidcnt - 1, | |
1269 | flow->tid_entry[flow->tidcnt - 1]); | |
1270 | ||
838b6fd2 KW |
1271 | /* Efficient DIV_ROUND_UP(npages, pmtu_pg) */ |
1272 | flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg); | |
1273 | npages = 0; | |
1274 | } | |
1275 | ||
1276 | if (grp->used == grp->size - 1) | |
1277 | tid_group_move(grp, &rcd->tid_used_list, | |
1278 | &rcd->tid_full_list); | |
1279 | else if (!grp->used) | |
1280 | tid_group_move(grp, &rcd->tid_group_list, | |
1281 | &rcd->tid_used_list); | |
1282 | ||
1283 | grp->used++; | |
1284 | grp->map |= BIT(i); | |
1285 | cnt++; | |
1286 | } | |
1287 | } | |
1288 | ||
1289 | static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num) | |
1290 | { | |
1291 | struct hfi1_ctxtdata *rcd = flow->req->rcd; | |
1292 | struct hfi1_devdata *dd = rcd->dd; | |
1293 | struct kern_tid_node *node = &flow->tnode[grp_num]; | |
1294 | struct tid_group *grp = node->grp; | |
1295 | u32 rcventry; | |
1296 | u8 i, cnt = 0; | |
1297 | ||
1298 | for (i = 0; i < grp->size; i++) { | |
1299 | rcventry = grp->base + i; | |
1300 | ||
1301 | if (node->map & BIT(i) || cnt >= node->cnt) { | |
1302 | rcv_array_wc_fill(dd, rcventry); | |
1303 | continue; | |
1304 | } | |
1305 | ||
1306 | hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0); | |
1307 | ||
1308 | grp->used--; | |
1309 | grp->map &= ~BIT(i); | |
1310 | cnt++; | |
1311 | ||
1312 | if (grp->used == grp->size - 1) | |
1313 | tid_group_move(grp, &rcd->tid_full_list, | |
1314 | &rcd->tid_used_list); | |
1315 | else if (!grp->used) | |
1316 | tid_group_move(grp, &rcd->tid_used_list, | |
1317 | &rcd->tid_group_list); | |
1318 | } | |
1319 | if (WARN_ON_ONCE(cnt & 1)) { | |
1320 | struct hfi1_ctxtdata *rcd = flow->req->rcd; | |
1321 | struct hfi1_devdata *dd = rcd->dd; | |
1322 | ||
1323 | dd_dev_err(dd, "unexpected odd free cnt %u map 0x%x used %u", | |
1324 | cnt, grp->map, grp->used); | |
1325 | } | |
1326 | } | |
1327 | ||
1328 | static void kern_program_rcvarray(struct tid_rdma_flow *flow) | |
1329 | { | |
1330 | u32 pset_idx = 0; | |
1331 | int i; | |
1332 | ||
1333 | flow->npkts = 0; | |
1334 | flow->tidcnt = 0; | |
1335 | for (i = 0; i < flow->tnode_cnt; i++) | |
1336 | kern_program_rcv_group(flow, i, &pset_idx); | |
84f4a40d | 1337 | trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow); |
838b6fd2 KW |
1338 | } |
1339 | ||
1340 | /** | |
1341 | * hfi1_kern_exp_rcv_setup() - setup TID's and flow for one segment of a | |
1342 | * TID RDMA request | |
1343 | * | |
1344 | * @req: TID RDMA request for which the segment/flow is being set up | |
1345 | * @ss: sge state, maintains state across successive segments of a sge | |
1346 | * @last: set to true after the last sge segment has been processed | |
1347 | * | |
1348 | * This function | |
1349 | * (1) finds a free flow entry in the flow circular buffer | |
1350 | * (2) finds pages and contiguous physical chunks constituting one segment | |
1351 | * of an sge | |
1352 | * (3) allocates TID group entries for those chunks | |
1353 | * (4) programs rcvarray entries in the hardware corresponding to those | |
1354 | * TID's | |
1355 | * (5) computes a tidarray with formatted TID entries which can be sent | |
1356 | * to the sender | |
1357 | * (6) Reserves and programs HW flows. | |
1358 | * (7) It also manages queuing the QP when TID/flow resources are not | |
1359 | * available. | |
1360 | * | |
1361 | * @req points to struct tid_rdma_request of which the segments are a part. The | |
1362 | * function uses qp, rcd and seg_len members of @req. In the absence of errors, | |
1363 | * req->flow_idx is the index of the flow which has been prepared in this | |
1364 | * invocation of function call. With flow = &req->flows[req->flow_idx], | |
1365 | * flow->tid_entry contains the TID array which the sender can use for TID RDMA | |
1366 | * sends and flow->npkts contains number of packets required to send the | |
1367 | * segment. | |
1368 | * | |
1369 | * hfi1_check_sge_align should be called prior to calling this function and if | |
1370 | * it signals error TID RDMA cannot be used for this sge and this function | |
1371 | * should not be called. | |
1372 | * | |
1373 | * For the queuing, caller must hold the flow->req->qp s_lock from the send | |
1374 | * engine and the function will procure the exp_lock. | |
1375 | * | |
1376 | * Return: | |
1377 | * The function returns -EAGAIN if sufficient number of TID/flow resources to | |
1378 | * map the segment could not be allocated. In this case the function should be | |
1379 | * called again with previous arguments to retry the TID allocation. There are | |
1380 | * no other error returns. The function returns 0 on success. | |
1381 | */ | |
1382 | int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req, | |
1383 | struct rvt_sge_state *ss, bool *last) | |
1384 | __must_hold(&req->qp->s_lock) | |
1385 | { | |
1386 | struct tid_rdma_flow *flow = &req->flows[req->setup_head]; | |
1387 | struct hfi1_ctxtdata *rcd = req->rcd; | |
1388 | struct hfi1_qp_priv *qpriv = req->qp->priv; | |
1389 | unsigned long flags; | |
1390 | struct rvt_qp *fqp; | |
1391 | u16 clear_tail = req->clear_tail; | |
1392 | ||
1393 | lockdep_assert_held(&req->qp->s_lock); | |
1394 | /* | |
1395 | * We return error if either (a) we don't have space in the flow | |
1396 | * circular buffer, or (b) we already have max entries in the buffer. | |
1397 | * Max entries depend on the type of request we are processing and the | |
1398 | * negotiated TID RDMA parameters. | |
1399 | */ | |
1400 | if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) || | |
1401 | CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >= | |
1402 | req->n_flows) | |
1403 | return -EINVAL; | |
1404 | ||
1405 | /* | |
1406 | * Get pages and identify contiguous physical memory chunks for the segment. | |
1407 | * If we cannot determine a DMA address mapping we will treat it just | |
1408 | * as if we ran out of space above. | |
1409 | */ | |
1410 | if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) { | |
1411 | hfi1_wait_kmem(flow->req->qp); | |
1412 | return -ENOMEM; | |
1413 | } | |
1414 | ||
1415 | spin_lock_irqsave(&rcd->exp_lock, flags); | |
1416 | if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp)) | |
1417 | goto queue; | |
1418 | ||
1419 | /* | |
1420 | * At this point we know the number of pagesets and hence the number of | |
1421 | * TID's to map the segment. Allocate the TID's from the TID groups. If | |
1422 | * we cannot allocate the required number we exit and try again later | |
1423 | */ | |
1424 | if (kern_alloc_tids(flow)) | |
1425 | goto queue; | |
1426 | /* | |
1427 | * Finally program the TID entries with the pagesets, compute the | |
1428 | * tidarray and enable the HW flow | |
1429 | */ | |
1430 | kern_program_rcvarray(flow); | |
1431 | ||
1432 | /* | |
1433 | * Setup the flow state with relevant information. | |
1434 | * This information is used for tracking the sequence of data packets | |
1435 | * for the segment. | |
1436 | * The flow is set up here as this is the most accurate time and place | |
1437 | * to do so. Doing it at a later time runs the risk of the flow data in | |
1438 | * qpriv getting out of sync. | |
1439 | */ | |
1440 | memset(&flow->flow_state, 0x0, sizeof(flow->flow_state)); | |
1441 | flow->idx = qpriv->flow_state.index; | |
1442 | flow->flow_state.generation = qpriv->flow_state.generation; | |
1443 | flow->flow_state.spsn = qpriv->flow_state.psn; | |
1444 | flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1; | |
1445 | flow->flow_state.r_next_psn = | |
1446 | full_flow_psn(flow, flow->flow_state.spsn); | |
1447 | qpriv->flow_state.psn += flow->npkts; | |
1448 | ||
1449 | dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp); | |
1450 | /* get head before dropping lock */ | |
1451 | fqp = first_qp(rcd, &rcd->rarr_queue); | |
1452 | spin_unlock_irqrestore(&rcd->exp_lock, flags); | |
1453 | tid_rdma_schedule_tid_wakeup(fqp); | |
1454 | ||
1455 | req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1); | |
1456 | return 0; | |
1457 | queue: | |
1458 | queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp); | |
1459 | spin_unlock_irqrestore(&rcd->exp_lock, flags); | |
1460 | return -EAGAIN; | |
1461 | } | |
1462 | ||
1463 | static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow) | |
1464 | { | |
1465 | flow->npagesets = 0; | |
1466 | } | |
1467 | ||
1468 | /* | |
1469 | * This function is called after one segment has been successfully sent to | |
1470 | * release the flow and TID HW/SW resources for that segment. The segments for a | |
1471 | * TID RDMA request are set up and cleared in FIFO order which is managed using a | |
1472 | * circular buffer. | |
1473 | */ | |
1474 | int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req) | |
1475 | __must_hold(&req->qp->s_lock) | |
1476 | { | |
1477 | struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; | |
1478 | struct hfi1_ctxtdata *rcd = req->rcd; | |
1479 | unsigned long flags; | |
1480 | int i; | |
1481 | struct rvt_qp *fqp; | |
1482 | ||
1483 | lockdep_assert_held(&req->qp->s_lock); | |
1484 | /* Exit if we have nothing in the flow circular buffer */ | |
1485 | if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) | |
1486 | return -EINVAL; | |
1487 | ||
1488 | spin_lock_irqsave(&rcd->exp_lock, flags); | |
1489 | ||
1490 | for (i = 0; i < flow->tnode_cnt; i++) | |
1491 | kern_unprogram_rcv_group(flow, i); | |
1492 | /* To prevent double unprogramming */ | |
1493 | flow->tnode_cnt = 0; | |
1494 | /* get head before dropping lock */ | |
1495 | fqp = first_qp(rcd, &rcd->rarr_queue); | |
1496 | spin_unlock_irqrestore(&rcd->exp_lock, flags); | |
1497 | ||
1498 | dma_unmap_flow(flow); | |
1499 | ||
1500 | hfi1_tid_rdma_reset_flow(flow); | |
1501 | req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1); | |
1502 | ||
1503 | if (fqp == req->qp) { | |
1504 | __trigger_tid_waiter(fqp); | |
1505 | rvt_put_qp(fqp); | |
1506 | } else { | |
1507 | tid_rdma_schedule_tid_wakeup(fqp); | |
1508 | } | |
1509 | ||
1510 | return 0; | |
1511 | } | |
1512 | ||
1513 | /* | |
1514 | * This function is called to release all the tid entries for | |
1515 | * a request. | |
1516 | */ | |
1517 | void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req) | |
1518 | __must_hold(&req->qp->s_lock) | |
1519 | { | |
1520 | /* Use memory barrier for proper ordering */ | |
1521 | while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) { | |
1522 | if (hfi1_kern_exp_rcv_clear(req)) | |
1523 | break; | |
1524 | } | |
1525 | } | |
1526 | ||
1527 | /** | |
1528 | * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information | |
1529 | * @req - the tid rdma request to be cleaned | |
1530 | */ | |
1531 | static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req) | |
1532 | { | |
1533 | kfree(req->flows); | |
1534 | req->flows = NULL; | |
1535 | } | |
1536 | ||
1537 | /** | |
1538 | * __trdma_clean_swqe - clean up for large sized QPs | |
1539 | * @qp: the queue pair | |
1540 | * @wqe: the send wqe | |
1541 | */ | |
1542 | void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe) | |
1543 | { | |
1544 | struct hfi1_swqe_priv *p = wqe->priv; | |
1545 | ||
1546 | hfi1_kern_exp_rcv_free_flows(&p->tid_req); | |
1547 | } | |
1548 | ||
1549 | /* | |
1550 | * This can be called at QP create time or in the data path. | |
1551 | */ | |
1552 | static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req, | |
1553 | gfp_t gfp) | |
1554 | { | |
1555 | struct tid_rdma_flow *flows; | |
1556 | int i; | |
1557 | ||
1558 | if (likely(req->flows)) | |
1559 | return 0; | |
1560 | flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp, | |
1561 | req->rcd->numa_id); | |
1562 | if (!flows) | |
1563 | return -ENOMEM; | |
1564 | /* mini init */ | |
1565 | for (i = 0; i < MAX_FLOWS; i++) { | |
1566 | flows[i].req = req; | |
1567 | flows[i].npagesets = 0; | |
1568 | flows[i].pagesets[0].mapped = 0; | |
1569 | } | |
1570 | req->flows = flows; | |
1571 | return 0; | |
1572 | } | |
1573 | ||
1574 | static void hfi1_init_trdma_req(struct rvt_qp *qp, | |
1575 | struct tid_rdma_request *req) | |
1576 | { | |
1577 | struct hfi1_qp_priv *qpriv = qp->priv; | |
1578 | ||
1579 | /* | |
1580 | * Initialize various TID RDMA request variables. | |
1581 | * These variables are "static", which is why they | |
1582 | * can be pre-initialized here before the WRs has | |
1583 | * even been submitted. | |
1584 | * However, non-NULL values for these variables do not | |
1585 | * imply that this WQE has been enabled for TID RDMA. | |
1586 | * Drivers should check the WQE's opcode to determine | |
1587 | * if a request is a TID RDMA one or not. | |
1588 | */ | |
1589 | req->qp = qp; | |
1590 | req->rcd = qpriv->rcd; | |
1591 | } | |
2f16a696 KW |
1592 | |
1593 | u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry, | |
1594 | void *context, int vl, int mode, u64 data) | |
1595 | { | |
1596 | struct hfi1_devdata *dd = context; | |
1597 | ||
1598 | return dd->verbs_dev.n_tidwait; | |
1599 | } |