/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
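/*
 * S_IRUGO: readable but not writable via sysfs, so the table size is
 * fixed at module load, e.g. "modprobe hfi1 qp_table_size=1024".
 */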
65 | ||
66 | static void flush_tx_list(struct rvt_qp *qp); | |
67 | static int iowait_sleep( | |
68 | struct sdma_engine *sde, | |
69 | struct iowait *wait, | |
70 | struct sdma_txreq *stx, | |
71 | unsigned seq); | |
72 | static void iowait_wakeup(struct iowait *wait, int reason); | |
73 | static void iowait_sdma_drained(struct iowait *wait); | |
74 | static void qp_pio_drain(struct rvt_qp *qp); | |
75 | ||
const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
	.length = sizeof(struct ib_reg_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_RC),
},

};

static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;
	seqlock_t *lock = priv->s_iowait.lock;

	if (!lock)
		return;
	write_seqlock_irqsave(lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		rvt_put_qp(qp);
	}
	write_sequnlock_irqrestore(lock, flags);
}

static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return -1;
	}
}

/**
 * verbs_mtu_enum_to_int - convert an MTU enum (IB or OPA) to bytes
 *
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val;

	/* Constraining 10KB packets to 8KB packets */
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		mtu = OPA_MTU_8192;
	val = opa_mtu_enum_to_int((int)mtu);
	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}

int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	return 0;
}

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= RVT_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}
}

/**
 * hfi1_check_send_wqe - validate a wqe
 * @qp: the QP
 * @wqe: the built wqe
 *
 * Validate the wqe.  This is called after the wqe has been
 * set up but prior to inserting it into the ring.
 *
 * Returns -EINVAL on failure.  Otherwise returns 0 or 1; a non-zero
 * value hints to the caller that the wqe is within the PIO threshold
 * and the send may be progressed directly instead of being scheduled.
 */
int hfi1_check_send_wqe(struct rvt_qp *qp,
			struct rvt_swqe *wqe)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
			return -EINVAL;
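		/* fall through -- nothing more to check */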
	default:
		break;
	}
	return wqe->length <= piothreshold;
}

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
void _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			priv->s_sde ?
			priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->node)));
}

static void qp_pio_drain(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev;
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->s_sendcontext)
		return;
	dev = to_idev(qp->ibqp.device);
	while (iowait_pio_pending(&priv->s_iowait)) {
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
		write_sequnlock_irq(&dev->iowait_lock);
		iowait_pio_drain(&priv->s_iowait);
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
		write_sequnlock_irq(&dev->iowait_lock);
	}
}

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress; the caller must hold
 * the s_lock.
 */
void hfi1_schedule_send(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	if (hfi1_send_ok(qp))
		_hfi1_schedule_send(qp);
}

void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	rvt_put_qp(qp);
}

static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
			priv->s_iowait.lock = &dev->iowait_lock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			rvt_get_qp(qp);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}

static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

static void iowait_sdma_drained(struct iowait *wait)
{
	struct rvt_qp *qp = iowait_to_qp(wait);
	unsigned long flags;

	/*
	 * This happens when the send engine notes
	 * a QP in the error state and cannot
	 * do the flush work until that QP's
	 * sdma work has finished.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_DMA) {
		qp->s_flags &= ~RVT_S_WAIT_DMA;
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}

/**
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		/* SMA packets to VL15 */
		return dd->vld[15].sc;
	default:
		break;
	}

	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
					  sc5);
}

struct qp_iter {
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	int specials;
	int n;
};

struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
	struct qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;

	return iter;
}

int qp_iter_next(struct qp_iter *iter)
{
	struct hfi1_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials - 1 are the special qp indices
	 *
	 * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 *
	 * For example, with one port iter->specials is 2: n = 0 and 1
	 * visit that port's qp[0] and qp[1], and n = 2 maps to hash
	 * bucket 0.
	 */
	for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct hfi1_pportdata *ppd;
				struct hfi1_ibport *ibp;
				int pidx;

				pidx = n % dev->rdi.ibdev.phys_port_cnt;
				ppd = &dd_from_dev(dev)->pport[pidx];
				ibp = &ppd->ibport_data;

				if (!(n & 1))
					qp = rcu_dereference(ibp->rvp.qp[0]);
				else
					qp = rcu_dereference(ibp->rvp.qp[1]);
			} else {
				qp = rcu_dereference(
					dev->rdi.qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}

void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;
	struct send_context *send_context;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	send_context = qp_to_send_context(qp, priv->s_sc);
	seq_printf(s,
		   "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   iowait_sdma_pending(&priv->s_iowait),
		   iowait_pio_pending(&priv->s_iowait),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->r_psn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->s_avail,
		   /* ack_queue ring pointers, size */
		   qp->s_tail_ack_queue, qp->r_head_ack_queue,
		   HFI1_MAX_RDMA_ATOMIC,
		   /* remote QP info */
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr),
		   rdma_ah_get_sl(&qp->remote_ah_attr),
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   qp->s_rnr_retry,
		   sde,
		   sde ? sde->this_idx : 0,
		   send_context,
		   send_context ? send_context->sw_index : 0,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
		   qp->pid);
}

void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
				   rdi->dparms.node);
	if (!priv->s_ahg) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	iowait_init(
		&priv->s_iowait,
		1,
		_hfi1_do_send,
		iowait_sleep,
		iowait_wakeup,
		iowait_sdma_drained);
	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	kfree(priv->s_ahg);
	kfree(priv);
}

unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

void flush_qp_waiters(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	flush_iowait(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_iowait.iowork);
}

void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_sdma_drain(&priv->s_iowait);
	qp_pio_drain(qp);
	flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
	qp->r_adefered = 0;
	clear_ahg(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
	return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}

void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	seqlock_t *lock = priv->s_iowait.lock;

	if (lock) {
		write_seqlock(lock);
		if (!list_empty(&priv->s_iowait.list) &&
		    !(qp->s_flags & RVT_S_BUSY)) {
			qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
			list_del_init(&priv->s_iowait.list);
			priv->s_iowait.lock = NULL;
			rvt_put_qp(qp);
		}
		write_sequnlock(lock);
	}

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state.  It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct rvt_qp *qp = NULL;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
	int n;
	int lastwqe;
	struct ib_event ev;

	rcu_read_lock();

	/* Deal only with RC/UC qps that use the given SL. */
	for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
		for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next)) {
			if (qp->port_num == ppd->port &&
			    (qp->ibqp.qp_type == IB_QPT_UC ||
			     qp->ibqp.qp_type == IB_QPT_RC) &&
			    rdma_ah_get_sl(&qp->remote_ah_attr) == sl &&
			    (ib_rvt_state_ops[qp->state] &
			     RVT_POST_SEND_OK)) {
				spin_lock_irq(&qp->r_lock);
				spin_lock(&qp->s_hlock);
				spin_lock(&qp->s_lock);
				lastwqe = rvt_error_qp(qp,
						       IB_WC_WR_FLUSH_ERR);
				spin_unlock(&qp->s_lock);
				spin_unlock(&qp->s_hlock);
				spin_unlock_irq(&qp->r_lock);
				if (lastwqe) {
					ev.device = qp->ibqp.device;
					ev.element.qp = &qp->ibqp;
					ev.event =
						IB_EVENT_QP_LAST_WQE_REACHED;
					qp->ibqp.event_handler(&ev,
							       qp->ibqp.qp_context);
				}
			}
		}
	}

	rcu_read_unlock();
}