]>
Commit | Line | Data |
---|---|---|
f931551b | 1 | /* |
f7cf9a61 | 2 | * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. |
1fb9fed6 | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. * All rights reserved. |
f931551b RC |
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | */ | |
34 | ||
35 | #include <linux/err.h> | |
36 | #include <linux/vmalloc.h> | |
869a2a96 | 37 | #include <rdma/rdma_vt.h> |
1dd173b0 MM |
38 | #ifdef CONFIG_DEBUG_FS |
39 | #include <linux/seq_file.h> | |
40 | #endif | |
f931551b RC |
41 | |
42 | #include "qib.h" | |
43 | ||
898fa52b HC |
44 | static inline unsigned mk_qpn(struct rvt_qpn_table *qpt, |
45 | struct rvt_qpn_map *map, unsigned off) | |
f931551b | 46 | { |
898fa52b | 47 | return (map - qpt->map) * RVT_BITS_PER_PAGE + off; |
f931551b RC |
48 | } |
49 | ||
/*
 * Find the next candidate bit offset in @map for a free QPN.
 *
 * When the device steers packets by low QPN bits (@qpt_mask non-zero),
 * candidate QPNs are generated arithmetically so that
 * ((qpn & qpt_mask) >> 1) stays below the kernel receive-queue count @n;
 * otherwise the bitmap page is simply scanned for the next zero bit.
 * The returned offset may be >= RVT_BITS_PER_PAGE; the caller bounds it.
 */
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n, u16 qpt_mask)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}
63 | ||
/*
 * Per-opcode work-request parameters handed to rdmavt: the size of the
 * corresponding ib_*_wr structure, the QP types that may post the opcode
 * (bitmask of enum ib_qp_type), and special-handling flags for atomics.
 * Opcodes not listed here are rejected by rdmavt at post time.
 */
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
	[IB_WR_RDMA_WRITE] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},

	[IB_WR_RDMA_READ] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_RC),
		.flags = RVT_OPERATION_ATOMIC,
	},

	[IB_WR_ATOMIC_CMP_AND_SWP] = {
		.length = sizeof(struct ib_atomic_wr),
		.qpt_support = BIT(IB_QPT_RC),
		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
	},

	[IB_WR_ATOMIC_FETCH_AND_ADD] = {
		.length = sizeof(struct ib_atomic_wr),
		.qpt_support = BIT(IB_QPT_RC),
		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
	},

	[IB_WR_RDMA_WRITE_WITH_IMM] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},

	[IB_WR_SEND] = {
		.length = sizeof(struct ib_send_wr),
		.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
			       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},

	[IB_WR_SEND_WITH_IMM] = {
		.length = sizeof(struct ib_send_wr),
		.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
			       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},

};
106 | ||
/*
 * Allocate a zeroed bitmap page for @map and install it under qpt->lock.
 * On allocation failure @page is 0: free_page(0) is a no-op and map->page
 * stays NULL, so the caller must re-check map->page afterwards.
 */
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
122 | ||
/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 *
 * Returns the allocated QPN on success, -EINVAL if the special QP for
 * the port is already in use, or -ENOMEM if no QPN could be allocated.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	u16 qpt_mask = dd->qpn_mask;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		/* QPN 0 for SMI, 1 for GSI; two flag bits per port. */
		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	/* Start just past the last allocation; QPNs 0/1 are reserved. */
	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			/* Still NULL means the page allocation failed. */
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues, qpt_mask);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			/* Wrapped: restart at map 0, skipping QPNs 0/1. */
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
209 | ||
/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: rdmavt device info backing this qib device
 *
 * Counts the special QPs (QP0/QP1) still installed on each port.
 *
 * Return: number of special QPs still in use; non-zero means the
 * device cannot be torn down yet.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}
232 | ||
20f333b6 | 233 | void qib_notify_qp_reset(struct rvt_qp *qp) |
f931551b | 234 | { |
ffc26907 | 235 | struct qib_qp_priv *priv = qp->priv; |
47c7ea6d | 236 | |
ffc26907 | 237 | atomic_set(&priv->s_dma_busy, 0); |
f931551b RC |
238 | } |
239 | ||
/*
 * rdmavt callback: a QP moved to the error state.  Unless the send
 * engine currently owns the QP (RVT_S_BUSY), pull it off the iowait
 * list and release any send-side resources it is holding.
 */
void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	/* Remove from the wait list only if the send engine isn't active. */
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		/* Drop the MR reference held for an in-progress RDMA read. */
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		/* Return the staged send descriptor, if any. */
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}
264 | ||
70696ea7 | 265 | static int mtu_to_enum(u32 mtu) |
f931551b | 266 | { |
70696ea7 | 267 | int enum_mtu; |
f931551b | 268 | |
70696ea7 HC |
269 | switch (mtu) { |
270 | case 4096: | |
271 | enum_mtu = IB_MTU_4096; | |
f931551b | 272 | break; |
70696ea7 HC |
273 | case 2048: |
274 | enum_mtu = IB_MTU_2048; | |
f931551b | 275 | break; |
70696ea7 HC |
276 | case 1024: |
277 | enum_mtu = IB_MTU_1024; | |
f931551b | 278 | break; |
70696ea7 HC |
279 | case 512: |
280 | enum_mtu = IB_MTU_512; | |
f931551b | 281 | break; |
70696ea7 HC |
282 | case 256: |
283 | enum_mtu = IB_MTU_256; | |
f931551b | 284 | break; |
f931551b | 285 | default: |
70696ea7 | 286 | enum_mtu = IB_MTU_2048; |
d0f2faf7 | 287 | } |
70696ea7 HC |
288 | return enum_mtu; |
289 | } | |
f931551b | 290 | |
20f333b6 HC |
291 | int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
292 | struct ib_qp_attr *attr) | |
70696ea7 HC |
293 | { |
294 | int mtu, pmtu, pidx = qp->port_num - 1; | |
295 | struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi); | |
296 | struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata, | |
297 | verbs_dev); | |
298 | mtu = ib_mtu_enum_to_int(attr->path_mtu); | |
299 | if (mtu == -1) | |
300 | return -EINVAL; | |
f931551b | 301 | |
70696ea7 HC |
302 | if (mtu > dd->pport[pidx].ibmtu) |
303 | pmtu = mtu_to_enum(dd->pport[pidx].ibmtu); | |
304 | else | |
305 | pmtu = attr->path_mtu; | |
306 | return pmtu; | |
307 | } | |
f931551b | 308 | |
/* rdmavt callback: convert a byte MTU to the IB path-MTU encoding. */
int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}
f931551b | 313 | |
/* rdmavt callback: convert a QP's path-MTU enum back to bytes. */
u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}
318 | ||
0f4d027c | 319 | void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp) |
f931551b | 320 | { |
ffc26907 | 321 | struct qib_qp_priv *priv; |
f931551b | 322 | |
0f4d027c | 323 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
47c7ea6d HC |
324 | if (!priv) |
325 | return ERR_PTR(-ENOMEM); | |
326 | priv->owner = qp; | |
f931551b | 327 | |
0f4d027c | 328 | priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL); |
47c7ea6d HC |
329 | if (!priv->s_hdr) { |
330 | kfree(priv); | |
331 | return ERR_PTR(-ENOMEM); | |
f931551b | 332 | } |
47c7ea6d | 333 | init_waitqueue_head(&priv->wait_dma); |
db3ef0eb | 334 | INIT_WORK(&priv->s_work, _qib_do_send); |
47c7ea6d | 335 | INIT_LIST_HEAD(&priv->iowait); |
f931551b | 336 | |
47c7ea6d HC |
337 | return priv; |
338 | } | |
f931551b | 339 | |
/* rdmavt callback: free the qib-private portion of a QP. */
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}
347 | ||
/* rdmavt callback: stop the QP's send engine by cancelling its work item. */
void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
}
354 | ||
20f333b6 | 355 | void qib_quiesce_qp(struct rvt_qp *qp) |
70696ea7 HC |
356 | { |
357 | struct qib_qp_priv *priv = qp->priv; | |
358 | ||
359 | wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy)); | |
360 | if (priv->s_tx) { | |
361 | qib_put_txreq(priv->s_tx); | |
362 | priv->s_tx = NULL; | |
363 | } | |
364 | } | |
365 | ||
20f333b6 | 366 | void qib_flush_qp_waiters(struct rvt_qp *qp) |
70696ea7 HC |
367 | { |
368 | struct qib_qp_priv *priv = qp->priv; | |
369 | struct qib_ibdev *dev = to_idev(qp->ibqp.device); | |
370 | ||
371 | spin_lock(&dev->rdi.pending_lock); | |
372 | if (!list_empty(&priv->iowait)) | |
373 | list_del_init(&priv->iowait); | |
374 | spin_unlock(&dev->rdi.pending_lock); | |
375 | } | |
376 | ||
/**
 * qib_check_send_wqe - validate wr/wqe
 * @qp: The qp
 * @wqe: The built wqe
 *
 * validate wr/wqe.  This is called
 * prior to inserting the wqe into
 * the ring but after the wqe has been
 * setup.
 *
 * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure
 */
int qib_check_send_wqe(struct rvt_qp *qp,
		       struct rvt_swqe *wqe)
{
	struct rvt_ah *ah;
	int ret = 0;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		/* Connected QPs: payload length is capped at 2 GB. */
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		/* Datagram QPs: payload must fit in a single path MTU. */
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		/* progress hint */
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}
415 | ||
1dd173b0 MM |
416 | #ifdef CONFIG_DEBUG_FS |
417 | ||
/* debugfs cursor over the device's QP hash table. */
struct qib_qp_iter {
	struct qib_ibdev *dev;	/* device whose QPs are being walked */
	struct rvt_qp *qp;	/* current QP, or NULL before the first next() */
	int n;			/* current hash-table bucket index */
};
423 | ||
424 | struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev) | |
425 | { | |
426 | struct qib_qp_iter *iter; | |
427 | ||
428 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | |
429 | if (!iter) | |
430 | return NULL; | |
431 | ||
432 | iter->dev = dev; | |
1dd173b0 MM |
433 | |
434 | return iter; | |
435 | } | |
436 | ||
/*
 * Advance the iterator to the next QP in the device's hash table,
 * continuing along the current bucket's chain before moving to the
 * next bucket.  Returns 0 when a QP was found, 1 when the table is
 * exhausted.
 *
 * NOTE(review): uses rcu_dereference(), so the caller presumably must
 * hold rcu_read_lock() across the walk — confirm against the debugfs
 * seq_file callers.
 */
int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
459 | ||
/* Printable names indexed by enum ib_qp_type, for the debugfs dump. */
static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};
463 | ||
/*
 * Emit one debugfs line describing the iterator's current QP: state,
 * last posted opcode, send flags, PSN window, and ring indices.  The
 * field order must match the format string exactly.
 */
void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	/* Oldest un-completed send WQE, used for the opcode/ssn columns. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr));
}
493 | ||
494 | #endif |