/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
			   struct pvrdma_cq **recv_cq)
{
	*send_cq = to_vcq(qp->ibqp.send_cq);
	*recv_cq = to_vcq(qp->ibqp.recv_cq);
}

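/*
 * Take both CQ locks in a stable order (ascending cq_handle) so that two
 * paths locking the same send/recv CQ pair can never deadlock by acquiring
 * them in opposite orders. When both queues share one CQ it is locked only
 * once, with sparse annotations keeping the acquire/release counts balanced.
 */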
static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
			    unsigned long *scq_flags,
			    unsigned long *rcq_flags)
	__acquires(scq->cq_lock) __acquires(rcq->cq_lock)
{
	if (scq == rcq) {
		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
		__acquire(rcq->cq_lock);
	} else if (scq->cq_handle < rcq->cq_handle) {
		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
		spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags,
					 SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irqsave(&rcq->cq_lock, *rcq_flags);
		spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
					 SINGLE_DEPTH_NESTING);
	}
}

static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
			      unsigned long *scq_flags,
			      unsigned long *rcq_flags)
	__releases(scq->cq_lock) __releases(rcq->cq_lock)
{
	if (scq == rcq) {
		__release(rcq->cq_lock);
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
	} else if (scq->cq_handle < rcq->cq_handle) {
		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
	} else {
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
	}
}

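/*
 * Return a QP to the RESET state: flush any of its completions still queued
 * on the send/recv CQs under the CQ locks, then rewind the ring indices.
 */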
static void pvrdma_reset_qp(struct pvrdma_qp *qp)
{
	struct pvrdma_cq *scq, *rcq;
	unsigned long scq_flags, rcq_flags;

	/* Clean up cqes */
	get_cqs(qp, &scq, &rcq);
	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	_pvrdma_flush_cqe(qp, scq);
	if (scq != rcq)
		_pvrdma_flush_cqe(qp, rcq);

	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	/*
	 * Reset queuepair. The checks are because usermode queuepairs won't
	 * have kernel ringstates.
	 */
	if (qp->rq.ring) {
		atomic_set(&qp->rq.ring->cons_head, 0);
		atomic_set(&qp->rq.ring->prod_tail, 0);
	}
	if (qp->sq.ring) {
		atomic_set(&qp->sq.ring->cons_head, 0);
		atomic_set(&qp->sq.ring->prod_tail, 0);
	}
}

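/*
 * Queue sizing: the requested WR and SGE counts are validated against the
 * device caps, rounded up to powers of two, and written back into req_cap
 * so the caller sees the capacities actually allocated rather than the
 * ones it asked for.
 */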
static int pvrdma_set_rq_size(struct pvrdma_dev *dev,
			      struct ib_qp_cap *req_cap,
			      struct pvrdma_qp *qp)
{
	if (req_cap->max_recv_wr > dev->dsr->caps.max_qp_wr ||
	    req_cap->max_recv_sge > dev->dsr->caps.max_sge) {
		dev_warn(&dev->pdev->dev, "recv queue size invalid\n");
		return -EINVAL;
	}

	qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
	qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));

	/* Write back */
	req_cap->max_recv_wr = qp->rq.wqe_cnt;
	req_cap->max_recv_sge = qp->rq.max_sg;

	qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
					     sizeof(struct pvrdma_sge) *
					     qp->rq.max_sg);
	qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
			  PAGE_SIZE;

	return 0;
}

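/*
 * Worked example for the send-side sizing below (illustrative numbers only,
 * assuming PAGE_SIZE == 4096 and a WQE that rounds up to 128 bytes; the
 * real sizes come from the ABI headers): a request for 100 send WRs rounds
 * wqe_cnt up to 128, so the data area needs 128 * 128 / 4096 = 4 pages,
 * plus PVRDMA_QP_NUM_HEADER_PAGES in front of it.
 */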
static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
			      struct pvrdma_qp *qp)
{
	if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr ||
	    req_cap->max_send_sge > dev->dsr->caps.max_sge) {
		dev_warn(&dev->pdev->dev, "send queue size invalid\n");
		return -EINVAL;
	}

	qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));
	qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));

	/* Write back */
	req_cap->max_send_wr = qp->sq.wqe_cnt;
	req_cap->max_send_sge = qp->sq.max_sg;

	qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +
					     sizeof(struct pvrdma_sge) *
					     qp->sq.max_sg);
	/* Note: one extra page for the header. */
	qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
			  (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
			  PAGE_SIZE;

	return 0;
}

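/*
 * Page-directory layout for a kernel QP: the first PVRDMA_QP_NUM_HEADER_PAGES
 * pages hold headers (page 0 doubles as the shared ring state), followed by
 * the send queue pages and then the receive queue pages. User-space QPs
 * instead pin the buffers described by the udata command.
 */
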
/**
 * pvrdma_create_qp - create queue pair
 * @pd: protection domain
 * @init_attr: queue pair attributes
 * @udata: user data
 *
 * @return: the ib_qp pointer on success, otherwise an ERR_PTR-encoded errno.
 */
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
			       struct ib_qp_init_attr *init_attr,
			       struct ib_udata *udata)
{
	struct pvrdma_qp *qp = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
	struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
	struct pvrdma_create_qp ucmd;
	unsigned long flags;
	int ret;
	bool is_srq = !!init_attr->srq;

	if (init_attr->create_flags) {
		dev_warn(&dev->pdev->dev,
			 "invalid create queuepair flags %#x\n",
			 init_attr->create_flags);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_RC &&
	    init_attr->qp_type != IB_QPT_UD &&
	    init_attr->qp_type != IB_QPT_GSI) {
		dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
			 init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	if (is_srq && !dev->dsr->caps.max_srq) {
		dev_warn(&dev->pdev->dev,
			 "SRQs not supported by device\n");
		return ERR_PTR(-EINVAL);
	}

	if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
		return ERR_PTR(-ENOMEM);

	switch (init_attr->qp_type) {
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > pd->device->phys_port_cnt ||
		    udata) {
			dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
			ret = -EINVAL;
			goto err_qp;
		}
		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UD:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp) {
			ret = -ENOMEM;
			goto err_qp;
		}

		spin_lock_init(&qp->sq.lock);
		spin_lock_init(&qp->rq.lock);
		mutex_init(&qp->mutex);
		refcount_set(&qp->refcnt, 1);
		init_completion(&qp->free);

		qp->state = IB_QPS_RESET;
		qp->is_kernel = !(pd->uobject && udata);

		if (!qp->is_kernel) {
			dev_dbg(&dev->pdev->dev,
				"create queuepair from user space\n");

			if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
				ret = -EFAULT;
				goto err_qp;
			}

			if (!is_srq) {
				/* Pin the user-space receive queue buffer. */
				qp->rumem = ib_umem_get(pd->uobject->context,
							ucmd.rbuf_addr,
							ucmd.rbuf_size, 0, 0);
				if (IS_ERR(qp->rumem)) {
					ret = PTR_ERR(qp->rumem);
					goto err_qp;
				}
				qp->srq = NULL;
			} else {
				qp->rumem = NULL;
				qp->srq = to_vsrq(init_attr->srq);
			}

			qp->sumem = ib_umem_get(pd->uobject->context,
						ucmd.sbuf_addr,
						ucmd.sbuf_size, 0, 0);
			if (IS_ERR(qp->sumem)) {
				if (!is_srq)
					ib_umem_release(qp->rumem);
				ret = PTR_ERR(qp->sumem);
				goto err_qp;
			}

			qp->npages_send = ib_umem_page_count(qp->sumem);
			if (!is_srq)
				qp->npages_recv = ib_umem_page_count(qp->rumem);
			else
				qp->npages_recv = 0;
			qp->npages = qp->npages_send + qp->npages_recv;
		} else {
			ret = pvrdma_set_sq_size(to_vdev(pd->device),
						 &init_attr->cap, qp);
			if (ret)
				goto err_qp;

			ret = pvrdma_set_rq_size(to_vdev(pd->device),
						 &init_attr->cap, qp);
			if (ret)
				goto err_qp;

			qp->npages = qp->npages_send + qp->npages_recv;

			/* Skip header page. */
			qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;

			/* Recv queue pages are after send pages. */
			qp->rq.offset = qp->npages_send * PAGE_SIZE;
		}

		if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
			dev_warn(&dev->pdev->dev,
				 "overflow pages in queuepair\n");
			ret = -EINVAL;
			goto err_umem;
		}

		ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages,
					   qp->is_kernel);
		if (ret) {
			dev_warn(&dev->pdev->dev,
				 "could not allocate page directory\n");
			goto err_umem;
		}

		if (!qp->is_kernel) {
			pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
			if (!is_srq)
				pvrdma_page_dir_insert_umem(&qp->pdir,
							    qp->rumem,
							    qp->npages_send);
		} else {
			/* Ring state is always the first page. */
			qp->sq.ring = qp->pdir.pages[0];
			qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1];
		}
		break;
	default:
		ret = -EINVAL;
		goto err_qp;
	}

	/* Not supported */
	init_attr->cap.max_inline_data = 0;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
	cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
	if (is_srq)
		cmd->srq_handle = to_vsrq(init_attr->srq)->srq_handle;
	else
		cmd->srq_handle = 0;
	cmd->max_send_wr = init_attr->cap.max_send_wr;
	cmd->max_recv_wr = init_attr->cap.max_recv_wr;
	cmd->max_send_sge = init_attr->cap.max_send_sge;
	cmd->max_recv_sge = init_attr->cap.max_recv_sge;
	cmd->max_inline_data = init_attr->cap.max_inline_data;
	cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
	cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
	cmd->is_srq = is_srq;
	cmd->lkey = 0;
	cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
	cmd->total_chunks = qp->npages;
	cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
	cmd->pdir_dma = qp->pdir.dir_dma;

	dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
		cmd->max_send_wr, cmd->max_recv_wr, cmd->max_send_sge,
		cmd->max_recv_sge);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create queuepair, error: %d\n", ret);
		goto err_pdir;
	}

	/* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
	qp->qp_handle = resp->qpn;
	qp->port = init_attr->port_num;
	qp->ibqp.qp_num = resp->qpn;
	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	return &qp->ibqp;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &qp->pdir);
err_umem:
	if (!qp->is_kernel) {
		if (qp->rumem)
			ib_umem_release(qp->rumem);
		if (qp->sumem)
			ib_umem_release(qp->sumem);
	}
err_qp:
	kfree(qp);
	atomic_dec(&dev->num_qps);

	return ERR_PTR(ret);
}

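/*
 * QP tear-down: after removing the QP from the table and flushing its CQEs,
 * drop the creation reference and wait on qp->free, which is completed by
 * whichever path drops the last reference, so the structure is never freed
 * while a completion-handling path still holds it.
 */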
static void pvrdma_free_qp(struct pvrdma_qp *qp)
{
	struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
	struct pvrdma_cq *scq;
	struct pvrdma_cq *rcq;
	unsigned long flags, scq_flags, rcq_flags;

	/* In case cq is polling */
	get_cqs(qp, &scq, &rcq);
	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	_pvrdma_flush_cqe(qp, scq);
	if (scq != rcq)
		_pvrdma_flush_cqe(qp, rcq);

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	dev->qp_tbl[qp->qp_handle] = NULL;
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	if (refcount_dec_and_test(&qp->refcnt))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	if (!qp->is_kernel) {
		if (qp->rumem)
			ib_umem_release(qp->rumem);
		if (qp->sumem)
			ib_umem_release(qp->sumem);
	}

	pvrdma_page_dir_cleanup(dev, &qp->pdir);

	kfree(qp);

	atomic_dec(&dev->num_qps);
}

/**
 * pvrdma_destroy_qp - destroy a queue pair
 * @qp: the queue pair to destroy
 *
 * @return: 0 on success.
 */
int pvrdma_destroy_qp(struct ib_qp *qp)
{
	struct pvrdma_qp *vqp = to_vqp(qp);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
	cmd->qp_handle = vqp->qp_handle;

	ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
	if (ret < 0)
		dev_warn(&to_vdev(qp->device)->pdev->dev,
			 "destroy queuepair failed, error: %d\n", ret);

	pvrdma_free_qp(vqp);

	return 0;
}

/**
 * pvrdma_modify_qp - modify queue pair attributes
 * @ibqp: the queue pair
 * @attr: the new queue pair's attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @returns 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	struct pvrdma_qp *qp = to_vqp(ibqp);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp;
	int cur_state, next_state;
	int ret;

	/* Sanity checks; hold the QP mutex across the state transition. */
	mutex_lock(&qp->mutex);
	cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
		qp->state;
	next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_ETHERNET)) {
		ret = -EINVAL;
		goto out;
	}

	if (attr_mask & IB_QP_PORT) {
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		if (attr->min_rnr_timer > 31) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (attr->pkey_index >= dev->dsr->caps.max_pkeys) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (cur_state == next_state && cur_state == IB_QPS_RESET) {
		ret = 0;
		goto out;
	}

	qp->state = next_state;
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_QP;
	cmd->qp_handle = qp->qp_handle;
	cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
	cmd->attrs.qp_state = ib_qp_state_to_pvrdma(attr->qp_state);
	cmd->attrs.cur_qp_state =
		ib_qp_state_to_pvrdma(attr->cur_qp_state);
	cmd->attrs.path_mtu = ib_mtu_to_pvrdma(attr->path_mtu);
	cmd->attrs.path_mig_state =
		ib_mig_state_to_pvrdma(attr->path_mig_state);
	cmd->attrs.qkey = attr->qkey;
	cmd->attrs.rq_psn = attr->rq_psn;
	cmd->attrs.sq_psn = attr->sq_psn;
	cmd->attrs.dest_qp_num = attr->dest_qp_num;
	cmd->attrs.qp_access_flags =
		ib_access_flags_to_pvrdma(attr->qp_access_flags);
	cmd->attrs.pkey_index = attr->pkey_index;
	cmd->attrs.alt_pkey_index = attr->alt_pkey_index;
	cmd->attrs.en_sqd_async_notify = attr->en_sqd_async_notify;
	cmd->attrs.sq_draining = attr->sq_draining;
	cmd->attrs.max_rd_atomic = attr->max_rd_atomic;
	cmd->attrs.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	cmd->attrs.min_rnr_timer = attr->min_rnr_timer;
	cmd->attrs.port_num = attr->port_num;
	cmd->attrs.timeout = attr->timeout;
	cmd->attrs.retry_cnt = attr->retry_cnt;
	cmd->attrs.rnr_retry = attr->rnr_retry;
	cmd->attrs.alt_port_num = attr->alt_port_num;
	cmd->attrs.alt_timeout = attr->alt_timeout;
	ib_qp_cap_to_pvrdma(&cmd->attrs.cap, &attr->cap);
	rdma_ah_attr_to_pvrdma(&cmd->attrs.ah_attr, &attr->ah_attr);
	rdma_ah_attr_to_pvrdma(&cmd->attrs.alt_ah_attr, &attr->alt_ah_attr);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_MODIFY_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not modify queuepair, error: %d\n", ret);
	} else if (rsp.hdr.err > 0) {
		dev_warn(&dev->pdev->dev,
			 "cannot modify queuepair, error: %d\n", rsp.hdr.err);
		ret = -EINVAL;
	}

	if (ret == 0 && next_state == IB_QPS_RESET)
		pvrdma_reset_qp(qp);

out:
	mutex_unlock(&qp->mutex);

	return ret;
}

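/*
 * A queue's WQE slots are laid out back to back in the page directory:
 * slot n starts at the queue's base offset plus n * wqe_size, and the
 * power-of-two wqe_size keeps each slot within a single page (for WQEs
 * no larger than a page).
 */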
static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
	return pvrdma_page_dir_get_ptr(&qp->pdir,
				       qp->sq.offset + n * qp->sq.wqe_size);
}

static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
	return pvrdma_page_dir_get_ptr(&qp->pdir,
				       qp->rq.offset + n * qp->rq.wqe_size);
}

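/*
 * Translate an IB fast-register WR into the device WQE header and hand the
 * MR's page list to the device through the MR's own page directory.
 */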
static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr)
{
	struct pvrdma_user_mr *mr = to_vmr(wr->mr);

	wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
	wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
	wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
	wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
	wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
	wqe_hdr->wr.fast_reg.access_flags = wr->access;
	wqe_hdr->wr.fast_reg.rkey = wr->key;

	return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages,
						mr->npages);
}

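/*
 * Posting discipline shared by the send and receive paths below: reserve a
 * slot with pvrdma_idx_ring_has_space(), write the WQE header and SGEs into
 * it, issue smp_wmb() so the WQE is visible before the producer index moves,
 * then advance prod_tail and finally ring the UAR doorbell.
 */
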
/**
 * pvrdma_post_send - post send work request entries on a QP
 * @ibqp: the QP
 * @wr: work request list to post
 * @bad_wr: the first bad WR returned
 *
 * @return: 0 on success, otherwise errno returned.
 */
int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	struct pvrdma_qp *qp = to_vqp(ibqp);
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	unsigned long flags;
	struct pvrdma_sq_wqe_hdr *wqe_hdr;
	struct pvrdma_sge *sge;
	int i, ret;

	/*
	 * In states lower than RTS, we can fail immediately. In other states,
	 * just post and let the device figure it out.
	 */
	if (qp->state < IB_QPS_RTS) {
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);

	while (wr) {
		unsigned int tail = 0;

		if (unlikely(!pvrdma_idx_ring_has_space(
				qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "send queue is full\n");
			*bad_wr = wr;
			ret = -ENOMEM;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "send SGE overflow\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		}

		if (unlikely(wr->opcode < 0)) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "invalid send opcode\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		}

		/*
		 * Only support UD, RC.
		 * Need to check opcode table for thorough checking.
		 * opcode			_UD	_UC	_RC
		 * _SEND			x	x	x
		 * _SEND_WITH_IMM		x	x	x
		 * _RDMA_WRITE				x	x
		 * _RDMA_WRITE_WITH_IMM			x	x
		 * _LOCAL_INV				x	x
		 * _SEND_WITH_INV			x	x
		 * _RDMA_READ					x
		 * _ATOMIC_CMP_AND_SWP				x
		 * _ATOMIC_FETCH_AND_ADD			x
		 * _MASK_ATOMIC_CMP_AND_SWP			x
		 * _MASK_ATOMIC_FETCH_AND_ADD			x
		 * _REG_MR					x
		 *
		 */
		if (qp->ibqp.qp_type != IB_QPT_UD &&
		    qp->ibqp.qp_type != IB_QPT_RC &&
		    wr->opcode != IB_WR_SEND) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "unsupported queuepair type\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		} else if (qp->ibqp.qp_type == IB_QPT_UD ||
			   qp->ibqp.qp_type == IB_QPT_GSI) {
			if (wr->opcode != IB_WR_SEND &&
			    wr->opcode != IB_WR_SEND_WITH_IMM) {
				dev_warn_ratelimited(&dev->pdev->dev,
						     "invalid send opcode\n");
				*bad_wr = wr;
				ret = -EINVAL;
				goto out;
			}
		}

		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
		memset(wqe_hdr, 0, sizeof(*wqe_hdr));
		wqe_hdr->wr_id = wr->wr_id;
		wqe_hdr->num_sge = wr->num_sge;
		wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode);
		wqe_hdr->send_flags = ib_send_flags_to_pvrdma(wr->send_flags);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			wqe_hdr->ex.imm_data = wr->ex.imm_data;

		switch (qp->ibqp.qp_type) {
		case IB_QPT_GSI:
		case IB_QPT_UD:
			if (unlikely(!ud_wr(wr)->ah)) {
				dev_warn_ratelimited(&dev->pdev->dev,
						     "invalid address handle\n");
				*bad_wr = wr;
				ret = -EINVAL;
				goto out;
			}

			/*
			 * Use qkey from qp context if high order bit set,
			 * otherwise from work request.
			 */
			wqe_hdr->wr.ud.remote_qpn = ud_wr(wr)->remote_qpn;
			wqe_hdr->wr.ud.remote_qkey =
				ud_wr(wr)->remote_qkey & 0x80000000 ?
				qp->qkey : ud_wr(wr)->remote_qkey;
			wqe_hdr->wr.ud.av = to_vah(ud_wr(wr)->ah)->av;

			break;
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				wqe_hdr->wr.rdma.remote_addr =
					rdma_wr(wr)->remote_addr;
				wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey;
				break;
			case IB_WR_LOCAL_INV:
			case IB_WR_SEND_WITH_INV:
				wqe_hdr->ex.invalidate_rkey =
					wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				wqe_hdr->wr.atomic.remote_addr =
					atomic_wr(wr)->remote_addr;
				wqe_hdr->wr.atomic.rkey = atomic_wr(wr)->rkey;
				wqe_hdr->wr.atomic.compare_add =
					atomic_wr(wr)->compare_add;
				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP)
					wqe_hdr->wr.atomic.swap =
						atomic_wr(wr)->swap;
				break;
			case IB_WR_REG_MR:
				ret = set_reg_seg(wqe_hdr, reg_wr(wr));
				if (ret < 0) {
					dev_warn_ratelimited(&dev->pdev->dev,
							     "Failed to set fast register work request\n");
					*bad_wr = wr;
					goto out;
				}
				break;
			default:
				break;
			}

			break;
		default:
			dev_warn_ratelimited(&dev->pdev->dev,
					     "invalid queuepair type\n");
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
		for (i = 0; i < wr->num_sge; i++) {
			/* Need to check wqe_size 0 or max size */
			sge->addr = wr->sg_list[i].addr;
			sge->length = wr->sg_list[i].length;
			sge->lkey = wr->sg_list[i].lkey;
			sge++;
		}

		/* Make sure wqe is written before index update */
		smp_wmb();

		/* Update shared sq ring */
		pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
				    qp->sq.wqe_cnt);

		wr = wr->next;
	}

	ret = 0;

out:
	spin_unlock_irqrestore(&qp->sq.lock, flags);

	if (!ret)
		pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle);

	return ret;
}

/**
 * pvrdma_post_recv - post receive work request entries on a QP
 * @ibqp: the QP
 * @wr: the work request list to post
 * @bad_wr: the first bad WR returned
 *
 * @return: 0 on success, otherwise errno returned.
 */
int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		     struct ib_recv_wr **bad_wr)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	unsigned long flags;
	struct pvrdma_qp *qp = to_vqp(ibqp);
	struct pvrdma_rq_wqe_hdr *wqe_hdr;
	struct pvrdma_sge *sge;
	int ret = 0;
	int i;

	/*
	 * In the RESET state, we can fail immediately. For other states,
	 * just post and let the device figure it out.
	 */
	if (qp->state == IB_QPS_RESET) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->srq) {
		dev_warn(&dev->pdev->dev, "QP associated with SRQ\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->rq.lock, flags);

	while (wr) {
		unsigned int tail = 0;

		if (unlikely(wr->num_sge > qp->rq.max_sg ||
			     wr->num_sge < 0)) {
			ret = -EINVAL;
			*bad_wr = wr;
			dev_warn_ratelimited(&dev->pdev->dev,
					     "recv SGE overflow\n");
			goto out;
		}

		if (unlikely(!pvrdma_idx_ring_has_space(
				qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
			ret = -ENOMEM;
			*bad_wr = wr;
			dev_warn_ratelimited(&dev->pdev->dev,
					     "recv queue full\n");
			goto out;
		}

		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
		wqe_hdr->wr_id = wr->wr_id;
		wqe_hdr->num_sge = wr->num_sge;
		wqe_hdr->total_len = 0;

		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
		for (i = 0; i < wr->num_sge; i++) {
			sge->addr = wr->sg_list[i].addr;
			sge->length = wr->sg_list[i].length;
			sge->lkey = wr->sg_list[i].lkey;
			sge++;
		}

		/* Make sure wqe is written before index update */
		smp_wmb();

		/* Update shared rq ring */
		pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
				    qp->rq.wqe_cnt);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle);

	return ret;

out:
	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return ret;
}

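/*
 * A QP in RESET is reported without a device round-trip; for all other
 * states the attributes are fetched from the device and the cached
 * qp->state is refreshed from the response.
 */
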
/**
 * pvrdma_query_qp - query a queue pair's attributes
 * @ibqp: the queue pair to query
 * @attr: the queue pair's attributes
 * @attr_mask: attributes mask
 * @init_attr: initial queue pair attributes
 *
 * @returns 0 on success, otherwise returns an errno.
 */
int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	struct pvrdma_qp *qp = to_vqp(ibqp);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_qp *cmd = &req.query_qp;
	struct pvrdma_cmd_query_qp_resp *resp = &rsp.query_qp_resp;
	int ret = 0;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		attr->qp_state = IB_QPS_RESET;
		goto out;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_QP;
	cmd->qp_handle = qp->qp_handle;
	cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query queuepair, error: %d\n", ret);
		goto out;
	}

	attr->qp_state = pvrdma_qp_state_to_ib(resp->attrs.qp_state);
	attr->cur_qp_state =
		pvrdma_qp_state_to_ib(resp->attrs.cur_qp_state);
	attr->path_mtu = pvrdma_mtu_to_ib(resp->attrs.path_mtu);
	attr->path_mig_state =
		pvrdma_mig_state_to_ib(resp->attrs.path_mig_state);
	attr->qkey = resp->attrs.qkey;
	attr->rq_psn = resp->attrs.rq_psn;
	attr->sq_psn = resp->attrs.sq_psn;
	attr->dest_qp_num = resp->attrs.dest_qp_num;
	attr->qp_access_flags =
		pvrdma_access_flags_to_ib(resp->attrs.qp_access_flags);
	attr->pkey_index = resp->attrs.pkey_index;
	attr->alt_pkey_index = resp->attrs.alt_pkey_index;
	attr->en_sqd_async_notify = resp->attrs.en_sqd_async_notify;
	attr->sq_draining = resp->attrs.sq_draining;
	attr->max_rd_atomic = resp->attrs.max_rd_atomic;
	attr->max_dest_rd_atomic = resp->attrs.max_dest_rd_atomic;
	attr->min_rnr_timer = resp->attrs.min_rnr_timer;
	attr->port_num = resp->attrs.port_num;
	attr->timeout = resp->attrs.timeout;
	attr->retry_cnt = resp->attrs.retry_cnt;
	attr->rnr_retry = resp->attrs.rnr_retry;
	attr->alt_port_num = resp->attrs.alt_port_num;
	attr->alt_timeout = resp->attrs.alt_timeout;
	pvrdma_qp_cap_to_ib(&attr->cap, &resp->attrs.cap);
	pvrdma_ah_attr_to_rdma(&attr->ah_attr, &resp->attrs.ah_attr);
	pvrdma_ah_attr_to_rdma(&attr->alt_ah_attr, &resp->attrs.alt_ah_attr);

	qp->state = attr->qp_state;

	ret = 0;

out:
	attr->cur_qp_state = attr->qp_state;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->xrcd = NULL;
	init_attr->cap = attr->cap;
	init_attr->sq_sig_type = 0;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->create_flags = 0;
	init_attr->port_num = qp->port;

	mutex_unlock(&qp->mutex);
	return ret;
}