/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
			   struct pvrdma_cq **recv_cq)
{
	*send_cq = to_vcq(qp->ibqp.send_cq);
	*recv_cq = to_vcq(qp->ibqp.recv_cq);
}

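/*
 * Lock the send and receive CQs in a consistent order: the same-CQ case
 * takes a single lock, otherwise the CQ with the lower handle is always
 * locked first, so two paths locking the same pair cannot deadlock.
 */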
static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
			    unsigned long *scq_flags,
			    unsigned long *rcq_flags)
	__acquires(scq->cq_lock) __acquires(rcq->cq_lock)
{
	if (scq == rcq) {
		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
		__acquire(rcq->cq_lock);
	} else if (scq->cq_handle < rcq->cq_handle) {
		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
		spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags,
					 SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irqsave(&rcq->cq_lock, *rcq_flags);
		spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
					 SINGLE_DEPTH_NESTING);
	}
}

static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
			      unsigned long *scq_flags,
			      unsigned long *rcq_flags)
	__releases(scq->cq_lock) __releases(rcq->cq_lock)
{
	if (scq == rcq) {
		__release(rcq->cq_lock);
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
	} else if (scq->cq_handle < rcq->cq_handle) {
		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
	} else {
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
	}
}

static void pvrdma_reset_qp(struct pvrdma_qp *qp)
{
	struct pvrdma_cq *scq, *rcq;
	unsigned long scq_flags, rcq_flags;

	/* Clean up cqes */
	get_cqs(qp, &scq, &rcq);
	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	_pvrdma_flush_cqe(qp, scq);
	if (scq != rcq)
		_pvrdma_flush_cqe(qp, rcq);

	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	/*
	 * Reset the queuepair. The checks are needed because user-mode
	 * queuepairs do not have kernel ring state.
	 */
	if (qp->rq.ring) {
		atomic_set(&qp->rq.ring->cons_head, 0);
		atomic_set(&qp->rq.ring->prod_tail, 0);
	}
	if (qp->sq.ring) {
		atomic_set(&qp->sq.ring->cons_head, 0);
		atomic_set(&qp->sq.ring->prod_tail, 0);
	}
}

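/*
 * Queue sizing rounds the requested WR and SGE counts up to powers of
 * two and writes the actual values back into req_cap: the verbs API
 * allows a device to provide more capacity than was requested.
 */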
static int pvrdma_set_rq_size(struct pvrdma_dev *dev,
			      struct ib_qp_cap *req_cap,
			      struct pvrdma_qp *qp)
{
	if (req_cap->max_recv_wr > dev->dsr->caps.max_qp_wr ||
	    req_cap->max_recv_sge > dev->dsr->caps.max_sge) {
		dev_warn(&dev->pdev->dev, "recv queue size invalid\n");
		return -EINVAL;
	}

	qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
	qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));

	/* Write back */
	req_cap->max_recv_wr = qp->rq.wqe_cnt;
	req_cap->max_recv_sge = qp->rq.max_sg;

	qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
					     sizeof(struct pvrdma_sge) *
					     qp->rq.max_sg);
	qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
			  PAGE_SIZE;

	return 0;
}

static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
			      struct pvrdma_qp *qp)
{
	if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr ||
	    req_cap->max_send_sge > dev->dsr->caps.max_sge) {
		dev_warn(&dev->pdev->dev, "send queue size invalid\n");
		return -EINVAL;
	}

	qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));
	qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));

	/* Write back */
	req_cap->max_send_wr = qp->sq.wqe_cnt;
	req_cap->max_send_sge = qp->sq.max_sg;

	qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +
					     sizeof(struct pvrdma_sge) *
					     qp->sq.max_sg);
	/* Note: PVRDMA_QP_NUM_HEADER_PAGES extra pages for the header. */
	qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
			  (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
			  PAGE_SIZE;

	return 0;
}

/**
 * pvrdma_create_qp - create queue pair
 * @pd: protection domain
 * @init_attr: queue pair attributes
 * @udata: user data
 *
 * @return: the ib_qp pointer on success, otherwise returns an errno.
 */
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
			       struct ib_qp_init_attr *init_attr,
			       struct ib_udata *udata)
{
	struct pvrdma_qp *qp = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
	struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
	struct pvrdma_create_qp ucmd;
	unsigned long flags;
	int ret;

	if (init_attr->create_flags) {
		dev_warn(&dev->pdev->dev,
			 "invalid create queuepair flags %#x\n",
			 init_attr->create_flags);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_RC &&
	    init_attr->qp_type != IB_QPT_UD &&
	    init_attr->qp_type != IB_QPT_GSI) {
		dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
			 init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
		return ERR_PTR(-ENOMEM);

	switch (init_attr->qp_type) {
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > pd->device->phys_port_cnt ||
		    udata) {
			dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
			ret = -EINVAL;
			goto err_qp;
		}
		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UD:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp) {
			ret = -ENOMEM;
			goto err_qp;
		}

		spin_lock_init(&qp->sq.lock);
		spin_lock_init(&qp->rq.lock);
		mutex_init(&qp->mutex);
		atomic_set(&qp->refcnt, 1);
		init_waitqueue_head(&qp->wait);

		qp->state = IB_QPS_RESET;

		if (pd->uobject && udata) {
			dev_dbg(&dev->pdev->dev,
				"create queuepair from user space\n");

			if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
				ret = -EFAULT;
				goto err_qp;
			}

			/* Map the user-provided recv and send queue buffers. */
			qp->rumem = ib_umem_get(pd->uobject->context,
						ucmd.rbuf_addr,
						ucmd.rbuf_size, 0, 0);
			if (IS_ERR(qp->rumem)) {
				ret = PTR_ERR(qp->rumem);
				goto err_qp;
			}

			qp->sumem = ib_umem_get(pd->uobject->context,
						ucmd.sbuf_addr,
						ucmd.sbuf_size, 0, 0);
			if (IS_ERR(qp->sumem)) {
				ib_umem_release(qp->rumem);
				ret = PTR_ERR(qp->sumem);
				goto err_qp;
			}

			qp->npages_send = ib_umem_page_count(qp->sumem);
			qp->npages_recv = ib_umem_page_count(qp->rumem);
			qp->npages = qp->npages_send + qp->npages_recv;
		} else {
			qp->is_kernel = true;

			ret = pvrdma_set_sq_size(to_vdev(pd->device),
						 &init_attr->cap, qp);
			if (ret)
				goto err_qp;

			ret = pvrdma_set_rq_size(to_vdev(pd->device),
						 &init_attr->cap, qp);
			if (ret)
				goto err_qp;

			qp->npages = qp->npages_send + qp->npages_recv;

			/* Skip the header page(s). */
			qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;

			/* Recv queue pages are after send pages. */
			qp->rq.offset = qp->npages_send * PAGE_SIZE;
		}

		if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
			dev_warn(&dev->pdev->dev,
				 "overflow pages in queuepair\n");
			ret = -EINVAL;
			goto err_umem;
		}

		ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages,
					   qp->is_kernel);
		if (ret) {
			dev_warn(&dev->pdev->dev,
				 "could not allocate page directory\n");
			goto err_umem;
		}

		if (!qp->is_kernel) {
			pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
			pvrdma_page_dir_insert_umem(&qp->pdir, qp->rumem,
						    qp->npages_send);
		} else {
			/*
			 * Ring state is always the first page: the send
			 * ring state struct is followed immediately by the
			 * recv ring state struct in that page.
			 */
			qp->sq.ring = qp->pdir.pages[0];
			qp->rq.ring = &qp->sq.ring[1];
		}
		break;
	default:
		ret = -EINVAL;
		goto err_qp;
	}

	/* Not supported */
	init_attr->cap.max_inline_data = 0;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
	cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
	cmd->max_send_wr = init_attr->cap.max_send_wr;
	cmd->max_recv_wr = init_attr->cap.max_recv_wr;
	cmd->max_send_sge = init_attr->cap.max_send_sge;
	cmd->max_recv_sge = init_attr->cap.max_recv_sge;
	cmd->max_inline_data = init_attr->cap.max_inline_data;
	cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
	cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
	cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
	cmd->total_chunks = qp->npages;
	cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
	cmd->pdir_dma = qp->pdir.dir_dma;

	dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
		cmd->max_send_wr, cmd->max_recv_wr, cmd->max_send_sge,
		cmd->max_recv_sge);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create queuepair, error: %d\n", ret);
		goto err_pdir;
	}

	/* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
	qp->qp_handle = resp->qpn;
	qp->port = init_attr->port_num;
	qp->ibqp.qp_num = resp->qpn;
	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	return &qp->ibqp;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &qp->pdir);
err_umem:
	if (pd->uobject && udata) {
		if (qp->rumem)
			ib_umem_release(qp->rumem);
		if (qp->sumem)
			ib_umem_release(qp->sumem);
	}
err_qp:
	kfree(qp);
	atomic_dec(&dev->num_qps);

	return ERR_PTR(ret);
}

static void pvrdma_free_qp(struct pvrdma_qp *qp)
{
	struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
	struct pvrdma_cq *scq;
	struct pvrdma_cq *rcq;
	unsigned long flags, scq_flags, rcq_flags;

	/* In case cq is polling */
	get_cqs(qp, &scq, &rcq);
	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	_pvrdma_flush_cqe(qp, scq);
	if (scq != rcq)
		_pvrdma_flush_cqe(qp, rcq);

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	dev->qp_tbl[qp->qp_handle] = NULL;
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

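	/* Drop our reference and wait for all other holders to release theirs. */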
	atomic_dec(&qp->refcnt);
	wait_event(qp->wait, !atomic_read(&qp->refcnt));

	pvrdma_page_dir_cleanup(dev, &qp->pdir);

	kfree(qp);

	atomic_dec(&dev->num_qps);
}

/**
 * pvrdma_destroy_qp - destroy a queue pair
 * @qp: the queue pair to destroy
 *
 * @return: 0 on success.
 */
int pvrdma_destroy_qp(struct ib_qp *qp)
{
	struct pvrdma_qp *vqp = to_vqp(qp);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
	cmd->qp_handle = vqp->qp_handle;

	ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
	if (ret < 0)
		dev_warn(&to_vdev(qp->device)->pdev->dev,
			 "destroy queuepair failed, error: %d\n", ret);

	pvrdma_free_qp(vqp);

	return 0;
}

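/*
 * A minimal usage sketch (not part of this driver): a consumer drives QP
 * state transitions through the verbs layer, which lands here. The values
 * below are illustrative only, e.g. RESET -> INIT for an RC QP:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_LOCAL_WRITE,
 *	};
 *	int err = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */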
/**
 * pvrdma_modify_qp - modify queue pair attributes
 * @ibqp: the queue pair
 * @attr: the new queue pair's attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	struct pvrdma_qp *qp = to_vqp(ibqp);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp;
	int cur_state, next_state;
	int ret;

	/* Sanity checks; the qp mutex serializes state transitions. */
	mutex_lock(&qp->mutex);
	cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
		qp->state;
	next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_ETHERNET)) {
		ret = -EINVAL;
		goto out;
	}

	if (attr_mask & IB_QP_PORT) {
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		if (attr->min_rnr_timer > 31) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (attr->pkey_index >= dev->dsr->caps.max_pkeys) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (cur_state == next_state && cur_state == IB_QPS_RESET) {
		ret = 0;
		goto out;
	}

	qp->state = next_state;
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_QP;
	cmd->qp_handle = qp->qp_handle;
	cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
	cmd->attrs.qp_state = ib_qp_state_to_pvrdma(attr->qp_state);
	cmd->attrs.cur_qp_state =
		ib_qp_state_to_pvrdma(attr->cur_qp_state);
	cmd->attrs.path_mtu = ib_mtu_to_pvrdma(attr->path_mtu);
	cmd->attrs.path_mig_state =
		ib_mig_state_to_pvrdma(attr->path_mig_state);
	cmd->attrs.qkey = attr->qkey;
	cmd->attrs.rq_psn = attr->rq_psn;
	cmd->attrs.sq_psn = attr->sq_psn;
	cmd->attrs.dest_qp_num = attr->dest_qp_num;
	cmd->attrs.qp_access_flags =
		ib_access_flags_to_pvrdma(attr->qp_access_flags);
	cmd->attrs.pkey_index = attr->pkey_index;
	cmd->attrs.alt_pkey_index = attr->alt_pkey_index;
	cmd->attrs.en_sqd_async_notify = attr->en_sqd_async_notify;
	cmd->attrs.sq_draining = attr->sq_draining;
	cmd->attrs.max_rd_atomic = attr->max_rd_atomic;
	cmd->attrs.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	cmd->attrs.min_rnr_timer = attr->min_rnr_timer;
	cmd->attrs.port_num = attr->port_num;
	cmd->attrs.timeout = attr->timeout;
	cmd->attrs.retry_cnt = attr->retry_cnt;
	cmd->attrs.rnr_retry = attr->rnr_retry;
	cmd->attrs.alt_port_num = attr->alt_port_num;
	cmd->attrs.alt_timeout = attr->alt_timeout;
	ib_qp_cap_to_pvrdma(&cmd->attrs.cap, &attr->cap);
	ib_ah_attr_to_pvrdma(&cmd->attrs.ah_attr, &attr->ah_attr);
	ib_ah_attr_to_pvrdma(&cmd->attrs.alt_ah_attr, &attr->alt_ah_attr);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_MODIFY_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not modify queuepair, error: %d\n", ret);
	} else if (rsp.hdr.err > 0) {
		dev_warn(&dev->pdev->dev,
			 "cannot modify queuepair, error: %d\n", rsp.hdr.err);
		ret = -EINVAL;
	}

	if (ret == 0 && next_state == IB_QPS_RESET)
		pvrdma_reset_qp(qp);

out:
	mutex_unlock(&qp->mutex);

	return ret;
}

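/*
 * WQEs are fixed-size slots inside the queue's region of the page
 * directory: slot n starts at the queue offset plus n * wqe_size.
 */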
static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
	return pvrdma_page_dir_get_ptr(&qp->pdir,
				       qp->sq.offset + n * qp->sq.wqe_size);
}

static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
	return pvrdma_page_dir_get_ptr(&qp->pdir,
				       qp->rq.offset + n * qp->rq.wqe_size);
}

static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr)
{
	struct pvrdma_user_mr *mr = to_vmr(wr->mr);

	wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
	wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
	wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
	wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
	wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
	wqe_hdr->wr.fast_reg.access_flags = wr->access;
	wqe_hdr->wr.fast_reg.rkey = wr->key;

	return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages,
						mr->npages);
}

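/*
 * A minimal usage sketch (not part of this driver): a kernel consumer
 * posts sends through the verbs layer, which dispatches here. The names
 * and values below are illustrative only:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,	// hypothetical DMA-mapped buffer
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr swr = {
 *		.wr_id      = cookie,	// hypothetical completion cookie
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	}, *bad_wr;
 *	int err = ib_post_send(qp, &swr, &bad_wr);
 */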
/**
 * pvrdma_post_send - post send work request entries on a QP
 * @ibqp: the QP
 * @wr: work request list to post
 * @bad_wr: the first bad WR returned
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	struct pvrdma_qp *qp = to_vqp(ibqp);
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	unsigned long flags;
	struct pvrdma_sq_wqe_hdr *wqe_hdr;
	struct pvrdma_sge *sge;
	int i, ret;

	/*
	 * In states lower than RTS, we can fail immediately. In other states,
	 * just post and let the device figure it out.
	 */
	if (qp->state < IB_QPS_RTS) {
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);

	while (wr) {
		unsigned int tail = 0;

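		/*
		 * pvrdma_idx_ring_has_space() also reports, through tail, the
		 * producer slot that the next WQE should be written to.
		 */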
		if (unlikely(!pvrdma_idx_ring_has_space(
				qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "send queue is full\n");
			*bad_wr = wr;
			ret = -ENOMEM;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "send SGE overflow\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		}

		if (unlikely(wr->opcode < 0)) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "invalid send opcode\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		}

		/*
		 * Only UD and RC are supported. A thorough check would
		 * consult the full opcode table:
		 *
		 * opcode                       _UD _UC _RC
		 * _SEND                         x   x   x
		 * _SEND_WITH_IMM                x   x   x
		 * _RDMA_WRITE                       x   x
		 * _RDMA_WRITE_WITH_IMM              x   x
		 * _LOCAL_INV                        x   x
		 * _SEND_WITH_INV                    x   x
		 * _RDMA_READ                            x
		 * _ATOMIC_CMP_AND_SWP                   x
		 * _ATOMIC_FETCH_AND_ADD                 x
		 * _MASK_ATOMIC_CMP_AND_SWP              x
		 * _MASK_ATOMIC_FETCH_AND_ADD            x
		 * _REG_MR                               x
		 */
		if (qp->ibqp.qp_type != IB_QPT_UD &&
		    qp->ibqp.qp_type != IB_QPT_RC &&
		    wr->opcode != IB_WR_SEND) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "unsupported queuepair type\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		} else if (qp->ibqp.qp_type == IB_QPT_UD ||
			   qp->ibqp.qp_type == IB_QPT_GSI) {
			if (wr->opcode != IB_WR_SEND &&
			    wr->opcode != IB_WR_SEND_WITH_IMM) {
				dev_warn_ratelimited(&dev->pdev->dev,
						     "invalid send opcode\n");
				*bad_wr = wr;
				ret = -EINVAL;
				goto out;
			}
		}

		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
		memset(wqe_hdr, 0, sizeof(*wqe_hdr));
		wqe_hdr->wr_id = wr->wr_id;
		wqe_hdr->num_sge = wr->num_sge;
		wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode);
		wqe_hdr->send_flags = ib_send_flags_to_pvrdma(wr->send_flags);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			wqe_hdr->ex.imm_data = wr->ex.imm_data;

		switch (qp->ibqp.qp_type) {
		case IB_QPT_GSI:
		case IB_QPT_UD:
			if (unlikely(!ud_wr(wr)->ah)) {
				dev_warn_ratelimited(&dev->pdev->dev,
						     "invalid address handle\n");
				*bad_wr = wr;
				ret = -EINVAL;
				goto out;
			}

			/*
			 * Use qkey from qp context if high order bit set,
			 * otherwise from work request.
			 */
			wqe_hdr->wr.ud.remote_qpn = ud_wr(wr)->remote_qpn;
			wqe_hdr->wr.ud.remote_qkey =
				ud_wr(wr)->remote_qkey & 0x80000000 ?
				qp->qkey : ud_wr(wr)->remote_qkey;
			wqe_hdr->wr.ud.av = to_vah(ud_wr(wr)->ah)->av;

			break;
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				wqe_hdr->wr.rdma.remote_addr =
					rdma_wr(wr)->remote_addr;
				wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey;
				break;
			case IB_WR_LOCAL_INV:
			case IB_WR_SEND_WITH_INV:
				wqe_hdr->ex.invalidate_rkey =
					wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				wqe_hdr->wr.atomic.remote_addr =
					atomic_wr(wr)->remote_addr;
				wqe_hdr->wr.atomic.rkey = atomic_wr(wr)->rkey;
				wqe_hdr->wr.atomic.compare_add =
					atomic_wr(wr)->compare_add;
				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP)
					wqe_hdr->wr.atomic.swap =
						atomic_wr(wr)->swap;
				break;
			case IB_WR_REG_MR:
				ret = set_reg_seg(wqe_hdr, reg_wr(wr));
				if (ret < 0) {
					dev_warn_ratelimited(&dev->pdev->dev,
							     "Failed to set fast register work request\n");
					*bad_wr = wr;
					goto out;
				}
				break;
			default:
				break;
			}

			break;
		default:
			dev_warn_ratelimited(&dev->pdev->dev,
					     "invalid queuepair type\n");
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
		for (i = 0; i < wr->num_sge; i++) {
			/* Need to check wqe_size 0 or max size */
			sge->addr = wr->sg_list[i].addr;
			sge->length = wr->sg_list[i].length;
			sge->lkey = wr->sg_list[i].lkey;
			sge++;
		}

		/* Make sure wqe is written before index update */
		smp_wmb();

		/* Update shared sq ring */
		pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
				    qp->sq.wqe_cnt);

		wr = wr->next;
	}

	ret = 0;

out:
	spin_unlock_irqrestore(&qp->sq.lock, flags);

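	/* Only notify the device if all WRs were queued successfully. */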
	if (!ret)
		pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle);

	return ret;
}

/**
 * pvrdma_post_recv - post receive work request entries on a QP
 * @ibqp: the QP
 * @wr: the work request list to post
 * @bad_wr: the first bad WR returned
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		     struct ib_recv_wr **bad_wr)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	unsigned long flags;
	struct pvrdma_qp *qp = to_vqp(ibqp);
	struct pvrdma_rq_wqe_hdr *wqe_hdr;
	struct pvrdma_sge *sge;
	int ret = 0;
	int i;

	/*
	 * In the RESET state, we can fail immediately. For other states,
	 * just post and let the device figure it out.
	 */
	if (qp->state == IB_QPS_RESET) {
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->rq.lock, flags);

	while (wr) {
		unsigned int tail = 0;

		if (unlikely(wr->num_sge > qp->rq.max_sg ||
			     wr->num_sge < 0)) {
			ret = -EINVAL;
			*bad_wr = wr;
			dev_warn_ratelimited(&dev->pdev->dev,
					     "recv SGE overflow\n");
			goto out;
		}

		if (unlikely(!pvrdma_idx_ring_has_space(
				qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
			ret = -ENOMEM;
			*bad_wr = wr;
			dev_warn_ratelimited(&dev->pdev->dev,
					     "recv queue full\n");
			goto out;
		}

		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
		wqe_hdr->wr_id = wr->wr_id;
		wqe_hdr->num_sge = wr->num_sge;
		wqe_hdr->total_len = 0;

		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
		for (i = 0; i < wr->num_sge; i++) {
			sge->addr = wr->sg_list[i].addr;
			sge->length = wr->sg_list[i].length;
			sge->lkey = wr->sg_list[i].lkey;
			sge++;
		}

		/* Make sure wqe is written before index update */
		smp_wmb();

		/* Update shared rq ring */
		pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
				    qp->rq.wqe_cnt);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

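	/* Notify the device that receive WQEs were posted. */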
	pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle);

	return ret;

out:
	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return ret;
}

/**
 * pvrdma_query_qp - query a queue pair's attributes
 * @ibqp: the queue pair to query
 * @attr: the queue pair's attributes
 * @attr_mask: attributes mask
 * @init_attr: initial queue pair attributes
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	struct pvrdma_qp *qp = to_vqp(ibqp);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_qp *cmd = &req.query_qp;
	struct pvrdma_cmd_query_qp_resp *resp = &rsp.query_qp_resp;
	int ret = 0;

	mutex_lock(&qp->mutex);

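	/* A QP in RESET can be answered without a device round trip. */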
	if (qp->state == IB_QPS_RESET) {
		attr->qp_state = IB_QPS_RESET;
		goto out;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_QP;
	cmd->qp_handle = qp->qp_handle;
	cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query queuepair, error: %d\n", ret);
		goto out;
	}

	attr->qp_state = pvrdma_qp_state_to_ib(resp->attrs.qp_state);
	attr->cur_qp_state =
		pvrdma_qp_state_to_ib(resp->attrs.cur_qp_state);
	attr->path_mtu = pvrdma_mtu_to_ib(resp->attrs.path_mtu);
	attr->path_mig_state =
		pvrdma_mig_state_to_ib(resp->attrs.path_mig_state);
	attr->qkey = resp->attrs.qkey;
	attr->rq_psn = resp->attrs.rq_psn;
	attr->sq_psn = resp->attrs.sq_psn;
	attr->dest_qp_num = resp->attrs.dest_qp_num;
	attr->qp_access_flags =
		pvrdma_access_flags_to_ib(resp->attrs.qp_access_flags);
	attr->pkey_index = resp->attrs.pkey_index;
	attr->alt_pkey_index = resp->attrs.alt_pkey_index;
	attr->en_sqd_async_notify = resp->attrs.en_sqd_async_notify;
	attr->sq_draining = resp->attrs.sq_draining;
	attr->max_rd_atomic = resp->attrs.max_rd_atomic;
	attr->max_dest_rd_atomic = resp->attrs.max_dest_rd_atomic;
	attr->min_rnr_timer = resp->attrs.min_rnr_timer;
	attr->port_num = resp->attrs.port_num;
	attr->timeout = resp->attrs.timeout;
	attr->retry_cnt = resp->attrs.retry_cnt;
	attr->rnr_retry = resp->attrs.rnr_retry;
	attr->alt_port_num = resp->attrs.alt_port_num;
	attr->alt_timeout = resp->attrs.alt_timeout;
	pvrdma_qp_cap_to_ib(&attr->cap, &resp->attrs.cap);
	pvrdma_ah_attr_to_ib(&attr->ah_attr, &resp->attrs.ah_attr);
	pvrdma_ah_attr_to_ib(&attr->alt_ah_attr, &resp->attrs.alt_ah_attr);

	qp->state = attr->qp_state;

	ret = 0;

out:
	attr->cur_qp_state = attr->qp_state;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->xrcd = NULL;
	init_attr->cap = attr->cap;
	init_attr->sq_sig_type = 0;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->create_flags = 0;
	init_attr->port_num = qp->port;

	mutex_unlock(&qp->mutex);
	return ret;
}