1/*
2 * Copyright(c) 2015 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48#include <linux/hash.h>
49#include <linux/bitops.h>
50#include <linux/lockdep.h>
51#include <linux/vmalloc.h>
52#include <linux/slab.h>
53#include <rdma/ib_verbs.h>
54#include "qp.h"
55#include "vt.h"
56#include "trace.h"
57
58/*
59 * Note that it is OK to post send work requests in the SQE and ERR
60 * states; rvt_do_send() will process them and generate error
61 * completions as per IB 1.2 C10-96.
62 */
63const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
64 [IB_QPS_RESET] = 0,
65 [IB_QPS_INIT] = RVT_POST_RECV_OK,
66 [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
67 [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
68 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
69 RVT_PROCESS_NEXT_SEND_OK,
70 [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
71 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
72 [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
73 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
74 [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
75 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
76};
77EXPORT_SYMBOL(ib_rvt_state_ops);
78
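/*
 * get_map_page - allocate and install a QPN bitmap page
 * @qpt: the QPN table
 * @map: the bitmap slot to populate
 * @gfp: allocation flags passed through to the page allocator
 */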
79static void get_map_page(struct rvt_qpn_table *qpt,
80 struct rvt_qpn_map *map,
81 gfp_t gfp)
82{
83 unsigned long page = get_zeroed_page(gfp);
84
85 /*
86 * Free the page if someone raced with us installing it.
87 */
88
89 spin_lock(&qpt->lock);
90 if (map->page)
91 free_page(page);
92 else
93 map->page = (void *)page;
94 spin_unlock(&qpt->lock);
95}
96
97/**
98 * init_qpn_table - initialize the QP number table for a device
99 * @qpt: the QPN table
100 */
101static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
102{
103 u32 offset, i;
104 struct rvt_qpn_map *map;
105 int ret = 0;
106
107 if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
108 return -EINVAL;
109
110 spin_lock_init(&qpt->lock);
111
112 qpt->last = rdi->dparms.qpn_start;
113 qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
114
115 /*
116 * Drivers may want some QPs beyond what we need for verbs; let them use
117 * our QPN table. No need for two. Let's go ahead and mark the bitmaps
118 * for those. The reserved range must be *after* the range which verbs
119 * will pick from.
120 */
121
122 /* Figure out number of bit maps needed before reserved range */
123 qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
124
125 /* This should always be zero */
126 offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
127
128 /* Starting with the first reserved bit map */
129 map = &qpt->map[qpt->nmaps];
130
131 rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
132 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
133 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
134 if (!map->page) {
135 get_map_page(qpt, map, GFP_KERNEL);
136 if (!map->page) {
137 ret = -ENOMEM;
138 break;
139 }
140 }
141 set_bit(offset, map->page);
142 offset++;
143 if (offset == RVT_BITS_PER_PAGE) {
144 /* next page */
145 qpt->nmaps++;
146 map++;
147 offset = 0;
148 }
149 }
150 return ret;
151}
152
153/**
154 * free_qpn_table - free the QP number table for a device
155 * @qpt: the QPN table
156 */
157static void free_qpn_table(struct rvt_qpn_table *qpt)
158{
159 int i;
160
161 for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
162 free_page((unsigned long)qpt->map[i].page);
163}
164
165int rvt_driver_qp_init(struct rvt_dev_info *rdi)
166{
167 int i;
168 int ret = -ENOMEM;
169
170 if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER) {
171 rvt_pr_info(rdi, "Driver is doing QP init.\n");
172 return 0;
173 }
174
175 if (!rdi->dparms.qp_table_size)
176 return -EINVAL;
177
178 /*
179 * If driver is not doing any QP allocation then make sure it is
180 * providing the necessary QP functions.
181 */
182 if (!rdi->driver_f.free_all_qps ||
183 !rdi->driver_f.qp_priv_alloc ||
184 !rdi->driver_f.qp_priv_free ||
185 !rdi->driver_f.notify_qp_reset)
186 return -EINVAL;
187
188 /* allocate parent object */
189 rdi->qp_dev = kzalloc(sizeof(*rdi->qp_dev), GFP_KERNEL);
190 if (!rdi->qp_dev)
191 return -ENOMEM;
192
193 /* allocate hash table */
194 rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
195 rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
196 rdi->qp_dev->qp_table =
197 kmalloc(rdi->qp_dev->qp_table_size *
198 sizeof(*rdi->qp_dev->qp_table),
199 GFP_KERNEL);
200 if (!rdi->qp_dev->qp_table)
201 goto no_qp_table;
202
203 for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
204 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
205
206 spin_lock_init(&rdi->qp_dev->qpt_lock);
207
208 /* initialize qpn map */
209 if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
210 goto fail_table;
211
212 spin_lock_init(&rdi->n_qps_lock);
213
214 return 0;
215
216fail_table:
217 kfree(rdi->qp_dev->qp_table);
218 free_qpn_table(&rdi->qp_dev->qpn_table);
219
220no_qp_table:
221 kfree(rdi->qp_dev);
222
223 return ret;
224}
225
226/**
227 * rvt_free_all_qps - check for QPs still in use
228 * @rdi: the rvt device info
229 *
230 * There should not be any QPs still in use.
231 * Free memory for table.
232 */
233static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
234{
235 unsigned long flags;
236 struct rvt_qp *qp;
237 unsigned n, qp_inuse = 0;
238 spinlock_t *ql; /* work around too long line below */
239
240 if (rdi->driver_f.free_all_qps)
241 qp_inuse = rdi->driver_f.free_all_qps(rdi);
242
243 qp_inuse += rvt_mcast_tree_empty(rdi);
244
245 if (!rdi->qp_dev)
246 return qp_inuse;
247
248 ql = &rdi->qp_dev->qpt_lock;
249 spin_lock_irqsave(ql, flags);
250 for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
251 qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
252 lockdep_is_held(ql));
253 RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
254
255 for (; qp; qp = rcu_dereference_protected(qp->next,
256 lockdep_is_held(ql)))
257 qp_inuse++;
258 }
259 spin_unlock_irqrestore(ql, flags);
260 synchronize_rcu();
261 return qp_inuse;
262}
263
264void rvt_qp_exit(struct rvt_dev_info *rdi)
265{
266 u32 qps_inuse = rvt_free_all_qps(rdi);
267
268 if (qps_inuse)
269 rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
270 qps_inuse);
271 if (!rdi->qp_dev)
272 return;
273
274 if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER)
275 return; /* driver did the qp init so nothing else to do */
276
277 kfree(rdi->qp_dev->qp_table);
278 free_qpn_table(&rdi->qp_dev->qpn_table);
279 kfree(rdi->qp_dev);
280}
281
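/* Translate a bitmap page and a bit offset back into a QP number. */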
282static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
283 struct rvt_qpn_map *map, unsigned off)
284{
285 return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
286}
287
288/*
289 * Allocate the next available QPN or
290 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
291 */
292static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
293 enum ib_qp_type type, u8 port, gfp_t gfp)
294{
295 u32 i, offset, max_scan, qpn;
296 struct rvt_qpn_map *map;
297 u32 ret;
298
299 if (rdi->driver_f.alloc_qpn)
300 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port,
301 GFP_KERNEL);
302
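	/*
	 * QPN 0 (SMI) and QPN 1 (GSI) are fixed per port; each port owns two
	 * bits in qpt->flags, so a second allocation of either fails with -EINVAL.
	 */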
303 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
304 unsigned n;
305
306 ret = type == IB_QPT_GSI;
307 n = 1 << (ret + 2 * (port - 1));
308 spin_lock(&qpt->lock);
309 if (qpt->flags & n)
310 ret = -EINVAL;
311 else
312 qpt->flags |= n;
313 spin_unlock(&qpt->lock);
314 goto bail;
315 }
316
317 qpn = qpt->last + qpt->incr;
318 if (qpn >= RVT_QPN_MAX)
319 qpn = qpt->incr | ((qpt->last & 1) ^ 1);
320 /* offset carries bit 0 */
321 offset = qpn & RVT_BITS_PER_PAGE_MASK;
322 map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
323 max_scan = qpt->nmaps - !offset;
324 for (i = 0;;) {
325 if (unlikely(!map->page)) {
326 get_map_page(qpt, map, gfp);
327 if (unlikely(!map->page))
328 break;
329 }
330 do {
331 if (!test_and_set_bit(offset, map->page)) {
332 qpt->last = qpn;
333 ret = qpn;
334 goto bail;
335 }
336 offset += qpt->incr;
337 /*
338 * This qpn might be bogus if offset >= BITS_PER_PAGE.
339 * That is OK. It gets re-assigned below
340 */
341 qpn = mk_qpn(qpt, map, offset);
342 } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
343 /*
344 * In order to keep the number of pages allocated to a
345 * minimum, we scan all the existing pages before increasing
346 * the size of the bitmap table.
347 */
348 if (++i > max_scan) {
349 if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
350 break;
351 map = &qpt->map[qpt->nmaps++];
352 /* start at incr with current bit 0 */
353 offset = qpt->incr | (offset & 1);
354 } else if (map < &qpt->map[qpt->nmaps]) {
355 ++map;
356 /* start at incr with current bit 0 */
357 offset = qpt->incr | (offset & 1);
358 } else {
359 map = &qpt->map[0];
360 /* wrap to first map page, invert bit 0 */
361 offset = qpt->incr | ((offset & 1) ^ 1);
362 }
363 /* there can be no bits at shift and below */
364 WARN_ON(offset & (rdi->dparms.qos_shift - 1));
365 qpn = mk_qpn(qpt, map, offset);
366 }
367
368 ret = -ENOMEM;
369
370bail:
371 return ret;
372}
373
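/* Return a QPN to the bitmap so it can be handed out again. */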
374static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
375{
376 struct rvt_qpn_map *map;
377
378 map = qpt->map + qpn / RVT_BITS_PER_PAGE;
379 if (map->page)
380 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
381}
382
383/**
384 * rvt_reset_qp - initialize the QP state to the reset state
385 * @qp: the QP to reset
386 * @type: the QP type
387 * The r_lock and s_lock are required to be held by the caller.
388 */
389void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
390 enum ib_qp_type type)
391{
392 if (qp->state != IB_QPS_RESET) {
393 qp->state = IB_QPS_RESET;
394
395 /* Let drivers flush their waitlist */
396 rdi->driver_f.flush_qp_waiters(qp);
397 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
398 spin_unlock(&qp->s_lock);
399 spin_unlock_irq(&qp->r_lock);
400
401 /* Stop the send queue and the retry timer */
402 rdi->driver_f.stop_send_queue(qp);
403 del_timer_sync(&qp->s_timer);
404
405 /* Wait for things to stop */
406 rdi->driver_f.quiesce_qp(qp);
407
408 /* take qp out of the hash and wait for it to be unused */
409 rvt_remove_qp(rdi, qp);
410 wait_event(qp->wait, !atomic_read(&qp->refcount));
411
412 /* grab the lock b/c it was locked at call time */
413 spin_lock_irq(&qp->r_lock);
414 spin_lock(&qp->s_lock);
415
416 rvt_clear_mr_refs(qp, 1);
417 }
418
419 /*
420 * Let the driver do any tear down it needs to for a qp
421 * that has been reset
422 */
423 rdi->driver_f.notify_qp_reset(qp);
424
425 qp->remote_qpn = 0;
426 qp->qkey = 0;
427 qp->qp_access_flags = 0;
428 qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
429 qp->s_hdrwords = 0;
430 qp->s_wqe = NULL;
431 qp->s_draining = 0;
432 qp->s_next_psn = 0;
433 qp->s_last_psn = 0;
434 qp->s_sending_psn = 0;
435 qp->s_sending_hpsn = 0;
436 qp->s_psn = 0;
437 qp->r_psn = 0;
438 qp->r_msn = 0;
439 if (type == IB_QPT_RC) {
440 qp->s_state = IB_OPCODE_RC_SEND_LAST;
441 qp->r_state = IB_OPCODE_RC_SEND_LAST;
442 } else {
443 qp->s_state = IB_OPCODE_UC_SEND_LAST;
444 qp->r_state = IB_OPCODE_UC_SEND_LAST;
445 }
446 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
447 qp->r_nak_state = 0;
448 qp->r_aflags = 0;
449 qp->r_flags = 0;
450 qp->s_head = 0;
451 qp->s_tail = 0;
452 qp->s_cur = 0;
453 qp->s_acked = 0;
454 qp->s_last = 0;
455 qp->s_ssn = 1;
456 qp->s_lsn = 0;
457 qp->s_mig_state = IB_MIG_MIGRATED;
458 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
459 qp->r_head_ack_queue = 0;
460 qp->s_tail_ack_queue = 0;
461 qp->s_num_rd_atomic = 0;
462 if (qp->r_rq.wq) {
463 qp->r_rq.wq->head = 0;
464 qp->r_rq.wq->tail = 0;
465 }
466 qp->r_sge.num_sge = 0;
467}
468EXPORT_SYMBOL(rvt_reset_qp);
469
470/**
471 * rvt_create_qp - create a queue pair for a device
472 * @ibpd: the protection domain whose device we create the queue pair for
473 * @init_attr: the attributes of the queue pair
474 * @udata: user data for libibverbs.so
475 *
476 * Queue pair creation is mostly an rvt issue. However, drivers have their own
477 * unique idea of what queue pair numbers mean. For instance, there is a reserved
478 * range for PSM.
479 *
480 * Returns the queue pair on success, otherwise returns an errno.
481 *
482 * Called by the ib_create_qp() core verbs function.
483 */
484struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
485 struct ib_qp_init_attr *init_attr,
486 struct ib_udata *udata)
487{
488 struct rvt_qp *qp;
489 int err;
490 struct rvt_swqe *swq = NULL;
491 size_t sz;
492 size_t sg_list_sz;
493 struct ib_qp *ret = ERR_PTR(-ENOMEM);
494 struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
495 void *priv = NULL;
496 gfp_t gfp;
497
498 if (!rdi)
499 return ERR_PTR(-EINVAL);
500
501 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
502 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
503 init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
504 return ERR_PTR(-EINVAL);
505
506 /* GFP_NOIO is applicable to RC QPs only */
507
508 if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
509 init_attr->qp_type != IB_QPT_RC)
510 return ERR_PTR(-EINVAL);
511
512 gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
513 GFP_NOIO : GFP_KERNEL;
514
515 /* Check receive queue parameters if no SRQ is specified. */
516 if (!init_attr->srq) {
517 if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
518 init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
519 return ERR_PTR(-EINVAL);
520
521 if (init_attr->cap.max_send_sge +
522 init_attr->cap.max_send_wr +
523 init_attr->cap.max_recv_sge +
524 init_attr->cap.max_recv_wr == 0)
525 return ERR_PTR(-EINVAL);
526 }
527
528 switch (init_attr->qp_type) {
529 case IB_QPT_SMI:
530 case IB_QPT_GSI:
531 if (init_attr->port_num == 0 ||
532 init_attr->port_num > ibpd->device->phys_port_cnt)
533 return ERR_PTR(-EINVAL);
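		/* SMI/GSI validate the port and fall through to the common path. */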
534 case IB_QPT_UC:
535 case IB_QPT_RC:
536 case IB_QPT_UD:
537 sz = sizeof(struct rvt_sge) *
538 init_attr->cap.max_send_sge +
539 sizeof(struct rvt_swqe);
540 if (gfp == GFP_NOIO)
541 swq = __vmalloc(
542 (init_attr->cap.max_send_wr + 1) * sz,
543 gfp, PAGE_KERNEL);
544 else
545 swq = vmalloc(
546 (init_attr->cap.max_send_wr + 1) * sz);
547 if (!swq)
548 return ERR_PTR(-ENOMEM);
549
550 sz = sizeof(*qp);
551 sg_list_sz = 0;
552 if (init_attr->srq) {
553 struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
554
555 if (srq->rq.max_sge > 1)
556 sg_list_sz = sizeof(*qp->r_sg_list) *
557 (srq->rq.max_sge - 1);
558 } else if (init_attr->cap.max_recv_sge > 1)
559 sg_list_sz = sizeof(*qp->r_sg_list) *
560 (init_attr->cap.max_recv_sge - 1);
561 qp = kzalloc(sz + sg_list_sz, gfp);
562 if (!qp)
563 goto bail_swq;
564
565 RCU_INIT_POINTER(qp->next, NULL);
566
567 /*
568 * Driver needs to set up its private QP structure and do any
569 * initialization that is needed.
570 */
571 priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
572 if (!priv)
573 goto bail_qp;
574 qp->priv = priv;
575 qp->timeout_jiffies =
576 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
577 1000UL);
578 if (init_attr->srq) {
579 sz = 0;
580 } else {
581 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
582 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
583 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
584 sizeof(struct rvt_rwqe);
585 if (udata)
586 qp->r_rq.wq = vmalloc_user(
587 sizeof(struct rvt_rwq) +
588 qp->r_rq.size * sz);
589 else if (gfp == GFP_NOIO)
590 qp->r_rq.wq = __vmalloc(
591 sizeof(struct rvt_rwq) +
592 qp->r_rq.size * sz,
593 gfp, PAGE_KERNEL);
594 else
595 qp->r_rq.wq = vmalloc(
596 sizeof(struct rvt_rwq) +
597 qp->r_rq.size * sz);
598 if (!qp->r_rq.wq)
599 goto bail_driver_priv;
600 }
601
602 /*
603 * ib_create_qp() will initialize qp->ibqp
604 * except for qp->ibqp.qp_num.
605 */
606 spin_lock_init(&qp->r_lock);
607 spin_lock_init(&qp->s_lock);
608 spin_lock_init(&qp->r_rq.lock);
609 atomic_set(&qp->refcount, 0);
610 init_waitqueue_head(&qp->wait);
611 init_timer(&qp->s_timer);
612 qp->s_timer.data = (unsigned long)qp;
613 INIT_LIST_HEAD(&qp->rspwait);
614 qp->state = IB_QPS_RESET;
615 qp->s_wq = swq;
616 qp->s_size = init_attr->cap.max_send_wr + 1;
617 qp->s_max_sge = init_attr->cap.max_send_sge;
618 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
619 qp->s_flags = RVT_S_SIGNAL_REQ_WR;
620
621 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
622 init_attr->qp_type,
623 init_attr->port_num, gfp);
624 if (err < 0) {
625 ret = ERR_PTR(err);
626 goto bail_rq_wq;
627 }
628 qp->ibqp.qp_num = err;
629 qp->port_num = init_attr->port_num;
630 rvt_reset_qp(rdi, qp, init_attr->qp_type);
631 break;
632
633 default:
634 /* Don't support raw QPs */
635 return ERR_PTR(-EINVAL);
636 }
637
638 init_attr->cap.max_inline_data = 0;
639
640 /*
641 * Return the address of the RWQ as the offset to mmap.
642 * See rvt_mmap() for details.
643 */
644 if (udata && udata->outlen >= sizeof(__u64)) {
645 if (!qp->r_rq.wq) {
646 __u64 offset = 0;
647
648 err = ib_copy_to_udata(udata, &offset,
649 sizeof(offset));
650 if (err) {
651 ret = ERR_PTR(err);
652 goto bail_qpn;
653 }
654 } else {
655 u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
656
657 qp->ip = rvt_create_mmap_info(rdi, s,
658 ibpd->uobject->context,
659 qp->r_rq.wq);
660 if (!qp->ip) {
661 ret = ERR_PTR(-ENOMEM);
662 goto bail_qpn;
663 }
664
665 err = ib_copy_to_udata(udata, &qp->ip->offset,
666 sizeof(qp->ip->offset));
667 if (err) {
668 ret = ERR_PTR(err);
669 goto bail_ip;
670 }
671 }
672 }
673
674 spin_lock(&rdi->n_qps_lock);
675 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
676 spin_unlock(&rdi->n_qps_lock);
677 ret = ERR_PTR(-ENOMEM);
678 goto bail_ip;
679 }
680
681 rdi->n_qps_allocated++;
682 spin_unlock(&rdi->n_qps_lock);
683
684 if (qp->ip) {
685 spin_lock_irq(&rdi->pending_lock);
686 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
687 spin_unlock_irq(&rdi->pending_lock);
688 }
689
690 ret = &qp->ibqp;
691
692 /*
693 * We have our QP and it's good; now keep track of what types of opcodes
694 * can be processed on this QP. We do this by keeping track of what the
695 * 3 high order bits of the opcode are.
696 */
697 switch (init_attr->qp_type) {
698 case IB_QPT_SMI:
699 case IB_QPT_GSI:
700 case IB_QPT_UD:
701 qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
702 break;
703 case IB_QPT_RC:
704 qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
705 break;
706 case IB_QPT_UC:
707 qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
708 break;
709 default:
710 ret = ERR_PTR(-EINVAL);
711 goto bail_ip;
712 }
713
714 return ret;
715
716bail_ip:
717 kref_put(&qp->ip->ref, rvt_release_mmap_info);
718
719bail_qpn:
720 free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
721
722bail_rq_wq:
723 vfree(qp->r_rq.wq);
724
725bail_driver_priv:
726 rdi->driver_f.qp_priv_free(rdi, qp);
727
728bail_qp:
729 kfree(qp);
730
731bail_swq:
732 vfree(swq);
733
734 return ret;
735}
736
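/*
 * rvt_clear_mr_refs - drop the MR references a QP is holding
 * @qp: the QP whose SGE references should be released
 * @clr_sends: also release references held by pending send WQEs
 */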
737void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
738{
739 unsigned n;
740
741 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
742 rvt_put_ss(&qp->s_rdma_read_sge);
743
744 rvt_put_ss(&qp->r_sge);
745
746 if (clr_sends) {
747 while (qp->s_last != qp->s_head) {
748 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
749 unsigned i;
750
751 for (i = 0; i < wqe->wr.num_sge; i++) {
752 struct rvt_sge *sge = &wqe->sg_list[i];
753
754 rvt_put_mr(sge->mr);
755 }
756 if (qp->ibqp.qp_type == IB_QPT_UD ||
757 qp->ibqp.qp_type == IB_QPT_SMI ||
758 qp->ibqp.qp_type == IB_QPT_GSI)
759 atomic_dec(&ibah_to_rvtah(
760 wqe->ud_wr.ah)->refcount);
761 if (++qp->s_last >= qp->s_size)
762 qp->s_last = 0;
763 }
764 if (qp->s_rdma_mr) {
765 rvt_put_mr(qp->s_rdma_mr);
766 qp->s_rdma_mr = NULL;
767 }
768 }
769
770 if (qp->ibqp.qp_type != IB_QPT_RC)
771 return;
772
773 for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
774 struct rvt_ack_entry *e = &qp->s_ack_queue[n];
775
776 if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
777 e->rdma_sge.mr) {
778 rvt_put_mr(e->rdma_sge.mr);
779 e->rdma_sge.mr = NULL;
780 }
781 }
782}
783EXPORT_SYMBOL(rvt_clear_mr_refs);
784
785/**
786 * rvt_error_qp - put a QP into the error state
787 * @qp: the QP to put into the error state
788 * @err: the receive completion error to signal if a RWQE is active
789 *
790 * Flushes both send and receive work queues.
791 * Returns true if last WQE event should be generated.
792 * The QP r_lock and s_lock should be held and interrupts disabled.
793 * If we are already in error state, just return.
794 */
795int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
796{
797 struct ib_wc wc;
798 int ret = 0;
799 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
800
801 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
802 goto bail;
803
804 qp->state = IB_QPS_ERR;
805
806 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
807 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
808 del_timer(&qp->s_timer);
809 }
810
811 if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
812 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
813
814 rdi->driver_f.notify_error_qp(qp);
815
816 /* Schedule the sending tasklet to drain the send work queue. */
817 if (qp->s_last != qp->s_head)
818 rdi->driver_f.schedule_send(qp);
819
820 rvt_clear_mr_refs(qp, 0);
821
822 memset(&wc, 0, sizeof(wc));
823 wc.qp = &qp->ibqp;
824 wc.opcode = IB_WC_RECV;
825
826 if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
827 wc.wr_id = qp->r_wr_id;
828 wc.status = err;
829 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
830 }
831 wc.status = IB_WC_WR_FLUSH_ERR;
832
833 if (qp->r_rq.wq) {
834 struct rvt_rwq *wq;
835 u32 head;
836 u32 tail;
837
838 spin_lock(&qp->r_rq.lock);
839
840 /* sanity check pointers before trusting them */
841 wq = qp->r_rq.wq;
842 head = wq->head;
843 if (head >= qp->r_rq.size)
844 head = 0;
845 tail = wq->tail;
846 if (tail >= qp->r_rq.size)
847 tail = 0;
848 while (tail != head) {
849 wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
850 if (++tail >= qp->r_rq.size)
851 tail = 0;
852 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
853 }
854 wq->tail = tail;
855
856 spin_unlock(&qp->r_rq.lock);
857 } else if (qp->ibqp.event_handler) {
858 ret = 1;
859 }
860
861bail:
862 return ret;
863}
864EXPORT_SYMBOL(rvt_error_qp);
865
866/*
867 * Put the QP into the hash table.
868 * The hash table holds a reference to the QP.
869 */
870static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
871{
872 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
873 unsigned long flags;
874
875 atomic_inc(&qp->refcount);
876 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
877
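	/* QP0 and QP1 live in the per-port array; everything else is hashed. */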
878 if (qp->ibqp.qp_num <= 1) {
879 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
880 } else {
881 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
882
883 qp->next = rdi->qp_dev->qp_table[n];
884 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
885 trace_rvt_qpinsert(qp, n);
886 }
887
888 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
889}
890
891/*
892 * Remove the QP from the table so it can't be found asynchronously by
893 * the receive routine.
894 */
895void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
896{
897 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
898 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
899 unsigned long flags;
900 int removed = 1;
901
902 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
903
904 if (rcu_dereference_protected(rvp->qp[0],
905 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
906 RCU_INIT_POINTER(rvp->qp[0], NULL);
907 } else if (rcu_dereference_protected(rvp->qp[1],
908 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
909 RCU_INIT_POINTER(rvp->qp[1], NULL);
910 } else {
911 struct rvt_qp *q;
912 struct rvt_qp __rcu **qpp;
913
914 removed = 0;
915 qpp = &rdi->qp_dev->qp_table[n];
916 for (; (q = rcu_dereference_protected(*qpp,
917 lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
918 qpp = &q->next) {
919 if (q == qp) {
920 RCU_INIT_POINTER(*qpp,
921 rcu_dereference_protected(qp->next,
922 lockdep_is_held(&rdi->qp_dev->qpt_lock)));
923 removed = 1;
924 trace_rvt_qpremove(qp, n);
925 break;
926 }
927 }
928 }
929
930 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
931 if (removed) {
932 synchronize_rcu();
933 if (atomic_dec_and_test(&qp->refcount))
934 wake_up(&qp->wait);
935 }
936}
937EXPORT_SYMBOL(rvt_remove_qp);
938
939/**
940 * rvt_modify_qp - modify the attributes of a queue pair
941 * @ibqp: the queue pair whose attributes we're modifying
942 * @attr: the new attributes
943 * @attr_mask: the mask of attributes to modify
944 * @udata: user data for libibverbs.so
945 *
946 * Returns 0 on success, otherwise returns an errno.
947 */
948int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
949 int attr_mask, struct ib_udata *udata)
950{
951 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
952 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
953 enum ib_qp_state cur_state, new_state;
954 struct ib_event ev;
955 int lastwqe = 0;
956 int mig = 0;
957 int pmtu = 0; /* for gcc warning only */
958 enum rdma_link_layer link;
959
960 link = rdma_port_get_link_layer(ibqp->device, qp->port_num);
961
962 spin_lock_irq(&qp->r_lock);
963 spin_lock(&qp->s_lock);
964
965 cur_state = attr_mask & IB_QP_CUR_STATE ?
966 attr->cur_qp_state : qp->state;
967 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
968
969 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
970 attr_mask, link))
971 goto inval;
972
973 if (attr_mask & IB_QP_AV) {
974 if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
975 goto inval;
976 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
977 goto inval;
978 }
979
980 if (attr_mask & IB_QP_ALT_PATH) {
981 if (attr->alt_ah_attr.dlid >=
982 be16_to_cpu(IB_MULTICAST_LID_BASE))
983 goto inval;
984 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
985 goto inval;
986 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
987 goto inval;
988 }
989
990 if (attr_mask & IB_QP_PKEY_INDEX)
991 if (attr->pkey_index >= rvt_get_npkeys(rdi))
992 goto inval;
993
994 if (attr_mask & IB_QP_MIN_RNR_TIMER)
995 if (attr->min_rnr_timer > 31)
996 goto inval;
997
998 if (attr_mask & IB_QP_PORT)
999 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1000 qp->ibqp.qp_type == IB_QPT_GSI ||
1001 attr->port_num == 0 ||
1002 attr->port_num > ibqp->device->phys_port_cnt)
1003 goto inval;
1004
1005 if (attr_mask & IB_QP_DEST_QPN)
1006 if (attr->dest_qp_num > RVT_QPN_MASK)
1007 goto inval;
1008
1009 if (attr_mask & IB_QP_RETRY_CNT)
1010 if (attr->retry_cnt > 7)
1011 goto inval;
1012
1013 if (attr_mask & IB_QP_RNR_RETRY)
1014 if (attr->rnr_retry > 7)
1015 goto inval;
1016
1017 /*
1018 * Don't allow invalid path_mtu values. OK to set greater
1019 * than the active mtu (or even the max_cap, if we have tuned
1020 * that to a small mtu). We'll set qp->path_mtu
1021 * to the lesser of the requested attribute mtu and the active mtu,
1022 * for packetizing messages.
1023 * Note that the QP port has to be set in INIT and MTU in RTR.
1024 */
1025 if (attr_mask & IB_QP_PATH_MTU) {
1026 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1027 if (pmtu < 0)
1028 goto inval;
1029 }
1030
1031 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1032 if (attr->path_mig_state == IB_MIG_REARM) {
1033 if (qp->s_mig_state == IB_MIG_ARMED)
1034 goto inval;
1035 if (new_state != IB_QPS_RTS)
1036 goto inval;
1037 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1038 if (qp->s_mig_state == IB_MIG_REARM)
1039 goto inval;
1040 if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1041 goto inval;
1042 if (qp->s_mig_state == IB_MIG_ARMED)
1043 mig = 1;
1044 } else {
1045 goto inval;
1046 }
1047 }
1048
1049 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1050 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1051 goto inval;
1052
1053 switch (new_state) {
1054 case IB_QPS_RESET:
1055 if (qp->state != IB_QPS_RESET)
1056 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1057 break;
1058
1059 case IB_QPS_RTR:
1060 /* Allow event to re-trigger if QP set to RTR more than once */
1061 qp->r_flags &= ~RVT_R_COMM_EST;
1062 qp->state = new_state;
1063 break;
1064
1065 case IB_QPS_SQD:
1066 qp->s_draining = qp->s_last != qp->s_cur;
1067 qp->state = new_state;
1068 break;
1069
1070 case IB_QPS_SQE:
1071 if (qp->ibqp.qp_type == IB_QPT_RC)
1072 goto inval;
1073 qp->state = new_state;
1074 break;
1075
1076 case IB_QPS_ERR:
1077 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1078 break;
1079
1080 default:
1081 qp->state = new_state;
1082 break;
1083 }
1084
1085 if (attr_mask & IB_QP_PKEY_INDEX)
1086 qp->s_pkey_index = attr->pkey_index;
1087
1088 if (attr_mask & IB_QP_PORT)
1089 qp->port_num = attr->port_num;
1090
1091 if (attr_mask & IB_QP_DEST_QPN)
1092 qp->remote_qpn = attr->dest_qp_num;
1093
1094 if (attr_mask & IB_QP_SQ_PSN) {
1095 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1096 qp->s_psn = qp->s_next_psn;
1097 qp->s_sending_psn = qp->s_next_psn;
1098 qp->s_last_psn = qp->s_next_psn - 1;
1099 qp->s_sending_hpsn = qp->s_last_psn;
1100 }
1101
1102 if (attr_mask & IB_QP_RQ_PSN)
1103 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1104
1105 if (attr_mask & IB_QP_ACCESS_FLAGS)
1106 qp->qp_access_flags = attr->qp_access_flags;
1107
1108 if (attr_mask & IB_QP_AV) {
1109 qp->remote_ah_attr = attr->ah_attr;
1110 qp->s_srate = attr->ah_attr.static_rate;
1111 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1112 }
1113
1114 if (attr_mask & IB_QP_ALT_PATH) {
1115 qp->alt_ah_attr = attr->alt_ah_attr;
1116 qp->s_alt_pkey_index = attr->alt_pkey_index;
1117 }
1118
1119 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1120 qp->s_mig_state = attr->path_mig_state;
1121 if (mig) {
1122 qp->remote_ah_attr = qp->alt_ah_attr;
1123 qp->port_num = qp->alt_ah_attr.port_num;
1124 qp->s_pkey_index = qp->s_alt_pkey_index;
1125
1126 /*
1127 * Ignored by drivers which do not support it. Not
1128 * really worth creating a call back into the driver
1129 * just to set a flag.
1130 */
1131 qp->s_flags |= RVT_S_AHG_CLEAR;
1132 }
1133 }
1134
1135 if (attr_mask & IB_QP_PATH_MTU) {
1136 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1137 qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1138 }
1139
1140 if (attr_mask & IB_QP_RETRY_CNT) {
1141 qp->s_retry_cnt = attr->retry_cnt;
1142 qp->s_retry = attr->retry_cnt;
1143 }
1144
1145 if (attr_mask & IB_QP_RNR_RETRY) {
1146 qp->s_rnr_retry_cnt = attr->rnr_retry;
1147 qp->s_rnr_retry = attr->rnr_retry;
1148 }
1149
1150 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1151 qp->r_min_rnr_timer = attr->min_rnr_timer;
1152
1153 if (attr_mask & IB_QP_TIMEOUT) {
1154 qp->timeout = attr->timeout;
1155 qp->timeout_jiffies =
1156 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1157 1000UL);
1158 }
1159
1160 if (attr_mask & IB_QP_QKEY)
1161 qp->qkey = attr->qkey;
1162
1163 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1164 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1165
1166 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1167 qp->s_max_rd_atomic = attr->max_rd_atomic;
1168
1169 spin_unlock(&qp->s_lock);
1170 spin_unlock_irq(&qp->r_lock);
1171
1172 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1173 rvt_insert_qp(rdi, qp);
1174
1175 if (lastwqe) {
1176 ev.device = qp->ibqp.device;
1177 ev.element.qp = &qp->ibqp;
1178 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1179 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1180 }
1181 if (mig) {
1182 ev.device = qp->ibqp.device;
1183 ev.element.qp = &qp->ibqp;
1184 ev.event = IB_EVENT_PATH_MIG;
1185 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1186 }
1187 return 0;
1188
1189inval:
1190 spin_unlock(&qp->s_lock);
1191 spin_unlock_irq(&qp->r_lock);
1192 return -EINVAL;
1193}
1194
1195/**
1196 * rvt_destroy_qp - destroy a queue pair
1197 * @ibqp: the queue pair to destroy
1198 *
1199 * Returns 0 on success.
1200 *
1201 * Note that this can be called while the QP is actively sending or
1202 * receiving!
1203 */
1204int rvt_destroy_qp(struct ib_qp *ibqp)
1205{
1206 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1207 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1208
1209 spin_lock_irq(&qp->r_lock);
1210 spin_lock(&qp->s_lock);
1211 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1212 spin_unlock(&qp->s_lock);
1213 spin_unlock_irq(&qp->r_lock);
1214
1215 /* qpn is now available for use again */
1216 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1217
1218 spin_lock(&rdi->n_qps_lock);
1219 rdi->n_qps_allocated--;
1220 spin_unlock(&rdi->n_qps_lock);
1221
1222 if (qp->ip)
1223 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1224 else
1225 vfree(qp->r_rq.wq);
1226 vfree(qp->s_wq);
1227 rdi->driver_f.qp_priv_free(rdi, qp);
1228 kfree(qp);
1229 return 0;
1230}
1231
1232int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1233 int attr_mask, struct ib_qp_init_attr *init_attr)
1234{
1235 return -EOPNOTSUPP;
1236}
1237
1238/**
1239 * rvt_post_recv - post a receive on a QP
1240 * @ibqp: the QP to post the receive on
1241 * @wr: the WR to post
1242 * @bad_wr: the first bad WR is put here
1243 *
1244 * This may be called from interrupt context.
1245 */
1246int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1247 struct ib_recv_wr **bad_wr)
1248{
1249 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1250 struct rvt_rwq *wq = qp->r_rq.wq;
1251 unsigned long flags;
1252
1253 /* Check that state is OK to post receive. */
1254 if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1255 *bad_wr = wr;
1256 return -EINVAL;
1257 }
1258
1259 for (; wr; wr = wr->next) {
1260 struct rvt_rwqe *wqe;
1261 u32 next;
1262 int i;
1263
1264 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1265 *bad_wr = wr;
1266 return -EINVAL;
1267 }
1268
1269 spin_lock_irqsave(&qp->r_rq.lock, flags);
1270 next = wq->head + 1;
1271 if (next >= qp->r_rq.size)
1272 next = 0;
1273 if (next == wq->tail) {
1274 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1275 *bad_wr = wr;
1276 return -ENOMEM;
1277 }
1278
1279 wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1280 wqe->wr_id = wr->wr_id;
1281 wqe->num_sge = wr->num_sge;
1282 for (i = 0; i < wr->num_sge; i++)
1283 wqe->sg_list[i] = wr->sg_list[i];
1284 /* Make sure queue entry is written before the head index. */
1285 smp_wmb();
1286 wq->head = next;
1287 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1288 }
1289 return 0;
1290}
1291
1292/**
1293 * rvt_post_one_wr - post one RC, UC, or UD send work request
1294 * @qp: the QP to post on
1295 * @wr: the work request to send
1296 */
1297static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
1298{
1299 struct rvt_swqe *wqe;
1300 u32 next;
1301 int i;
1302 int j;
1303 int acc;
1304 struct rvt_lkey_table *rkt;
1305 struct rvt_pd *pd;
1306 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1307
1308 /* IB spec says that num_sge == 0 is OK. */
1309 if (unlikely(wr->num_sge > qp->s_max_sge))
1310 return -EINVAL;
1311
1312 /*
1313 * Don't allow RDMA reads or atomic operations on UC, or
1314 * undefined operations.
1315 * Make sure buffer is large enough to hold the result for atomics.
1316 */
1317 if (qp->ibqp.qp_type == IB_QPT_UC) {
1318 if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
1319 return -EINVAL;
1320 } else if (qp->ibqp.qp_type != IB_QPT_RC) {
1321 /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
1322 if (wr->opcode != IB_WR_SEND &&
1323 wr->opcode != IB_WR_SEND_WITH_IMM)
1324 return -EINVAL;
1325 /* Check UD destination address PD */
1326 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1327 return -EINVAL;
1328 } else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
1329 return -EINVAL;
1330 } else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
1331 (wr->num_sge == 0 ||
1332 wr->sg_list[0].length < sizeof(u64) ||
1333 wr->sg_list[0].addr & (sizeof(u64) - 1))) {
1334 return -EINVAL;
1335 } else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
1336 return -EINVAL;
1337 }
1338
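	/* One slot is kept empty: the ring is full when head + 1 == s_last. */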
1339 next = qp->s_head + 1;
1340 if (next >= qp->s_size)
1341 next = 0;
1342 if (next == qp->s_last)
1343 return -ENOMEM;
1344
1345 rkt = &rdi->lkey_table;
1346 pd = ibpd_to_rvtpd(qp->ibqp.pd);
1347 wqe = rvt_get_swqe_ptr(qp, qp->s_head);
1348
1349 if (qp->ibqp.qp_type != IB_QPT_UC &&
1350 qp->ibqp.qp_type != IB_QPT_RC)
1351 memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
1352 else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
1353 wr->opcode == IB_WR_RDMA_WRITE ||
1354 wr->opcode == IB_WR_RDMA_READ)
1355 memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
1356 else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1357 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1358 memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
1359 else
1360 memcpy(&wqe->wr, wr, sizeof(wqe->wr));
1361
1362 wqe->length = 0;
1363 j = 0;
1364 if (wr->num_sge) {
1365 acc = wr->opcode >= IB_WR_RDMA_READ ?
1366 IB_ACCESS_LOCAL_WRITE : 0;
1367 for (i = 0; i < wr->num_sge; i++) {
1368 u32 length = wr->sg_list[i].length;
1369 int ok;
1370
1371 if (length == 0)
1372 continue;
1373 ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
1374 &wr->sg_list[i], acc);
1375 if (!ok)
1376 goto bail_inval_free;
1377 wqe->length += length;
1378 j++;
1379 }
1380 wqe->wr.num_sge = j;
1381 }
1382 if (qp->ibqp.qp_type == IB_QPT_UC ||
1383 qp->ibqp.qp_type == IB_QPT_RC) {
1384 if (wqe->length > 0x80000000U)
1385 goto bail_inval_free;
1386 } else {
1387 atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
1388 }
1389 wqe->ssn = qp->s_ssn++;
1390 qp->s_head = next;
1391
1392 return 0;
1393
1394bail_inval_free:
1395 /* release mr holds */
1396 while (j) {
1397 struct rvt_sge *sge = &wqe->sg_list[--j];
1398
1399 rvt_put_mr(sge->mr);
1400 }
1401 return -EINVAL;
1402}
1403
1404/**
1405 * rvt_post_send - post a send on a QP
1406 * @ibqp: the QP to post the send on
1407 * @wr: the list of work requests to post
1408 * @bad_wr: the first bad WR is put here
1409 *
1410 * This may be called from interrupt context.
1411 */
1412int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1413 struct ib_send_wr **bad_wr)
1414{
1415 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1416 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1417 unsigned long flags = 0;
1418 int call_send;
1419 unsigned nreq = 0;
1420 int err = 0;
1421
1422 spin_lock_irqsave(&qp->s_lock, flags);
1423
1424 /*
1425 * Ensure QP state is such that we can send. If not, bail out early;
1426 * there is no need to do this every time we post a send.
1427 */
1428 if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
1429 spin_unlock_irqrestore(&qp->s_lock, flags);
1430 return -EINVAL;
1431 }
1432
1433 /*
1434 * If the send queue is empty and we only have a single WR, then just go
1435 * ahead and kick the send engine into gear. Otherwise we will always
1436 * just schedule the send to happen later.
1437 */
1438 call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
1439
1440 for (; wr; wr = wr->next) {
1441 err = rvt_post_one_wr(qp, wr);
1442 if (unlikely(err)) {
1443 *bad_wr = wr;
1444 goto bail;
1445 }
1446 nreq++;
1447 }
1448bail:
1449 if (nreq && !call_send)
1450 rdi->driver_f.schedule_send(qp);
1451 spin_unlock_irqrestore(&qp->s_lock, flags);
1452 if (nreq && call_send)
1453 rdi->driver_f.do_send(qp);
1454 return err;
1455}
1456
1457/**
1458 * rvt_post_srq_recv - post a receive on a shared receive queue
1459 * @ibsrq: the SRQ to post the receive on
1460 * @wr: the list of work requests to post
1461 * @bad_wr: A pointer to the first WR to cause a problem is put here
1462 *
1463 * This may be called from interrupt context.
1464 */
1465int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
1466 struct ib_recv_wr **bad_wr)
1467{
1468 return -EOPNOTSUPP;
1469}
1470
1471void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
1472{
1473 struct rvt_qpn_map *map;
1474
1475 map = qpt->map + qpn / RVT_BITS_PER_PAGE;
1476 if (map->page)
1477 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
1478}
1479EXPORT_SYMBOL(rvt_free_qpn);
1480
1481void rvt_dec_qp_cnt(struct rvt_dev_info *rdi)
1482{
1483 spin_lock(&rdi->n_qps_lock);
1484 rdi->n_qps_allocated--;
1485 spin_unlock(&rdi->n_qps_lock);
1486}
1487EXPORT_SYMBOL(rvt_dec_qp_cnt);