1/*
2 * Copyright(c) 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48#include <linux/hash.h>
49#include <linux/bitops.h>
50#include <linux/lockdep.h>
51#include <linux/vmalloc.h>
52#include <linux/slab.h>
53#include <rdma/ib_verbs.h>
54#include "qp.h"
55#include "vt.h"
56#include "trace.h"
57
58/*
59 * Note that it is OK to post send work requests in the SQE and ERR
60 * states; rvt_do_send() will process them and generate error
61 * completions as per IB 1.2 C10-96.
62 */
63const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
64 [IB_QPS_RESET] = 0,
65 [IB_QPS_INIT] = RVT_POST_RECV_OK,
66 [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
67 [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
68 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
69 RVT_PROCESS_NEXT_SEND_OK,
70 [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
71 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
72 [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
73 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
74 [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
75 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
76};
77EXPORT_SYMBOL(ib_rvt_state_ops);
78
79/*
80 * Translate ib_wr_opcode into ib_wc_opcode.
81 */
82const enum ib_wc_opcode ib_rvt_wc_opcode[] = {
83 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
84 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
85 [IB_WR_SEND] = IB_WC_SEND,
86 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
87 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
88 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
89 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
90 [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
91 [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
92 [IB_WR_REG_MR] = IB_WC_REG_MR
93};
94EXPORT_SYMBOL(ib_rvt_wc_opcode);
95
96static void get_map_page(struct rvt_qpn_table *qpt,
97 struct rvt_qpn_map *map,
98 gfp_t gfp)
99{
100 unsigned long page = get_zeroed_page(gfp);
101
102 /*
103 * Free the page if someone raced with us installing it.
104 */
105
106 spin_lock(&qpt->lock);
107 if (map->page)
108 free_page(page);
109 else
110 map->page = (void *)page;
111 spin_unlock(&qpt->lock);
112}
113
114/**
115 * init_qpn_table - initialize the QP number table for a device
116 * @qpt: the QPN table
117 */
118static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
119{
120 u32 offset, i;
121 struct rvt_qpn_map *map;
122 int ret = 0;
123
124 if (rdi->dparms.qpn_res_end < rdi->dparms.qpn_res_start)
125 return -EINVAL;
126
127 spin_lock_init(&qpt->lock);
128
129 qpt->last = rdi->dparms.qpn_start;
130 qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
131
132 /*
133 * Drivers may want some QPs beyond what we need for verbs; let them use
134 * our qpn table. No need for two. Let's go ahead and mark the bitmaps
135 * for those. The reserved range must be *after* the range which verbs
136 * will pick from.
137 */
138
139 /* Figure out number of bit maps needed before reserved range */
140 qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
141
142 /* This should always be zero */
143 offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
144
145 /* Starting with the first reserved bit map */
146 map = &qpt->map[qpt->nmaps];
147
148 rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
149 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
150 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
151 if (!map->page) {
152 get_map_page(qpt, map, GFP_KERNEL);
153 if (!map->page) {
154 ret = -ENOMEM;
155 break;
156 }
157 }
158 set_bit(offset, map->page);
159 offset++;
160 if (offset == RVT_BITS_PER_PAGE) {
161 /* next page */
162 qpt->nmaps++;
163 map++;
164 offset = 0;
165 }
166 }
167 return ret;
168}
169
170/**
171 * free_qpn_table - free the QP number table for a device
172 * @qpt: the QPN table
173 */
174static void free_qpn_table(struct rvt_qpn_table *qpt)
175{
176 int i;
177
178 for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
179 free_page((unsigned long)qpt->map[i].page);
180}
181
182/**
183 * rvt_driver_qp_init - Init driver qp resources
184 * @rdi: rvt dev structure
185 *
186 * Return: 0 on success
187 */
188int rvt_driver_qp_init(struct rvt_dev_info *rdi)
189{
190 int i;
191 int ret = -ENOMEM;
192
193 if (!rdi->dparms.qp_table_size)
194 return -EINVAL;
195
196 /*
197 * If driver is not doing any QP allocation then make sure it is
198 * providing the necessary QP functions.
199 */
200 if (!rdi->driver_f.free_all_qps ||
201 !rdi->driver_f.qp_priv_alloc ||
202 !rdi->driver_f.qp_priv_free ||
203 !rdi->driver_f.notify_qp_reset)
204 return -EINVAL;
205
206 /* allocate parent object */
207 rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
208 rdi->dparms.node);
209 if (!rdi->qp_dev)
210 return -ENOMEM;
211
212 /* allocate hash table */
213 rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
214 rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
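	/*
	 * NB (editorial): hash_32() later folds QPNs into qp_table_bits
	 * bits, so this assumes qp_table_size is a power of two; ilog2()
	 * truncates, and any excess entries would simply go unused.
	 */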
215 rdi->qp_dev->qp_table =
216 kmalloc_node(rdi->qp_dev->qp_table_size *
217 sizeof(*rdi->qp_dev->qp_table),
218 GFP_KERNEL, rdi->dparms.node);
219 if (!rdi->qp_dev->qp_table)
220 goto no_qp_table;
221
222 for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
223 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
224
225 spin_lock_init(&rdi->qp_dev->qpt_lock);
226
227 /* initialize qpn map */
228 if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
229 goto fail_table;
230
231 spin_lock_init(&rdi->n_qps_lock);
232
233 return 0;
234
235fail_table:
236 kfree(rdi->qp_dev->qp_table);
237 free_qpn_table(&rdi->qp_dev->qpn_table);
238
239no_qp_table:
240 kfree(rdi->qp_dev);
241
242 return ret;
243}
244
245/**
246 * rvt_free_all_qps - check for QPs still in use
247 * @rdi: rvt device info structure
248 *
249 * There should not be any QPs still in use.
250 * Free memory for table.
251 */
252static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
253{
254 unsigned long flags;
255 struct rvt_qp *qp;
256 unsigned n, qp_inuse = 0;
257 spinlock_t *ql; /* work around too long line below */
258
259 if (rdi->driver_f.free_all_qps)
260 qp_inuse = rdi->driver_f.free_all_qps(rdi);
261
262 qp_inuse += rvt_mcast_tree_empty(rdi);
263
264 if (!rdi->qp_dev)
265 return qp_inuse;
266
267 ql = &rdi->qp_dev->qpt_lock;
268 spin_lock_irqsave(ql, flags);
269 for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
270 qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
271 lockdep_is_held(ql));
272 RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
273
274 for (; qp; qp = rcu_dereference_protected(qp->next,
275 lockdep_is_held(ql)))
276 qp_inuse++;
277 }
278 spin_unlock_irqrestore(ql, flags);
279 synchronize_rcu();
280 return qp_inuse;
281}
282
283/**
284 * rvt_qp_exit - clean up qps on device exit
285 * @rdi: rvt dev structure
286 *
287 * Check for qp leaks and free resources.
288 */
289void rvt_qp_exit(struct rvt_dev_info *rdi)
290{
291 u32 qps_inuse = rvt_free_all_qps(rdi);
292
293 if (qps_inuse)
294 rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
295 qps_inuse);
296 if (!rdi->qp_dev)
297 return;
298
299 kfree(rdi->qp_dev->qp_table);
300 free_qpn_table(&rdi->qp_dev->qpn_table);
301 kfree(rdi->qp_dev);
302}
303
304static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
305 struct rvt_qpn_map *map, unsigned off)
306{
307 return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
308}
309
310/**
311 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
312 * IB_QPT_SMI/IB_QPT_GSI
313 * @rdi: rvt device info structure
314 * @qpt: queue pair number table pointer
315 * @port_num: IB port number, 1 based, comes from core
316 *
317 * Return: The queue pair number
318 */
319static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
320 enum ib_qp_type type, u8 port_num, gfp_t gfp)
321{
322 u32 i, offset, max_scan, qpn;
323 struct rvt_qpn_map *map;
324 u32 ret;
325
326 if (rdi->driver_f.alloc_qpn)
327 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);
328
329 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
330 unsigned n;
331
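		/*
		 * Each port owns two bits in qpt->flags: the low bit of
		 * the pair marks QP0 (SMI) as allocated and the high bit
		 * marks QP1 (GSI), so the bit of interest here is
		 * 1 << (ret + 2 * (port_num - 1)).
		 */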
332 ret = type == IB_QPT_GSI;
333 n = 1 << (ret + 2 * (port_num - 1));
334 spin_lock(&qpt->lock);
335 if (qpt->flags & n)
336 ret = -EINVAL;
337 else
338 qpt->flags |= n;
339 spin_unlock(&qpt->lock);
340 goto bail;
341 }
342
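	/*
	 * Advance from the last allocated QPN by qpt->incr (qpn_inc
	 * scaled by the QoS shift); on wraparound, restart at the
	 * increment with bit 0 inverted so successive passes alternate
	 * between even and odd QPNs.
	 */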
343 qpn = qpt->last + qpt->incr;
344 if (qpn >= RVT_QPN_MAX)
345 qpn = qpt->incr | ((qpt->last & 1) ^ 1);
346 /* offset carries bit 0 */
347 offset = qpn & RVT_BITS_PER_PAGE_MASK;
348 map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
349 max_scan = qpt->nmaps - !offset;
350 for (i = 0;;) {
351 if (unlikely(!map->page)) {
352 get_map_page(qpt, map, gfp);
353 if (unlikely(!map->page))
354 break;
355 }
356 do {
357 if (!test_and_set_bit(offset, map->page)) {
358 qpt->last = qpn;
359 ret = qpn;
360 goto bail;
361 }
362 offset += qpt->incr;
363 /*
364 * This qpn might be bogus if offset >= RVT_BITS_PER_PAGE.
365 * That is OK. It gets re-assigned below
366 */
367 qpn = mk_qpn(qpt, map, offset);
368 } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
369 /*
370 * In order to keep the number of pages allocated to a
371 * minimum, we scan all the existing pages before increasing
372 * the size of the bitmap table.
373 */
374 if (++i > max_scan) {
375 if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
376 break;
377 map = &qpt->map[qpt->nmaps++];
378 /* start at incr with current bit 0 */
379 offset = qpt->incr | (offset & 1);
380 } else if (map < &qpt->map[qpt->nmaps]) {
381 ++map;
382 /* start at incr with current bit 0 */
383 offset = qpt->incr | (offset & 1);
384 } else {
385 map = &qpt->map[0];
386 /* wrap to first map page, invert bit 0 */
387 offset = qpt->incr | ((offset & 1) ^ 1);
388 }
389 /* there can be no set bits in low-order QoS bits */
390 WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
391 qpn = mk_qpn(qpt, map, offset);
392 }
393
394 ret = -ENOMEM;
395
396bail:
397 return ret;
398}
399
400static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
401{
402 struct rvt_qpn_map *map;
403
404 map = qpt->map + qpn / RVT_BITS_PER_PAGE;
405 if (map->page)
406 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
407}
408
409/**
410 * rvt_clear_mr_refs - Drop held mr refs
411 * @qp: rvt qp data structure
412 * @clr_sends: Whether to clear the send side or not
413 */
414static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
415{
416 unsigned n;
417 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
418
419 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
420 rvt_put_ss(&qp->s_rdma_read_sge);
421
422 rvt_put_ss(&qp->r_sge);
423
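	/*
	 * Walk the posted-but-not-completed range s_last..s_head and
	 * release the MR reference each SGE took when it was posted.
	 */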
424 if (clr_sends) {
425 while (qp->s_last != qp->s_head) {
426 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
427 unsigned i;
428
429 for (i = 0; i < wqe->wr.num_sge; i++) {
430 struct rvt_sge *sge = &wqe->sg_list[i];
431
432 rvt_put_mr(sge->mr);
433 }
434 if (qp->ibqp.qp_type == IB_QPT_UD ||
435 qp->ibqp.qp_type == IB_QPT_SMI ||
436 qp->ibqp.qp_type == IB_QPT_GSI)
437 atomic_dec(&ibah_to_rvtah(
438 wqe->ud_wr.ah)->refcount);
439 if (++qp->s_last >= qp->s_size)
440 qp->s_last = 0;
441 smp_wmb(); /* see qp_set_savail */
442 }
443 if (qp->s_rdma_mr) {
444 rvt_put_mr(qp->s_rdma_mr);
445 qp->s_rdma_mr = NULL;
446 }
447 }
448
449 if (qp->ibqp.qp_type != IB_QPT_RC)
450 return;
451
452 for (n = 0; n < rvt_max_atomic(rdi); n++) {
453 struct rvt_ack_entry *e = &qp->s_ack_queue[n];
454
455 if (e->rdma_sge.mr) {
456 rvt_put_mr(e->rdma_sge.mr);
457 e->rdma_sge.mr = NULL;
458 }
459 }
460}
461
462/**
463 * rvt_remove_qp - remove qp from table
464 * @rdi: rvt dev struct
465 * @qp: qp to remove
466 *
467 * Remove the QP from the table so it can't be found asynchronously by
468 * the receive routine.
469 */
470static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
471{
472 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
473 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
474 unsigned long flags;
475 int removed = 1;
476
477 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
478
479 if (rcu_dereference_protected(rvp->qp[0],
480 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
481 RCU_INIT_POINTER(rvp->qp[0], NULL);
482 } else if (rcu_dereference_protected(rvp->qp[1],
483 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
484 RCU_INIT_POINTER(rvp->qp[1], NULL);
485 } else {
486 struct rvt_qp *q;
487 struct rvt_qp __rcu **qpp;
488
489 removed = 0;
490 qpp = &rdi->qp_dev->qp_table[n];
491 for (; (q = rcu_dereference_protected(*qpp,
492 lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
493 qpp = &q->next) {
494 if (q == qp) {
495 RCU_INIT_POINTER(*qpp,
496 rcu_dereference_protected(qp->next,
497 lockdep_is_held(&rdi->qp_dev->qpt_lock)));
498 removed = 1;
499 trace_rvt_qpremove(qp, n);
500 break;
501 }
502 }
503 }
504
505 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
506 if (removed) {
507 synchronize_rcu();
508 rvt_put_qp(qp);
509 }
510}
511
512/**
513 * rvt_init_qp - initialize the QP state to the reset state
514 * @qp: the QP to init or reinit
515 * @type: the QP type
516 *
517 * This function is called from both rvt_create_qp() and
518 * rvt_reset_qp(). The difference is that the reset
519 * path takes the necessary locks to protect against
520 * concurrent access.
521 */
522static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
523 enum ib_qp_type type)
524{
525 qp->remote_qpn = 0;
526 qp->qkey = 0;
527 qp->qp_access_flags = 0;
528 qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
529 qp->s_hdrwords = 0;
530 qp->s_wqe = NULL;
531 qp->s_draining = 0;
532 qp->s_next_psn = 0;
533 qp->s_last_psn = 0;
534 qp->s_sending_psn = 0;
535 qp->s_sending_hpsn = 0;
536 qp->s_psn = 0;
537 qp->r_psn = 0;
538 qp->r_msn = 0;
539 if (type == IB_QPT_RC) {
540 qp->s_state = IB_OPCODE_RC_SEND_LAST;
541 qp->r_state = IB_OPCODE_RC_SEND_LAST;
542 } else {
543 qp->s_state = IB_OPCODE_UC_SEND_LAST;
544 qp->r_state = IB_OPCODE_UC_SEND_LAST;
545 }
546 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
547 qp->r_nak_state = 0;
548 qp->r_aflags = 0;
549 qp->r_flags = 0;
550 qp->s_head = 0;
551 qp->s_tail = 0;
552 qp->s_cur = 0;
553 qp->s_acked = 0;
554 qp->s_last = 0;
555 qp->s_ssn = 1;
556 qp->s_lsn = 0;
557 qp->s_mig_state = IB_MIG_MIGRATED;
558 qp->r_head_ack_queue = 0;
559 qp->s_tail_ack_queue = 0;
560 qp->s_num_rd_atomic = 0;
561 if (qp->r_rq.wq) {
562 qp->r_rq.wq->head = 0;
563 qp->r_rq.wq->tail = 0;
564 }
565 qp->r_sge.num_sge = 0;
566 atomic_set(&qp->s_reserved_used, 0);
567}
568
569/**
570 * rvt_reset_qp - initialize the QP state to the reset state
571 * @qp: the QP to reset
572 * @type: the QP type
573 *
574 * r_lock, s_hlock, and s_lock are required to be held by the caller
575 */
576static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
577 enum ib_qp_type type)
578 __must_hold(&qp->s_lock)
579 __must_hold(&qp->s_hlock)
580 __must_hold(&qp->r_lock)
581{
582 lockdep_assert_held(&qp->r_lock);
583 lockdep_assert_held(&qp->s_hlock);
584 lockdep_assert_held(&qp->s_lock);
585 if (qp->state != IB_QPS_RESET) {
586 qp->state = IB_QPS_RESET;
587
588 /* Let drivers flush their waitlist */
589 rdi->driver_f.flush_qp_waiters(qp);
590 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
591 spin_unlock(&qp->s_lock);
592 spin_unlock(&qp->s_hlock);
593 spin_unlock_irq(&qp->r_lock);
594
595 /* Stop the send queue and the retry timer */
596 rdi->driver_f.stop_send_queue(qp);
597
598 /* Wait for things to stop */
599 rdi->driver_f.quiesce_qp(qp);
600
601 /* take qp out of the hash and wait for it to be unused */
602 rvt_remove_qp(rdi, qp);
603 wait_event(qp->wait, !atomic_read(&qp->refcount));
604
605 /* grab the locks b/c they were locked at call time */
606 spin_lock_irq(&qp->r_lock);
607 spin_lock(&qp->s_hlock);
608 spin_lock(&qp->s_lock);
609
610 rvt_clear_mr_refs(qp, 1);
611 /*
612 * Let the driver do any tear down or re-init it needs to for
613 * a qp that has been reset
614 */
615 rdi->driver_f.notify_qp_reset(qp);
616 }
617 rvt_init_qp(rdi, qp, type);
618 lockdep_assert_held(&qp->r_lock);
619 lockdep_assert_held(&qp->s_hlock);
620 lockdep_assert_held(&qp->s_lock);
621}
622
623/**
624 * rvt_create_qp - create a queue pair for a device
625 * @ibpd: the protection domain whose device we create the queue pair for
626 * @init_attr: the attributes of the queue pair
627 * @udata: user data for libibverbs.so
628 *
629 * Queue pair creation is mostly an rvt issue. However, drivers have their own
630 * unique idea of what queue pair numbers mean. For instance there is a reserved
631 * range for PSM.
632 *
633 * Return: the queue pair on success, otherwise returns an errno.
634 *
635 * Called by the ib_create_qp() core verbs function.
636 */
637struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
638 struct ib_qp_init_attr *init_attr,
639 struct ib_udata *udata)
640{
641 struct rvt_qp *qp;
642 int err;
643 struct rvt_swqe *swq = NULL;
644 size_t sz;
645 size_t sg_list_sz;
646 struct ib_qp *ret = ERR_PTR(-ENOMEM);
647 struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
648 void *priv = NULL;
649 gfp_t gfp;
650 size_t sqsize;
651
652 if (!rdi)
653 return ERR_PTR(-EINVAL);
654
655 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
656 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
657 init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
658 return ERR_PTR(-EINVAL);
659
660 /* GFP_NOIO is applicable to RC QPs only */
661
662 if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
663 init_attr->qp_type != IB_QPT_RC)
664 return ERR_PTR(-EINVAL);
665
666 gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
667 GFP_NOIO : GFP_KERNEL;
668
669 /* Check receive queue parameters if no SRQ is specified. */
670 if (!init_attr->srq) {
671 if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
672 init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
673 return ERR_PTR(-EINVAL);
674
675 if (init_attr->cap.max_send_sge +
676 init_attr->cap.max_send_wr +
677 init_attr->cap.max_recv_sge +
678 init_attr->cap.max_recv_wr == 0)
679 return ERR_PTR(-EINVAL);
680 }
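	/*
	 * Size the send ring with one extra slot so a full ring can be
	 * told apart from an empty one, plus the slots the driver
	 * reserves for its own internal operations.
	 */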
681 sqsize =
682 init_attr->cap.max_send_wr + 1 +
683 rdi->dparms.reserved_operations;
684 switch (init_attr->qp_type) {
685 case IB_QPT_SMI:
686 case IB_QPT_GSI:
687 if (init_attr->port_num == 0 ||
688 init_attr->port_num > ibpd->device->phys_port_cnt)
689 return ERR_PTR(-EINVAL);
690 case IB_QPT_UC:
691 case IB_QPT_RC:
692 case IB_QPT_UD:
693 sz = sizeof(struct rvt_sge) *
694 init_attr->cap.max_send_sge +
695 sizeof(struct rvt_swqe);
696 if (gfp == GFP_NOIO)
697 swq = __vmalloc(
698 sqsize * sz,
699 gfp | __GFP_ZERO, PAGE_KERNEL);
700 else
701 swq = vzalloc_node(
702 sqsize * sz,
703 rdi->dparms.node);
704 if (!swq)
705 return ERR_PTR(-ENOMEM);
706
707 sz = sizeof(*qp);
708 sg_list_sz = 0;
709 if (init_attr->srq) {
710 struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
711
712 if (srq->rq.max_sge > 1)
713 sg_list_sz = sizeof(*qp->r_sg_list) *
714 (srq->rq.max_sge - 1);
715 } else if (init_attr->cap.max_recv_sge > 1)
716 sg_list_sz = sizeof(*qp->r_sg_list) *
717 (init_attr->cap.max_recv_sge - 1);
718 qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
719 if (!qp)
720 goto bail_swq;
721
722 RCU_INIT_POINTER(qp->next, NULL);
723 if (init_attr->qp_type == IB_QPT_RC) {
724 qp->s_ack_queue =
725 kzalloc_node(
726 sizeof(*qp->s_ack_queue) *
727 rvt_max_atomic(rdi),
728 gfp,
729 rdi->dparms.node);
730 if (!qp->s_ack_queue)
731 goto bail_qp;
732 }
733
734 /*
735 * Driver needs to set up its private QP structure and do any
736 * initialization that is needed.
737 */
738 priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
739 if (IS_ERR(priv)) {
740 ret = priv;
741 goto bail_qp;
742 }
743 qp->priv = priv;
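	/*
	 * IBTA encodes the local ACK timeout as 4.096 usec * 2^timeout:
	 * 4096 << timeout is that interval in nanoseconds, and the
	 * divide by 1000 converts it to microseconds for
	 * usecs_to_jiffies().
	 */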
744 qp->timeout_jiffies =
745 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
746 1000UL);
747 if (init_attr->srq) {
748 sz = 0;
749 } else {
750 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
751 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
752 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
753 sizeof(struct rvt_rwqe);
754 if (udata)
755 qp->r_rq.wq = vmalloc_user(
756 sizeof(struct rvt_rwq) +
757 qp->r_rq.size * sz);
758 else if (gfp == GFP_NOIO)
759 qp->r_rq.wq = __vmalloc(
760 sizeof(struct rvt_rwq) +
761 qp->r_rq.size * sz,
762 gfp | __GFP_ZERO, PAGE_KERNEL);
763 else
764 qp->r_rq.wq = vzalloc_node(
765 sizeof(struct rvt_rwq) +
766 qp->r_rq.size * sz,
767 rdi->dparms.node);
768 if (!qp->r_rq.wq)
769 goto bail_driver_priv;
770 }
771
772 /*
773 * ib_create_qp() will initialize qp->ibqp
774 * except for qp->ibqp.qp_num.
775 */
776 spin_lock_init(&qp->r_lock);
777 spin_lock_init(&qp->s_hlock);
778 spin_lock_init(&qp->s_lock);
779 spin_lock_init(&qp->r_rq.lock);
780 atomic_set(&qp->refcount, 0);
781 atomic_set(&qp->local_ops_pending, 0);
782 init_waitqueue_head(&qp->wait);
783 init_timer(&qp->s_timer);
784 qp->s_timer.data = (unsigned long)qp;
785 INIT_LIST_HEAD(&qp->rspwait);
786 qp->state = IB_QPS_RESET;
787 qp->s_wq = swq;
788 qp->s_size = sqsize;
789 qp->s_avail = init_attr->cap.max_send_wr;
790 qp->s_max_sge = init_attr->cap.max_send_sge;
791 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
792 qp->s_flags = RVT_S_SIGNAL_REQ_WR;
793
794 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
795 init_attr->qp_type,
796 init_attr->port_num, gfp);
797 if (err < 0) {
798 ret = ERR_PTR(err);
799 goto bail_rq_wq;
800 }
801 qp->ibqp.qp_num = err;
802 qp->port_num = init_attr->port_num;
803 rvt_init_qp(rdi, qp, init_attr->qp_type);
804 break;
805
806 default:
807 /* Don't support raw QPs */
808 return ERR_PTR(-EINVAL);
809 }
810
811 init_attr->cap.max_inline_data = 0;
812
813 /*
814 * Return the address of the RWQ as the offset to mmap.
815 * See rvt_mmap() for details.
816 */
817 if (udata && udata->outlen >= sizeof(__u64)) {
818 if (!qp->r_rq.wq) {
819 __u64 offset = 0;
820
821 err = ib_copy_to_udata(udata, &offset,
822 sizeof(offset));
823 if (err) {
824 ret = ERR_PTR(err);
825 goto bail_qpn;
826 }
827 } else {
828 u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
829
830 qp->ip = rvt_create_mmap_info(rdi, s,
831 ibpd->uobject->context,
832 qp->r_rq.wq);
833 if (!qp->ip) {
834 ret = ERR_PTR(-ENOMEM);
835 goto bail_qpn;
836 }
837
838 err = ib_copy_to_udata(udata, &qp->ip->offset,
839 sizeof(qp->ip->offset));
840 if (err) {
841 ret = ERR_PTR(err);
842 goto bail_ip;
843 }
844 }
845 qp->pid = current->pid;
846 }
847
848 spin_lock(&rdi->n_qps_lock);
849 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
850 spin_unlock(&rdi->n_qps_lock);
851 ret = ERR_PTR(-ENOMEM);
852 goto bail_ip;
853 }
854
855 rdi->n_qps_allocated++;
856 /*
857 * Maintain a busy_jiffies variable that will be added to the timeout
858 * period in mod_retry_timer and add_retry_timer. This busy jiffies
859 * is scaled by the number of rc qps created for the device to reduce
860 * the number of timeouts occurring when there is a large number of
861 * qps. busy_jiffies is incremented every rc qp scaling interval.
862 * The scaling interval is selected based on extensive performance
863 * evaluation of targeted workloads.
864 */
865 if (init_attr->qp_type == IB_QPT_RC) {
866 rdi->n_rc_qps++;
867 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
868 }
869 spin_unlock(&rdi->n_qps_lock);
870
871 if (qp->ip) {
872 spin_lock_irq(&rdi->pending_lock);
873 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
874 spin_unlock_irq(&rdi->pending_lock);
875 }
876
877 ret = &qp->ibqp;
878
879 /*
880 * We have our QP and it's good, now keep track of what types of opcodes
881 * can be processed on this QP. We do this by keeping track of what the
882 * 3 high order bits of the opcode are.
883 */
884 switch (init_attr->qp_type) {
885 case IB_QPT_SMI:
886 case IB_QPT_GSI:
887 case IB_QPT_UD:
888 qp->allowed_ops = IB_OPCODE_UD;
889 break;
890 case IB_QPT_RC:
891 qp->allowed_ops = IB_OPCODE_RC;
892 break;
893 case IB_QPT_UC:
894 qp->allowed_ops = IB_OPCODE_UC;
895 break;
896 default:
897 ret = ERR_PTR(-EINVAL);
898 goto bail_ip;
899 }
900
901 return ret;
902
903bail_ip:
904 if (qp->ip)
905 kref_put(&qp->ip->ref, rvt_release_mmap_info);
906
907bail_qpn:
908 free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
909
910bail_rq_wq:
911 if (!qp->ip)
912 vfree(qp->r_rq.wq);
913
914bail_driver_priv:
915 rdi->driver_f.qp_priv_free(rdi, qp);
916
917bail_qp:
918 kfree(qp->s_ack_queue);
919 kfree(qp);
920
921bail_swq:
922 vfree(swq);
923
924 return ret;
925}
926
927/**
928 * rvt_error_qp - put a QP into the error state
929 * @qp: the QP to put into the error state
930 * @err: the receive completion error to signal if a RWQE is active
931 *
932 * Flushes both send and receive work queues.
933 *
934 * Return: true if last WQE event should be generated.
935 * The QP r_lock and s_lock should be held and interrupts disabled.
936 * If we are already in error state, just return.
937 */
938int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
939{
940 struct ib_wc wc;
941 int ret = 0;
942 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
943
944 lockdep_assert_held(&qp->r_lock);
945 lockdep_assert_held(&qp->s_lock);
946 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
947 goto bail;
948
949 qp->state = IB_QPS_ERR;
950
951 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
952 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
953 del_timer(&qp->s_timer);
954 }
955
956 if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
957 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
958
959 rdi->driver_f.notify_error_qp(qp);
960
961 /* Schedule the sending tasklet to drain the send work queue. */
962 if (ACCESS_ONCE(qp->s_last) != qp->s_head)
963 rdi->driver_f.schedule_send(qp);
964
965 rvt_clear_mr_refs(qp, 0);
966
967 memset(&wc, 0, sizeof(wc));
968 wc.qp = &qp->ibqp;
969 wc.opcode = IB_WC_RECV;
970
971 if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
972 wc.wr_id = qp->r_wr_id;
973 wc.status = err;
974 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
975 }
976 wc.status = IB_WC_WR_FLUSH_ERR;
977
978 if (qp->r_rq.wq) {
979 struct rvt_rwq *wq;
980 u32 head;
981 u32 tail;
982
983 spin_lock(&qp->r_rq.lock);
984
985 /* sanity check pointers before trusting them */
986 wq = qp->r_rq.wq;
987 head = wq->head;
988 if (head >= qp->r_rq.size)
989 head = 0;
990 tail = wq->tail;
991 if (tail >= qp->r_rq.size)
992 tail = 0;
993 while (tail != head) {
994 wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
995 if (++tail >= qp->r_rq.size)
996 tail = 0;
997 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
998 }
999 wq->tail = tail;
1000
1001 spin_unlock(&qp->r_rq.lock);
1002 } else if (qp->ibqp.event_handler) {
1003 ret = 1;
1004 }
1005
1006bail:
1007 return ret;
1008}
1009EXPORT_SYMBOL(rvt_error_qp);
1010
1011/*
1012 * Put the QP into the hash table.
1013 * The hash table holds a reference to the QP.
1014 */
1015static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
1016{
1017 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
1018 unsigned long flags;
1019
1020 rvt_get_qp(qp);
1021 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
1022
1023 if (qp->ibqp.qp_num <= 1) {
1024 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
1025 } else {
1026 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
1027
1028 qp->next = rdi->qp_dev->qp_table[n];
1029 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
1030 trace_rvt_qpinsert(qp, n);
1031 }
1032
1033 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
1034}
1035
1036/**
1037 * rvt_modify_qp - modify the attributes of a queue pair
1038 * @ibqp: the queue pair whose attributes we're modifying
1039 * @attr: the new attributes
1040 * @attr_mask: the mask of attributes to modify
1041 * @udata: user data for libibverbs.so
1042 *
1043 * Return: 0 on success, otherwise returns an errno.
1044 */
1045int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1046 int attr_mask, struct ib_udata *udata)
1047{
1048 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1049 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1050 enum ib_qp_state cur_state, new_state;
1051 struct ib_event ev;
1052 int lastwqe = 0;
1053 int mig = 0;
1054 int pmtu = 0; /* for gcc warning only */
1055 enum rdma_link_layer link;
1056
1057 link = rdma_port_get_link_layer(ibqp->device, qp->port_num);
1058
1059 spin_lock_irq(&qp->r_lock);
1060 spin_lock(&qp->s_hlock);
1061 spin_lock(&qp->s_lock);
1062
1063 cur_state = attr_mask & IB_QP_CUR_STATE ?
1064 attr->cur_qp_state : qp->state;
1065 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1066
1067 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1068 attr_mask, link))
1069 goto inval;
1070
1071 if (rdi->driver_f.check_modify_qp &&
1072 rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1073 goto inval;
1074
1075 if (attr_mask & IB_QP_AV) {
1076 if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
1077 goto inval;
1078 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1079 goto inval;
1080 }
1081
1082 if (attr_mask & IB_QP_ALT_PATH) {
1083 if (attr->alt_ah_attr.dlid >=
1084 be16_to_cpu(IB_MULTICAST_LID_BASE))
1085 goto inval;
1086 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1087 goto inval;
1088 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1089 goto inval;
1090 }
1091
1092 if (attr_mask & IB_QP_PKEY_INDEX)
1093 if (attr->pkey_index >= rvt_get_npkeys(rdi))
1094 goto inval;
1095
1096 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1097 if (attr->min_rnr_timer > 31)
1098 goto inval;
1099
1100 if (attr_mask & IB_QP_PORT)
1101 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1102 qp->ibqp.qp_type == IB_QPT_GSI ||
1103 attr->port_num == 0 ||
1104 attr->port_num > ibqp->device->phys_port_cnt)
1105 goto inval;
1106
1107 if (attr_mask & IB_QP_DEST_QPN)
1108 if (attr->dest_qp_num > RVT_QPN_MASK)
1109 goto inval;
1110
1111 if (attr_mask & IB_QP_RETRY_CNT)
1112 if (attr->retry_cnt > 7)
1113 goto inval;
1114
1115 if (attr_mask & IB_QP_RNR_RETRY)
1116 if (attr->rnr_retry > 7)
1117 goto inval;
1118
1119 /*
1120 * Don't allow invalid path_mtu values. OK to set greater
1121 * than the active mtu (or even the max_cap, if we have tuned
1122 * that to a small mtu). We'll set qp->path_mtu
1123 * to the lesser of requested attribute mtu and active,
1124 * for packetizing messages.
1125 * Note that the QP port has to be set in INIT and MTU in RTR.
1126 */
1127 if (attr_mask & IB_QP_PATH_MTU) {
1128 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1129 if (pmtu < 0)
1130 goto inval;
1131 }
1132
1133 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1134 if (attr->path_mig_state == IB_MIG_REARM) {
1135 if (qp->s_mig_state == IB_MIG_ARMED)
1136 goto inval;
1137 if (new_state != IB_QPS_RTS)
1138 goto inval;
1139 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1140 if (qp->s_mig_state == IB_MIG_REARM)
1141 goto inval;
1142 if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1143 goto inval;
1144 if (qp->s_mig_state == IB_MIG_ARMED)
1145 mig = 1;
1146 } else {
1147 goto inval;
1148 }
1149 }
1150
1151 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1152 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1153 goto inval;
1154
1155 switch (new_state) {
1156 case IB_QPS_RESET:
1157 if (qp->state != IB_QPS_RESET)
1158 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1159 break;
1160
1161 case IB_QPS_RTR:
1162 /* Allow event to re-trigger if QP set to RTR more than once */
1163 qp->r_flags &= ~RVT_R_COMM_EST;
1164 qp->state = new_state;
1165 break;
1166
1167 case IB_QPS_SQD:
1168 qp->s_draining = qp->s_last != qp->s_cur;
1169 qp->state = new_state;
1170 break;
1171
1172 case IB_QPS_SQE:
1173 if (qp->ibqp.qp_type == IB_QPT_RC)
1174 goto inval;
1175 qp->state = new_state;
1176 break;
1177
1178 case IB_QPS_ERR:
1179 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1180 break;
1181
1182 default:
1183 qp->state = new_state;
1184 break;
1185 }
1186
1187 if (attr_mask & IB_QP_PKEY_INDEX)
1188 qp->s_pkey_index = attr->pkey_index;
1189
1190 if (attr_mask & IB_QP_PORT)
1191 qp->port_num = attr->port_num;
1192
1193 if (attr_mask & IB_QP_DEST_QPN)
1194 qp->remote_qpn = attr->dest_qp_num;
1195
1196 if (attr_mask & IB_QP_SQ_PSN) {
1197 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1198 qp->s_psn = qp->s_next_psn;
1199 qp->s_sending_psn = qp->s_next_psn;
1200 qp->s_last_psn = qp->s_next_psn - 1;
1201 qp->s_sending_hpsn = qp->s_last_psn;
1202 }
1203
1204 if (attr_mask & IB_QP_RQ_PSN)
1205 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1206
1207 if (attr_mask & IB_QP_ACCESS_FLAGS)
1208 qp->qp_access_flags = attr->qp_access_flags;
1209
1210 if (attr_mask & IB_QP_AV) {
1211 qp->remote_ah_attr = attr->ah_attr;
1212 qp->s_srate = attr->ah_attr.static_rate;
1213 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1214 }
1215
1216 if (attr_mask & IB_QP_ALT_PATH) {
1217 qp->alt_ah_attr = attr->alt_ah_attr;
1218 qp->s_alt_pkey_index = attr->alt_pkey_index;
1219 }
1220
1221 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1222 qp->s_mig_state = attr->path_mig_state;
1223 if (mig) {
1224 qp->remote_ah_attr = qp->alt_ah_attr;
1225 qp->port_num = qp->alt_ah_attr.port_num;
1226 qp->s_pkey_index = qp->s_alt_pkey_index;
1227 }
1228 }
1229
1230 if (attr_mask & IB_QP_PATH_MTU) {
1231 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1232 qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1233 qp->log_pmtu = ilog2(qp->pmtu);
1234 }
1235
1236 if (attr_mask & IB_QP_RETRY_CNT) {
1237 qp->s_retry_cnt = attr->retry_cnt;
1238 qp->s_retry = attr->retry_cnt;
1239 }
1240
1241 if (attr_mask & IB_QP_RNR_RETRY) {
1242 qp->s_rnr_retry_cnt = attr->rnr_retry;
1243 qp->s_rnr_retry = attr->rnr_retry;
1244 }
1245
1246 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1247 qp->r_min_rnr_timer = attr->min_rnr_timer;
1248
1249 if (attr_mask & IB_QP_TIMEOUT) {
1250 qp->timeout = attr->timeout;
1251 qp->timeout_jiffies =
1252 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1253 1000UL);
1254 }
1255
1256 if (attr_mask & IB_QP_QKEY)
1257 qp->qkey = attr->qkey;
1258
1259 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1260 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1261
1262 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1263 qp->s_max_rd_atomic = attr->max_rd_atomic;
1264
1265 if (rdi->driver_f.modify_qp)
1266 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1267
1268 spin_unlock(&qp->s_lock);
1269 spin_unlock(&qp->s_hlock);
1270 spin_unlock_irq(&qp->r_lock);
1271
1272 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1273 rvt_insert_qp(rdi, qp);
1274
1275 if (lastwqe) {
1276 ev.device = qp->ibqp.device;
1277 ev.element.qp = &qp->ibqp;
1278 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1279 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1280 }
1281 if (mig) {
1282 ev.device = qp->ibqp.device;
1283 ev.element.qp = &qp->ibqp;
1284 ev.event = IB_EVENT_PATH_MIG;
1285 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1286 }
1287 return 0;
1288
1289inval:
1290 spin_unlock(&qp->s_lock);
1291 spin_unlock(&qp->s_hlock);
1292 spin_unlock_irq(&qp->r_lock);
1293 return -EINVAL;
1294}
1295
1296/** rvt_free_qpn - Free a qpn from the bit map
1297 * @qpt: QP table
1298 * @qpn: queue pair number to free
1299 */
1300static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
1301{
1302 struct rvt_qpn_map *map;
1303
1304 map = qpt->map + qpn / RVT_BITS_PER_PAGE;
1305 if (map->page)
1306 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
1307}
1308
1309/**
1310 * rvt_destroy_qp - destroy a queue pair
1311 * @ibqp: the queue pair to destroy
1312 *
1313 * Note that this can be called while the QP is actively sending or
1314 * receiving!
1315 *
1316 * Return: 0 on success.
1317 */
1318int rvt_destroy_qp(struct ib_qp *ibqp)
1319{
1320 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1321 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1322
1323 spin_lock_irq(&qp->r_lock);
1324 spin_lock(&qp->s_hlock);
1325 spin_lock(&qp->s_lock);
1326 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1327 spin_unlock(&qp->s_lock);
1328 spin_unlock(&qp->s_hlock);
1329 spin_unlock_irq(&qp->r_lock);
1330
1331 /* qpn is now available for use again */
1332 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1333
1334 spin_lock(&rdi->n_qps_lock);
1335 rdi->n_qps_allocated--;
1336 if (qp->ibqp.qp_type == IB_QPT_RC) {
1337 rdi->n_rc_qps--;
1338 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1339 }
1340 spin_unlock(&rdi->n_qps_lock);
1341
1342 if (qp->ip)
1343 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1344 else
1345 vfree(qp->r_rq.wq);
1346 vfree(qp->s_wq);
1347 rdi->driver_f.qp_priv_free(rdi, qp);
1348 kfree(qp->s_ack_queue);
1349 kfree(qp);
1350 return 0;
1351}
1352
1353/**
1354 * rvt_query_qp - query an ibqp
1355 * @ibqp: IB qp to query
1356 * @attr: attr struct to fill in
1357 * @attr_mask: attr mask ignored
1358 * @init_attr: struct to fill in
1359 *
1360 * Return: always 0
1361 */
1362int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1363 int attr_mask, struct ib_qp_init_attr *init_attr)
1364{
1365 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1366 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1367
1368 attr->qp_state = qp->state;
1369 attr->cur_qp_state = attr->qp_state;
1370 attr->path_mtu = qp->path_mtu;
1371 attr->path_mig_state = qp->s_mig_state;
1372 attr->qkey = qp->qkey;
1373 attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1374 attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1375 attr->dest_qp_num = qp->remote_qpn;
1376 attr->qp_access_flags = qp->qp_access_flags;
1377 attr->cap.max_send_wr = qp->s_size - 1 -
1378 rdi->dparms.reserved_operations;
1379 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1380 attr->cap.max_send_sge = qp->s_max_sge;
1381 attr->cap.max_recv_sge = qp->r_rq.max_sge;
1382 attr->cap.max_inline_data = 0;
1383 attr->ah_attr = qp->remote_ah_attr;
1384 attr->alt_ah_attr = qp->alt_ah_attr;
1385 attr->pkey_index = qp->s_pkey_index;
1386 attr->alt_pkey_index = qp->s_alt_pkey_index;
1387 attr->en_sqd_async_notify = 0;
1388 attr->sq_draining = qp->s_draining;
1389 attr->max_rd_atomic = qp->s_max_rd_atomic;
1390 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1391 attr->min_rnr_timer = qp->r_min_rnr_timer;
1392 attr->port_num = qp->port_num;
1393 attr->timeout = qp->timeout;
1394 attr->retry_cnt = qp->s_retry_cnt;
1395 attr->rnr_retry = qp->s_rnr_retry_cnt;
1396 attr->alt_port_num = qp->alt_ah_attr.port_num;
1397 attr->alt_timeout = qp->alt_timeout;
1398
1399 init_attr->event_handler = qp->ibqp.event_handler;
1400 init_attr->qp_context = qp->ibqp.qp_context;
1401 init_attr->send_cq = qp->ibqp.send_cq;
1402 init_attr->recv_cq = qp->ibqp.recv_cq;
1403 init_attr->srq = qp->ibqp.srq;
1404 init_attr->cap = attr->cap;
1405 if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1406 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
1407 else
1408 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1409 init_attr->qp_type = qp->ibqp.qp_type;
1410 init_attr->port_num = qp->port_num;
1411 return 0;
1412}
1413
1414/**
1415 * rvt_post_recv - post a receive on a QP
1416 * @ibqp: the QP to post the receive on
1417 * @wr: the WR to post
1418 * @bad_wr: the first bad WR is put here
1419 *
1420 * This may be called from interrupt context.
1421 *
1422 * Return: 0 on success otherwise errno
1423 */
1424int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1425 struct ib_recv_wr **bad_wr)
1426{
1427 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1428 struct rvt_rwq *wq = qp->r_rq.wq;
1429 unsigned long flags;
1430 int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1431 !qp->ibqp.srq;
1432
1433 /* Check that state is OK to post receive. */
1434 if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1435 *bad_wr = wr;
1436 return -EINVAL;
1437 }
1438
1439 for (; wr; wr = wr->next) {
1440 struct rvt_rwqe *wqe;
1441 u32 next;
1442 int i;
1443
1444 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1445 *bad_wr = wr;
1446 return -EINVAL;
1447 }
1448
1449 spin_lock_irqsave(&qp->r_rq.lock, flags);
1450 next = wq->head + 1;
1451 if (next >= qp->r_rq.size)
1452 next = 0;
1453 if (next == wq->tail) {
1454 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1455 *bad_wr = wr;
1456 return -ENOMEM;
1457 }
1458 if (unlikely(qp_err_flush)) {
1459 struct ib_wc wc;
1460
1461 memset(&wc, 0, sizeof(wc));
1462 wc.qp = &qp->ibqp;
1463 wc.opcode = IB_WC_RECV;
1464 wc.wr_id = wr->wr_id;
1465 wc.status = IB_WC_WR_FLUSH_ERR;
1466 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1467 } else {
1468 wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1469 wqe->wr_id = wr->wr_id;
1470 wqe->num_sge = wr->num_sge;
1471 for (i = 0; i < wr->num_sge; i++)
1472 wqe->sg_list[i] = wr->sg_list[i];
1473 /*
1474 * Make sure queue entry is written
1475 * before the head index.
1476 */
1477 smp_wmb();
1478 wq->head = next;
1479 }
1480 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1481 }
1482 return 0;
1483}
1484
1485/**
1486 * rvt_qp_valid_operation - validate post send wr request
1487 * @qp - the qp
1488 * @post_parms - the post send table for the driver
1489 * @wr - the work request
1490 *
1491 * The routine validates the operation based on the
1492 * validation table and returns the length of the operation
1493 * which can extend beyond the ib_send_wr. Operation
1494 * dependent flags key atomic operation validation.
1495 *
1496 * There is an exception for UD qps that validates the pd and
1497 * overrides the length to include the additional UD specific
1498 * length.
1499 *
1500 * Returns a negative error or the length of the work request
1501 * for building the swqe.
1502 */
1503static inline int rvt_qp_valid_operation(
1504 struct rvt_qp *qp,
1505 const struct rvt_operation_params *post_parms,
1506 struct ib_send_wr *wr)
1507{
1508 int len;
1509
1510 if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1511 return -EINVAL;
1512 if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1513 return -EINVAL;
1514 if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1515 ibpd_to_rvtpd(qp->ibqp.pd)->user)
1516 return -EINVAL;
1517 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1518 (wr->num_sge == 0 ||
1519 wr->sg_list[0].length < sizeof(u64) ||
1520 wr->sg_list[0].addr & (sizeof(u64) - 1)))
1521 return -EINVAL;
1522 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1523 !qp->s_max_rd_atomic)
1524 return -EINVAL;
1525 len = post_parms[wr->opcode].length;
1526 /* UD specific */
1527 if (qp->ibqp.qp_type != IB_QPT_UC &&
1528 qp->ibqp.qp_type != IB_QPT_RC) {
1529 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1530 return -EINVAL;
1531 len = sizeof(struct ib_ud_wr);
1532 }
1533 return len;
1534}
1535
1536/**
1537 * rvt_qp_is_avail - determine queue capacity
1538 * @qp - the qp
1539 * @rdi - the rdmavt device
1540 * @reserved_op - is reserved operation
1541 *
1542 * This assumes the s_hlock is held but the s_last
1543 * qp variable is uncontrolled.
1544 *
1545 * For non-reserved operations, the qp->s_avail
1546 * may be changed.
1547 *
1548 * The return value is zero or a -ENOMEM.
1549 */
1550static inline int rvt_qp_is_avail(
1551 struct rvt_qp *qp,
1552 struct rvt_dev_info *rdi,
1553 bool reserved_op)
1554{
1555 u32 slast;
1556 u32 avail;
1557 u32 reserved_used;
1558
1559 /* see rvt_qp_wqe_unreserve() */
1560 smp_mb__before_atomic();
1561 reserved_used = atomic_read(&qp->s_reserved_used);
1562 if (unlikely(reserved_op)) {
1563 /* see rvt_qp_wqe_unreserve() */
1564 smp_mb__before_atomic();
1565 if (reserved_used >= rdi->dparms.reserved_operations)
1566 return -ENOMEM;
1567 return 0;
1568 }
1569 /* non-reserved operations */
1570 if (likely(qp->s_avail))
1571 return 0;
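	/*
	 * Recompute free space from the ring indices: the slots between
	 * s_head and a possibly stale (but monotonic) s_last, minus one
	 * slot to tell a full ring from an empty one, and minus the part
	 * of the reserved-operation budget not currently in use.
	 */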
1572 smp_read_barrier_depends(); /* see rc.c */
1573 slast = ACCESS_ONCE(qp->s_last);
1574 if (qp->s_head >= slast)
1575 avail = qp->s_size - (qp->s_head - slast);
1576 else
1577 avail = slast - qp->s_head;
1578
1579 /* see rvt_qp_wqe_unreserve() */
1580 smp_mb__before_atomic();
1581 reserved_used = atomic_read(&qp->s_reserved_used);
1582 avail = avail - 1 -
1583 (rdi->dparms.reserved_operations - reserved_used);
1584 /* ensure we don't assign a negative s_avail */
1585 if ((s32)avail <= 0)
1586 return -ENOMEM;
1587 qp->s_avail = avail;
1588 if (WARN_ON(qp->s_avail >
1589 (qp->s_size - 1 - rdi->dparms.reserved_operations)))
1590 rvt_pr_err(rdi,
1591 "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1592 qp->ibqp.qp_num, qp->s_size, qp->s_avail,
1593 qp->s_head, qp->s_tail, qp->s_cur,
1594 qp->s_acked, qp->s_last);
1595 return 0;
1596}
1597
1598/**
1599 * rvt_post_one_wr - post one RC, UC, or UD send work request
1600 * @qp: the QP to post on
1601 * @wr: the work request to send
1602 */
1603static int rvt_post_one_wr(struct rvt_qp *qp,
1604 struct ib_send_wr *wr,
1605 int *call_send)
1606{
1607 struct rvt_swqe *wqe;
1608 u32 next;
1609 int i;
1610 int j;
1611 int acc;
1612 struct rvt_lkey_table *rkt;
1613 struct rvt_pd *pd;
1614 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1615 u8 log_pmtu;
1616 int ret;
1617 size_t cplen;
1618 bool reserved_op;
1619 int local_ops_delayed = 0;
1620
1621 BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
1622
1623 /* IB spec says that num_sge == 0 is OK. */
1624 if (unlikely(wr->num_sge > qp->s_max_sge))
1625 return -EINVAL;
1626
1627 ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
1628 if (ret < 0)
1629 return ret;
1630 cplen = ret;
1631
1632 /*
1633 * Local operations include fast register and local invalidate.
1634 * Fast register needs to be processed immediately because the
1635 * registered lkey may be used by following work requests and the
1636 * lkey needs to be valid at the time those requests are posted.
1637 * Local invalidate can be processed immediately if fencing is
1638 * not required and no previous local invalidate ops are pending.
1639 * Signaled local operations that have been processed immediately
1640 * need to have requests with "completion only" flags set posted
1641 * to the send queue in order to generate completions.
1642 */
1643 if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
1644 switch (wr->opcode) {
1645 case IB_WR_REG_MR:
1646 ret = rvt_fast_reg_mr(qp,
1647 reg_wr(wr)->mr,
1648 reg_wr(wr)->key,
1649 reg_wr(wr)->access);
1650 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
1651 return ret;
1652 break;
1653 case IB_WR_LOCAL_INV:
1654 if ((wr->send_flags & IB_SEND_FENCE) ||
1655 atomic_read(&qp->local_ops_pending)) {
1656 local_ops_delayed = 1;
1657 } else {
1658 ret = rvt_invalidate_rkey(
1659 qp, wr->ex.invalidate_rkey);
1660 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
1661 return ret;
1662 }
1663 break;
1664 default:
1665 return -EINVAL;
1666 }
1667 }
1668
1669 reserved_op = rdi->post_parms[wr->opcode].flags &
1670 RVT_OPERATION_USE_RESERVE;
1671 /* check for avail */
1672 ret = rvt_qp_is_avail(qp, rdi, reserved_op);
1673 if (ret)
1674 return ret;
1675 next = qp->s_head + 1;
1676 if (next >= qp->s_size)
1677 next = 0;
1678
1679 rkt = &rdi->lkey_table;
1680 pd = ibpd_to_rvtpd(qp->ibqp.pd);
1681 wqe = rvt_get_swqe_ptr(qp, qp->s_head);
1682
1683 /* cplen has length from above */
1684 memcpy(&wqe->wr, wr, cplen);
1685
1686 wqe->length = 0;
1687 j = 0;
1688 if (wr->num_sge) {
1689 acc = wr->opcode >= IB_WR_RDMA_READ ?
1690 IB_ACCESS_LOCAL_WRITE : 0;
1691 for (i = 0; i < wr->num_sge; i++) {
1692 u32 length = wr->sg_list[i].length;
1693 int ok;
1694
1695 if (length == 0)
1696 continue;
1697 ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
1698 &wr->sg_list[i], acc);
1699 if (!ok) {
1700 ret = -EINVAL;
1701 goto bail_inval_free;
1702 }
1703 wqe->length += length;
1704 j++;
1705 }
1706 wqe->wr.num_sge = j;
1707 }
1708
1709 /* general part of wqe valid - allow for driver checks */
1710 if (rdi->driver_f.check_send_wqe) {
1711 ret = rdi->driver_f.check_send_wqe(qp, wqe);
1712 if (ret < 0)
1713 goto bail_inval_free;
1714 if (ret)
1715 *call_send = ret;
1716 }
1717
1718 log_pmtu = qp->log_pmtu;
1719 if (qp->ibqp.qp_type != IB_QPT_UC &&
1720 qp->ibqp.qp_type != IB_QPT_RC) {
1721 struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);
1722
1723 log_pmtu = ah->log_pmtu;
1724 atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
1725 }
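	/*
	 * Local operations below consume no PSNs. Everything else spans
	 * ((length - 1) >> log_pmtu) + 1 packets, so lpsn ends up as the
	 * PSN of the final packet of this WQE.
	 */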
1726
1727 if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
1728 if (local_ops_delayed)
1729 atomic_inc(&qp->local_ops_pending);
1730 else
1731 wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
1732 wqe->ssn = 0;
1733 wqe->psn = 0;
1734 wqe->lpsn = 0;
1735 } else {
1736 wqe->ssn = qp->s_ssn++;
1737 wqe->psn = qp->s_next_psn;
1738 wqe->lpsn = wqe->psn +
1739 (wqe->length ?
1740 ((wqe->length - 1) >> log_pmtu) :
1741 0);
1742 qp->s_next_psn = wqe->lpsn + 1;
1743 }
1744 trace_rvt_post_one_wr(qp, wqe);
1745 if (unlikely(reserved_op))
1746 rvt_qp_wqe_reserve(qp, wqe);
1747 else
1748 qp->s_avail--;
1749 smp_wmb(); /* see request builders */
1750 qp->s_head = next;
1751
1752 return 0;
1753
1754bail_inval_free:
1755 /* release mr holds */
1756 while (j) {
1757 struct rvt_sge *sge = &wqe->sg_list[--j];
1758
1759 rvt_put_mr(sge->mr);
1760 }
1761 return ret;
1762}
1763
1764/**
1765 * rvt_post_send - post a send on a QP
1766 * @ibqp: the QP to post the send on
1767 * @wr: the list of work requests to post
1768 * @bad_wr: the first bad WR is put here
1769 *
1770 * This may be called from interrupt context.
1771 *
1772 * Return: 0 on success else errno
1773 */
1774int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1775 struct ib_send_wr **bad_wr)
1776{
1777 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1778 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1779 unsigned long flags = 0;
1780 int call_send;
1781 unsigned nreq = 0;
1782 int err = 0;
1783
1784 spin_lock_irqsave(&qp->s_hlock, flags);
1785
1786 /*
1787 * Ensure QP state is such that we can send. If not bail out early,
1788 * there is no need to do this every time we post a send.
1789 */
1790 if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
1791 spin_unlock_irqrestore(&qp->s_hlock, flags);
1792 return -EINVAL;
1793 }
1794
1795 /*
1796 * If the send queue is empty and we only have a single WR, then just go
1797 * ahead and kick the send engine into gear. Otherwise we will always
1798 * just schedule the send to happen later.
1799 */
1800 call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
1801
1802 for (; wr; wr = wr->next) {
1803 err = rvt_post_one_wr(qp, wr, &call_send);
1804 if (unlikely(err)) {
1805 *bad_wr = wr;
1806 goto bail;
1807 }
1808 nreq++;
1809 }
1810bail:
1811 spin_unlock_irqrestore(&qp->s_hlock, flags);
1812 if (nreq) {
1813 if (call_send)
1814 rdi->driver_f.do_send(qp);
1815 else
1816 rdi->driver_f.schedule_send_no_lock(qp);
1817 }
1818 return err;
1819}
1820
1821/**
1822 * rvt_post_srq_recv - post a receive on a shared receive queue
1823 * @ibsrq: the SRQ to post the receive on
1824 * @wr: the list of work requests to post
1825 * @bad_wr: A pointer to the first WR to cause a problem is put here
1826 *
1827 * This may be called from interrupt context.
1828 *
1829 * Return: 0 on success else errno
1830 */
1831int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
1832 struct ib_recv_wr **bad_wr)
1833{
1834 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
1835 struct rvt_rwq *wq;
1836 unsigned long flags;
1837
1838 for (; wr; wr = wr->next) {
1839 struct rvt_rwqe *wqe;
1840 u32 next;
1841 int i;
1842
1843 if ((unsigned)wr->num_sge > srq->rq.max_sge) {
1844 *bad_wr = wr;
1845 return -EINVAL;
1846 }
1847
1848 spin_lock_irqsave(&srq->rq.lock, flags);
1849 wq = srq->rq.wq;
1850 next = wq->head + 1;
1851 if (next >= srq->rq.size)
1852 next = 0;
1853 if (next == wq->tail) {
1854 spin_unlock_irqrestore(&srq->rq.lock, flags);
1855 *bad_wr = wr;
1856 return -ENOMEM;
1857 }
1858
1859 wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
1860 wqe->wr_id = wr->wr_id;
1861 wqe->num_sge = wr->num_sge;
1862 for (i = 0; i < wr->num_sge; i++)
1863 wqe->sg_list[i] = wr->sg_list[i];
1864 /* Make sure queue entry is written before the head index. */
1865 smp_wmb();
1866 wq->head = next;
1867 spin_unlock_irqrestore(&srq->rq.lock, flags);
1868 }
1869 return 0;
8cf4020b 1870}