drivers/infiniband/hw/qib/qib_qp.c (mirror_ubuntu-artful-kernel, as of "IB/qib: Use rdmavt send and receive flags")

/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

/*
 * mask field which was present in now deleted qib_qpn_table
 * is not present in rvt_qpn_table. Defining the same field
 * as qpt_mask here instead of adding the mask field to
 * rvt_qpn_table.
 */
static u16 qpt_mask;

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}
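/*
 * Worked example for the qpt_mask path above (illustrative values, not from
 * the driver): the masked bits, shifted right by one, appear to select a
 * kernel receive context and are compared against dd->n_krcv_queues.
 * Assuming a hypothetical qpt_mask of 0x6 and n = 2 kernel contexts,
 * starting from off = 5:
 *
 *	off++;			 off = 6, context = (6 & 0x6) >> 1 = 3 >= 2
 *	off = (off | 0x6) + 2;	 off = 8, context = (8 & 0x6) >> 1 = 0
 *
 * i.e. offsets whose context bits would land beyond the available kernel
 * contexts are skipped.  With qpt_mask == 0 the function is simply a
 * find_next_zero_bit() scan of the current bitmap page.
 */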

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};
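/*
 * Reading the table: the 5-bit AETH credit code is the index and the entry
 * is the number of advertised RWQEs, so for example code 0x7 advertises 12
 * credits and code 0x1E advertises 32768.  The remaining, out-of-table code
 * (QIB_AETH_CREDIT_INVAL) is handled separately in qib_get_credit() below
 * and means the sender may ignore the credit field entirely.
 */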

static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
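/*
 * Note on the pattern above: the zeroed page is allocated without qpt->lock
 * held (the allocation may sleep), and only the publication of map->page is
 * done under the lock; if another CPU installed a page first, the locally
 * allocated one is simply freed again.
 */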

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
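/*
 * Summary of the allocation scheme above: QPN 0 and 1 are reserved per port
 * for the SMI and GSI special QPs and are handed out via bits in qpt->flags;
 * every other QPN comes from a lazily allocated array of bitmap pages,
 * scanned starting at qpt->last + 2, and qpt->nmaps only grows after all
 * existing pages have been scanned without finding a free bit.
 */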

static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->rdi.qp_dev->qp_table_size - 1);
}
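/*
 * qpn_hash() above folds the QPN through jhash_1word() with the per-device
 * seed dev->qp_rnd and masks the result, so qp_table_size is expected to be
 * a power of two; insert_qp(), remove_qp() and qib_lookup_qpn() below all
 * rely on this same bucket computation.
 */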


/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->rvp.qp[0], qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->rvp.qp[1], qp);
	else {
		qp->next = dev->rdi.qp_dev->qp_table[n];
		rcu_assign_pointer(dev->rdi.qp_dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
}
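/*
 * Publication order above matters for lockless readers: qp->next is set
 * before rcu_assign_pointer() makes the QP visible in its bucket, and the
 * atomic_inc(&qp->refcount) is the reference the hash table itself holds
 * until remove_qp() drops it.
 */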

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;
	spinlock_t *qpt_lock_ptr; /* Pointer to make checkpatch happy */

	spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);

	qpt_lock_ptr = &dev->rdi.qp_dev->qpt_lock;
	if (rcu_dereference_protected(ibp->rvp.qp[0],
				      lockdep_is_held(qpt_lock_ptr)) == qp) {
		RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	} else if (rcu_dereference_protected(ibp->rvp.qp[1],
			lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->rdi.qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(qpt_lock_ptr))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(qpt_lock_ptr)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}
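/*
 * Teardown counterpart to insert_qp(): the QP is unlinked under qpt_lock,
 * then synchronize_rcu() waits for any qib_lookup_qpn() readers that may
 * still be walking the old list before the table's reference is dropped
 * with atomic_dec(&qp->refcount).
 */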

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the qlogic_ib device
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *qpt_lock_ptr; /* Pointer to make checkpatch happy */

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);
	qpt_lock_ptr = &dev->rdi.qp_dev->qpt_lock;
	for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->rdi.qp_dev->qp_table[n],
					       lockdep_is_held(qpt_lock_ptr));
		RCU_INIT_POINTER(dev->rdi.qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
					lockdep_is_held(qpt_lock_ptr)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
	synchronize_rcu();

	return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct rvt_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct rvt_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->rvp.qp[0]);
		else
			qp = rcu_dereference(ibp->rvp.qp[1]);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct rvt_qp *qp, enum ib_qp_type type)
{
	struct qib_qp_priv *priv = qp->priv;
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&priv->s_dma_busy, 0);
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}

static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(
				 &ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct rvt_qp *qp = to_iqp(ibqp);
	struct qib_qp_priv *priv = qp->priv;
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->rdi.pending_lock);
			if (!list_empty(&priv->iowait))
				list_del_init(&priv->iowait);
			spin_unlock(&dev->rdi.pending_lock);
			qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&priv->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(priv->wait_dma,
				   !atomic_read(&priv->s_dma_busy));
			if (priv->s_tx) {
				qib_put_txreq(priv->s_tx);
				priv->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}
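	/*
	 * Rough arithmetic for the conversion above: the IB local ACK
	 * timeout field encodes 4.096 usec * 2^timeout, which the code
	 * expresses as 4096 * 2^timeout nanoseconds divided by 1000.
	 * For example, timeout = 14 gives 4096 * 16384 / 1000 ~= 67109
	 * usec, i.e. roughly 67 msec worth of jiffies.
	 */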

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
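/*
 * Worked example for the binary search above: with 20 free RWQEs the loop
 * converges on index 8 (16 credits), i.e. it advertises the largest
 * credit_table[] entry that does not exceed the actual count, presumably so
 * the receiver never promises more RWQEs than it has posted.
 */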

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;
	gfp_t gfp;
	struct qib_qp_priv *priv;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable in RC QPs only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
			GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, gfp);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		priv = kzalloc(sizeof(*priv), gfp);
		if (!priv) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp_hdr;
		}
		priv->owner = qp;
		priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
		if (!priv->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (gfp != GFP_NOIO)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);

			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&priv->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&priv->s_work, qib_do_send);
		INIT_LIST_HEAD(&priv->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->rdi.qp_dev->qpn_table,
				init_attr->qp_type, init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(&dev->rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->rdi.pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->rdi.pending_mmaps);
		spin_unlock_irq(&dev->rdi.pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(priv->s_hdr);
	kfree(priv);
bail_qp_hdr:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp_priv *priv = qp->priv;

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->rdi.pending_lock);
		if (!list_empty(&priv->iowait))
			list_del_init(&priv->iowait);
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&priv->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(priv->s_hdr);
	kfree(priv);
	kfree(qp);
	return 0;
}

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the qlogic_ib device
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct rvt_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
	qpt->last = 1;          /* start with QPN 2 */
	qpt->nmaps = 1;
	qpt_mask = dd->qpn_mask;
}

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}

/**
 * qib_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}
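/*
 * Credit bookkeeping above in one line: a valid AETH code extends the send
 * limit to (remote MSN + credit_table[code]) within the 24-bit sequence
 * space (QIB_MSN_MASK), and s_lsn only ever moves forward thanks to the
 * qib_cmp24() check, while the invalid code latches RVT_S_UNLIMITED_CREDIT
 * so the credit field is ignored from then on.
 */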

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif