/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

/*
 * The mask field, which was present in the now-deleted qib_qpn_table,
 * is not present in rvt_qpn_table.  Define the same field as qpt_mask
 * here instead of adding the mask field to rvt_qpn_table.
 */
u16 qpt_mask;

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}
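
/*
 * Worked example of the masked skip in find_next_offset() above,
 * with illustrative values (the real qpt_mask is hardware dependent):
 * suppose qpt_mask = 0x6, so QPN bits 1-2 select a kernel receive
 * context, and n = 2 contexts exist.  For off = 0x5,
 * (0x5 & 0x6) >> 1 = 2, which is >= 2, so the offset is bumped to
 * (0x5 | 0x6) + 2 = 0x9; its context bits, (0x9 & 0x6) >> 1 = 0,
 * again land on a valid kernel context.
 */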

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};
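
/*
 * The table index is the 5-bit AETH credit code: e.g. code 0x7
 * advertises 12 credits and code 0x10 advertises 256.  Code 0x1F
 * (QIB_AETH_CREDIT_INVAL) is reserved to mean "invalid", which is
 * why only 31 of the 32 possible codes appear here.
 */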

static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}
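
	/*
	 * A note on the special-QP bookkeeping above: each port owns
	 * two bits in qpt->flags, one for QP0 (SMI) and one for QP1
	 * (GSI).  For example, port 1 maps SMI/GSI to bits 0/1 and
	 * port 2 to bits 2/3; a bit already set means that special
	 * QPN is in use, hence the -EINVAL.
	 */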

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: the rvt device info structure
 *
 * Returns the number of QP0s/QP1s still in use across all ports.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}

void qib_notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}

static int mtu_to_enum(u32 mtu)
{
	int enum_mtu;

	switch (mtu) {
	case 4096:
		enum_mtu = IB_MTU_4096;
		break;
	case 2048:
		enum_mtu = IB_MTU_2048;
		break;
	case 1024:
		enum_mtu = IB_MTU_1024;
		break;
	case 512:
		enum_mtu = IB_MTU_512;
		break;
	case 256:
		enum_mtu = IB_MTU_256;
		break;
	default:
		enum_mtu = IB_MTU_2048;
	}
	return enum_mtu;
}

int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;
	return pmtu;
}
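
/*
 * For example, if attr->path_mtu requests IB_MTU_4096 but the port's
 * ibmtu is 2048 bytes, qib_get_pmtu_from_attr() clamps the result to
 * IB_MTU_2048; a request at or below the port limit is returned
 * unchanged.
 */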

int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}

u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
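		/*
		 * The search settles on the largest code whose table
		 * entry does not exceed the credit count.  For example,
		 * with credits = 100 the probes see 192 > 100, 12 < 100,
		 * 48 < 100, 96 < 100, 128 > 100, and stop at x = 13
		 * (code 0xD, 96 credits).
		 */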
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}

void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
	del_timer_sync(&qp->s_timer);
}

void qib_quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}
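
/*
 * Teardown ordering note (as these hooks are invoked through rdmavt
 * when a QP is reset): qib_stop_send_queue() first cancels any
 * scheduled send work and the QP's s_timer, then qib_quiesce_qp()
 * waits for in-flight SDMA to drain before releasing the final
 * txreq.
 */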

void qib_flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_get_credit - handle the credit field in an incoming AETH
 * @qp: the qp whose send work queue the credit applies to
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
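		/*
		 * For example (illustrative numbers): if the AETH
		 * carries MSN 100 with credit code 0x7 (12 credits in
		 * the table above), the new limit sequence number
		 * (s_lsn) becomes 112, allowing sends up to SSN 112.
		 */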
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}
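
/*
 * Sketch of the intended iteration pattern, modeled on the seq_file
 * hooks in qib_debugfs.c (illustrative, not a verbatim copy).  The
 * QP table is RCU-protected and qib_qp_iter_next() uses
 * rcu_dereference(), so the walk happens under rcu_read_lock():
 *
 *	rcu_read_lock();
 *	iter = qib_qp_iter_init(dev);	\* positions at the first QP *\
 *	if (iter) {
 *		do {
 *			qib_qp_iter_print(s, iter);
 *		} while (!qib_qp_iter_next(iter));
 *	}
 *	rcu_read_unlock();
 *	kfree(iter);
 */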

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif