1 /*
2 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35 #include <rdma/ib_mad.h>
36 #include <rdma/ib_user_verbs.h>
37 #include <linux/io.h>
38 #include <linux/module.h>
39 #include <linux/utsname.h>
40 #include <linux/rculist.h>
41 #include <linux/mm.h>
42 #include <linux/random.h>
43 #include <linux/vmalloc.h>
44 #include <rdma/rdma_vt.h>
45
46 #include "qib.h"
47 #include "qib_common.h"
48
49 static unsigned int ib_qib_qp_table_size = 256;
50 module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
51 MODULE_PARM_DESC(qp_table_size, "QP table size");
52
53 static unsigned int qib_lkey_table_size = 16;
54 module_param_named(lkey_table_size, qib_lkey_table_size, uint,
55 S_IRUGO);
56 MODULE_PARM_DESC(lkey_table_size,
57 "LKEY table size in bits (2^n, 1 <= n <= 23)");
58
59 static unsigned int ib_qib_max_pds = 0xFFFF;
60 module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
61 MODULE_PARM_DESC(max_pds,
62 "Maximum number of protection domains to support");
63
64 static unsigned int ib_qib_max_ahs = 0xFFFF;
65 module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
66 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
67
68 unsigned int ib_qib_max_cqes = 0x2FFFF;
69 module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
70 MODULE_PARM_DESC(max_cqes,
71 "Maximum number of completion queue entries to support");
72
73 unsigned int ib_qib_max_cqs = 0x1FFFF;
74 module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
75 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
76
77 unsigned int ib_qib_max_qp_wrs = 0x3FFF;
78 module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
79 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
80
81 unsigned int ib_qib_max_qps = 16384;
82 module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
83 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
84
85 unsigned int ib_qib_max_sges = 0x60;
86 module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
87 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
88
89 unsigned int ib_qib_max_mcast_grps = 16384;
90 module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
91 MODULE_PARM_DESC(max_mcast_grps,
92 "Maximum number of multicast groups to support");
93
94 unsigned int ib_qib_max_mcast_qp_attached = 16;
95 module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
96 uint, S_IRUGO);
97 MODULE_PARM_DESC(max_mcast_qp_attached,
98 "Maximum number of attached QPs to support");
99
100 unsigned int ib_qib_max_srqs = 1024;
101 module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
102 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
103
104 unsigned int ib_qib_max_srq_sges = 128;
105 module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
106 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
107
108 unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
109 module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
110 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
111
112 static unsigned int ib_qib_disable_sma;
113 module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
114 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
115
116 /*
117 * Translate ib_wr_opcode into ib_wc_opcode.
118 */
119 const enum ib_wc_opcode ib_qib_wc_opcode[] = {
120 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
121 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
122 [IB_WR_SEND] = IB_WC_SEND,
123 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
124 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
125 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
126 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
127 };
128
129 /*
130 * System image GUID.
131 */
132 __be64 ib_qib_sys_image_guid;
133
134 /**
135 * qib_copy_sge - copy data to SGE memory
136 * @ss: the SGE state
137 * @data: the data to copy
138 * @length: the length of the data
139 */
140 void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
141 {
142 struct rvt_sge *sge = &ss->sge;
143
144 while (length) {
145 u32 len = rvt_get_sge_length(sge, length);
146
147 WARN_ON_ONCE(len == 0);
148 memcpy(sge->vaddr, data, len);
149 rvt_update_sge(ss, len, release);
150 data += len;
151 length -= len;
152 }
153 }
154
155 /*
156 * Count the number of DMA descriptors needed to send length bytes of data.
157 * Don't modify the rvt_sge_state to get the count.
158 * Return zero if any of the segments is not aligned.
159 */
160 static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
161 {
162 struct rvt_sge *sg_list = ss->sg_list;
163 struct rvt_sge sge = ss->sge;
164 u8 num_sge = ss->num_sge;
165 u32 ndesc = 1; /* count the header */
166
167 while (length) {
168 u32 len = sge.length;
169
170 if (len > length)
171 len = length;
172 if (len > sge.sge_length)
173 len = sge.sge_length;
174 BUG_ON(len == 0);
175 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
176 (len != length && (len & (sizeof(u32) - 1)))) {
177 ndesc = 0;
178 break;
179 }
180 ndesc++;
181 sge.vaddr += len;
182 sge.length -= len;
183 sge.sge_length -= len;
184 if (sge.sge_length == 0) {
185 if (--num_sge)
186 sge = *sg_list++;
187 } else if (sge.length == 0 && sge.mr->lkey) {
188 if (++sge.n >= RVT_SEGSZ) {
189 if (++sge.m >= sge.mr->mapsz)
190 break;
191 sge.n = 0;
192 }
193 sge.vaddr =
194 sge.mr->map[sge.m]->segs[sge.n].vaddr;
195 sge.length =
196 sge.mr->map[sge.m]->segs[sge.n].length;
197 }
198 length -= len;
199 }
200 return ndesc;
201 }
202
203 /*
204 * Copy from the SGEs to the data buffer.
205 */
206 static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
207 {
208 struct rvt_sge *sge = &ss->sge;
209
210 while (length) {
211 u32 len = sge->length;
212
213 if (len > length)
214 len = length;
215 if (len > sge->sge_length)
216 len = sge->sge_length;
217 BUG_ON(len == 0);
218 memcpy(data, sge->vaddr, len);
219 sge->vaddr += len;
220 sge->length -= len;
221 sge->sge_length -= len;
222 if (sge->sge_length == 0) {
223 if (--ss->num_sge)
224 *sge = *ss->sg_list++;
225 } else if (sge->length == 0 && sge->mr->lkey) {
226 if (++sge->n >= RVT_SEGSZ) {
227 if (++sge->m >= sge->mr->mapsz)
228 break;
229 sge->n = 0;
230 }
231 sge->vaddr =
232 sge->mr->map[sge->m]->segs[sge->n].vaddr;
233 sge->length =
234 sge->mr->map[sge->m]->segs[sge->n].length;
235 }
236 data += len;
237 length -= len;
238 }
239 }
240
241 /**
242 * qib_qp_rcv - process an incoming packet on a QP
243 * @rcd: the context pointer
244 * @hdr: the packet header
245 * @has_grh: true if the packet has a GRH
246 * @data: the packet data
247 * @tlen: the packet length
248 * @qp: the QP the packet came on
249 *
250 * This is called from qib_ib_rcv() to process an incoming packet
251 * for the given QP.
252 * Called at interrupt level.
253 */
254 static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
255 int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
256 {
257 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
258
259 spin_lock(&qp->r_lock);
260
261 /* Check for valid receive state. */
262 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
263 ibp->rvp.n_pkt_drops++;
264 goto unlock;
265 }
266
267 switch (qp->ibqp.qp_type) {
268 case IB_QPT_SMI:
269 case IB_QPT_GSI:
270 if (ib_qib_disable_sma)
271 break;
272 /* FALLTHROUGH */
273 case IB_QPT_UD:
274 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
275 break;
276
277 case IB_QPT_RC:
278 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
279 break;
280
281 case IB_QPT_UC:
282 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
283 break;
284
285 default:
286 break;
287 }
288
289 unlock:
290 spin_unlock(&qp->r_lock);
291 }
292
293 /**
294 * qib_ib_rcv - process an incoming packet
295 * @rcd: the context pointer
296 * @rhdr: the header of the packet
297 * @data: the packet payload
298 * @tlen: the packet length
299 *
300 * This is called from qib_kreceive() to process an incoming packet at
301 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
302 */
303 void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
304 {
305 struct qib_pportdata *ppd = rcd->ppd;
306 struct qib_ibport *ibp = &ppd->ibport_data;
307 struct ib_header *hdr = rhdr;
308 struct qib_devdata *dd = ppd->dd;
309 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
310 struct ib_other_headers *ohdr;
311 struct rvt_qp *qp;
312 u32 qp_num;
313 int lnh;
314 u8 opcode;
315 u16 lid;
316
317 /* 24 == LRH+BTH+CRC */
318 if (unlikely(tlen < 24))
319 goto drop;
320
321 /* Check for a valid destination LID (see ch. 7.11.1). */
322 lid = be16_to_cpu(hdr->lrh[1]);
323 if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
324 lid &= ~((1 << ppd->lmc) - 1);
325 if (unlikely(lid != ppd->lid))
326 goto drop;
327 }
328
329 /* Check for GRH */
330 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
331 if (lnh == QIB_LRH_BTH)
332 ohdr = &hdr->u.oth;
333 else if (lnh == QIB_LRH_GRH) {
334 u32 vtf;
335
336 ohdr = &hdr->u.l.oth;
337 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
338 goto drop;
339 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
340 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
341 goto drop;
342 } else
343 goto drop;
344
345 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
346 #ifdef CONFIG_DEBUG_FS
347 rcd->opstats->stats[opcode].n_bytes += tlen;
348 rcd->opstats->stats[opcode].n_packets++;
349 #endif
350
351 /* Get the destination QP number. */
352 qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
353 if (qp_num == QIB_MULTICAST_QPN) {
354 struct rvt_mcast *mcast;
355 struct rvt_mcast_qp *p;
356
357 if (lnh != QIB_LRH_GRH)
358 goto drop;
359 mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid, lid);
360 if (mcast == NULL)
361 goto drop;
362 this_cpu_inc(ibp->pmastats->n_multicast_rcv);
363 list_for_each_entry_rcu(p, &mcast->qp_list, list)
364 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
365 /*
366 * Notify rvt_detach_mcast() if it is waiting for us
367 * to finish.
368 */
369 if (atomic_dec_return(&mcast->refcount) <= 1)
370 wake_up(&mcast->wait);
371 } else {
372 rcu_read_lock();
373 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
374 if (!qp) {
375 rcu_read_unlock();
376 goto drop;
377 }
378 this_cpu_inc(ibp->pmastats->n_unicast_rcv);
379 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
380 rcu_read_unlock();
381 }
382 return;
383
384 drop:
385 ibp->rvp.n_pkt_drops++;
386 }
387
388 /*
389 * This is called from a timer to check for QPs
390 * which need kernel memory in order to send a packet.
391 */
392 static void mem_timer(unsigned long data)
393 {
394 struct qib_ibdev *dev = (struct qib_ibdev *) data;
395 struct list_head *list = &dev->memwait;
396 struct rvt_qp *qp = NULL;
397 struct qib_qp_priv *priv = NULL;
398 unsigned long flags;
399
400 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
401 if (!list_empty(list)) {
402 priv = list_entry(list->next, struct qib_qp_priv, iowait);
403 qp = priv->owner;
404 list_del_init(&priv->iowait);
405 rvt_get_qp(qp);
406 if (!list_empty(list))
407 mod_timer(&dev->mem_timer, jiffies + 1);
408 }
409 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
410
411 if (qp) {
412 spin_lock_irqsave(&qp->s_lock, flags);
413 if (qp->s_flags & RVT_S_WAIT_KMEM) {
414 qp->s_flags &= ~RVT_S_WAIT_KMEM;
415 qib_schedule_send(qp);
416 }
417 spin_unlock_irqrestore(&qp->s_lock, flags);
418 rvt_put_qp(qp);
419 }
420 }
421
422 #ifdef __LITTLE_ENDIAN
423 static inline u32 get_upper_bits(u32 data, u32 shift)
424 {
425 return data >> shift;
426 }
427
428 static inline u32 set_upper_bits(u32 data, u32 shift)
429 {
430 return data << shift;
431 }
432
433 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
434 {
435 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
436 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
437 return data;
438 }
439 #else
440 static inline u32 get_upper_bits(u32 data, u32 shift)
441 {
442 return data << shift;
443 }
444
445 static inline u32 set_upper_bits(u32 data, u32 shift)
446 {
447 return data >> shift;
448 }
449
450 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
451 {
452 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
453 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
454 return data;
455 }
456 #endif
457
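/*
 * Copy payload data from the SGE state into a write-combining PIO buffer
 * one dword at a time.  Source addresses that are not dword aligned are
 * handled by carrying partial words in 'data'/'extra'.  The final dword
 * is kept in 'last' and written as the trigger word, with WC flushes
 * around it when the chip requires them (flush_wc).
 */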
458 static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
459 u32 length, unsigned flush_wc)
460 {
461 u32 extra = 0;
462 u32 data = 0;
463 u32 last;
464
465 while (1) {
466 u32 len = ss->sge.length;
467 u32 off;
468
469 if (len > length)
470 len = length;
471 if (len > ss->sge.sge_length)
472 len = ss->sge.sge_length;
473 BUG_ON(len == 0);
474 /* If the source address is not aligned, try to align it. */
475 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
476 if (off) {
477 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
478 ~(sizeof(u32) - 1));
479 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
480 u32 y;
481
482 y = sizeof(u32) - off;
483 if (len > y)
484 len = y;
485 if (len + extra >= sizeof(u32)) {
486 data |= set_upper_bits(v, extra *
487 BITS_PER_BYTE);
488 len = sizeof(u32) - extra;
489 if (len == length) {
490 last = data;
491 break;
492 }
493 __raw_writel(data, piobuf);
494 piobuf++;
495 extra = 0;
496 data = 0;
497 } else {
498 /* Clear unused upper bytes */
499 data |= clear_upper_bytes(v, len, extra);
500 if (len == length) {
501 last = data;
502 break;
503 }
504 extra += len;
505 }
506 } else if (extra) {
507 /* Source address is aligned. */
508 u32 *addr = (u32 *) ss->sge.vaddr;
509 int shift = extra * BITS_PER_BYTE;
510 int ushift = 32 - shift;
511 u32 l = len;
512
513 while (l >= sizeof(u32)) {
514 u32 v = *addr;
515
516 data |= set_upper_bits(v, shift);
517 __raw_writel(data, piobuf);
518 data = get_upper_bits(v, ushift);
519 piobuf++;
520 addr++;
521 l -= sizeof(u32);
522 }
523 /*
524 * We still have 'l' bytes (less than a dword) of this SGE left over.
525 */
526 if (l) {
527 u32 v = *addr;
528
529 if (l + extra >= sizeof(u32)) {
530 data |= set_upper_bits(v, shift);
531 len -= l + extra - sizeof(u32);
532 if (len == length) {
533 last = data;
534 break;
535 }
536 __raw_writel(data, piobuf);
537 piobuf++;
538 extra = 0;
539 data = 0;
540 } else {
541 /* Clear unused upper bytes */
542 data |= clear_upper_bytes(v, l, extra);
543 if (len == length) {
544 last = data;
545 break;
546 }
547 extra += l;
548 }
549 } else if (len == length) {
550 last = data;
551 break;
552 }
553 } else if (len == length) {
554 u32 w;
555
556 /*
557 * Need to round up for the last dword in the
558 * packet.
559 */
560 w = (len + 3) >> 2;
561 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
562 piobuf += w - 1;
563 last = ((u32 *) ss->sge.vaddr)[w - 1];
564 break;
565 } else {
566 u32 w = len >> 2;
567
568 qib_pio_copy(piobuf, ss->sge.vaddr, w);
569 piobuf += w;
570
571 extra = len & (sizeof(u32) - 1);
572 if (extra) {
573 u32 v = ((u32 *) ss->sge.vaddr)[w];
574
575 /* Clear unused upper bytes */
576 data = clear_upper_bytes(v, extra, 0);
577 }
578 }
579 rvt_update_sge(ss, len, false);
580 length -= len;
581 }
582 /* Update address before sending packet. */
583 rvt_update_sge(ss, length, false);
584 if (flush_wc) {
585 /* must flush early everything before trigger word */
586 qib_flush_wc();
587 __raw_writel(last, piobuf);
588 /* be sure trigger word is written */
589 qib_flush_wc();
590 } else
591 __raw_writel(last, piobuf);
592 }
593
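/*
 * Slow path for allocating a verbs txreq: retake the free-list lock under
 * the QP's s_lock and recheck the list.  If it is still empty, queue the
 * QP on dev->txwait, clear RVT_S_BUSY and return ERR_PTR(-EBUSY) so the
 * caller waits until a txreq is freed.
 */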
594 static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
595 struct rvt_qp *qp)
596 {
597 struct qib_qp_priv *priv = qp->priv;
598 struct qib_verbs_txreq *tx;
599 unsigned long flags;
600
601 spin_lock_irqsave(&qp->s_lock, flags);
602 spin_lock(&dev->rdi.pending_lock);
603
604 if (!list_empty(&dev->txreq_free)) {
605 struct list_head *l = dev->txreq_free.next;
606
607 list_del(l);
608 spin_unlock(&dev->rdi.pending_lock);
609 spin_unlock_irqrestore(&qp->s_lock, flags);
610 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
611 } else {
612 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK &&
613 list_empty(&priv->iowait)) {
614 dev->n_txwait++;
615 qp->s_flags |= RVT_S_WAIT_TX;
616 list_add_tail(&priv->iowait, &dev->txwait);
617 }
618 qp->s_flags &= ~RVT_S_BUSY;
619 spin_unlock(&dev->rdi.pending_lock);
620 spin_unlock_irqrestore(&qp->s_lock, flags);
621 tx = ERR_PTR(-EBUSY);
622 }
623 return tx;
624 }
625
626 static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
627 struct rvt_qp *qp)
628 {
629 struct qib_verbs_txreq *tx;
630 unsigned long flags;
631
632 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
633 /* assume the free list is non-empty */
634 if (likely(!list_empty(&dev->txreq_free))) {
635 struct list_head *l = dev->txreq_free.next;
636
637 list_del(l);
638 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
639 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
640 } else {
641 /* call slow path to get the extra lock */
642 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
643 tx = __get_txreq(dev, qp);
644 }
645 return tx;
646 }
647
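/*
 * Return a txreq to the free list, dropping any MR reference and
 * unmapping/freeing the bounce buffer if one was used, then wake the
 * first QP waiting for a free txreq.
 */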
648 void qib_put_txreq(struct qib_verbs_txreq *tx)
649 {
650 struct qib_ibdev *dev;
651 struct rvt_qp *qp;
652 struct qib_qp_priv *priv;
653 unsigned long flags;
654
655 qp = tx->qp;
656 dev = to_idev(qp->ibqp.device);
657
658 if (tx->mr) {
659 rvt_put_mr(tx->mr);
660 tx->mr = NULL;
661 }
662 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
663 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
664 dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
665 tx->txreq.addr, tx->hdr_dwords << 2,
666 DMA_TO_DEVICE);
667 kfree(tx->align_buf);
668 }
669
670 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
671
672 /* Put struct back on free list */
673 list_add(&tx->txreq.list, &dev->txreq_free);
674
675 if (!list_empty(&dev->txwait)) {
676 /* Wake up first QP wanting a free struct */
677 priv = list_entry(dev->txwait.next, struct qib_qp_priv,
678 iowait);
679 qp = priv->owner;
680 list_del_init(&priv->iowait);
681 rvt_get_qp(qp);
682 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
683
684 spin_lock_irqsave(&qp->s_lock, flags);
685 if (qp->s_flags & RVT_S_WAIT_TX) {
686 qp->s_flags &= ~RVT_S_WAIT_TX;
687 qib_schedule_send(qp);
688 }
689 spin_unlock_irqrestore(&qp->s_lock, flags);
690
691 rvt_put_qp(qp);
692 } else
693 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
694 }
695
696 /*
697 * This is called when there are send DMA descriptors that might be
698 * available.
699 *
700 * This is called with ppd->sdma_lock held.
701 */
702 void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
703 {
704 struct rvt_qp *qp, *nqp;
705 struct qib_qp_priv *qpp, *nqpp;
706 struct rvt_qp *qps[20];
707 struct qib_ibdev *dev;
708 unsigned i, n;
709
710 n = 0;
711 dev = &ppd->dd->verbs_dev;
712 spin_lock(&dev->rdi.pending_lock);
713
714 /* Search wait list for first QP wanting DMA descriptors. */
715 list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
716 qp = qpp->owner;
717 nqp = nqpp->owner;
718 if (qp->port_num != ppd->port)
719 continue;
720 if (n == ARRAY_SIZE(qps))
721 break;
722 if (qpp->s_tx->txreq.sg_count > avail)
723 break;
724 avail -= qpp->s_tx->txreq.sg_count;
725 list_del_init(&qpp->iowait);
726 rvt_get_qp(qp);
727 qps[n++] = qp;
728 }
729
730 spin_unlock(&dev->rdi.pending_lock);
731
732 for (i = 0; i < n; i++) {
733 qp = qps[i];
734 spin_lock(&qp->s_lock);
735 if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
736 qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
737 qib_schedule_send(qp);
738 }
739 spin_unlock(&qp->s_lock);
740 rvt_put_qp(qp);
741 }
742 }
743
744 /*
745 * This is called with ppd->sdma_lock held.
746 */
747 static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
748 {
749 struct qib_verbs_txreq *tx =
750 container_of(cookie, struct qib_verbs_txreq, txreq);
751 struct rvt_qp *qp = tx->qp;
752 struct qib_qp_priv *priv = qp->priv;
753
754 spin_lock(&qp->s_lock);
755 if (tx->wqe)
756 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
757 else if (qp->ibqp.qp_type == IB_QPT_RC) {
758 struct ib_header *hdr;
759
760 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
761 hdr = &tx->align_buf->hdr;
762 else {
763 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
764
765 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
766 }
767 qib_rc_send_complete(qp, hdr);
768 }
769 if (atomic_dec_and_test(&priv->s_dma_busy)) {
770 if (qp->state == IB_QPS_RESET)
771 wake_up(&priv->wait_dma);
772 else if (qp->s_flags & RVT_S_WAIT_DMA) {
773 qp->s_flags &= ~RVT_S_WAIT_DMA;
774 qib_schedule_send(qp);
775 }
776 }
777 spin_unlock(&qp->s_lock);
778
779 qib_put_txreq(tx);
780 }
781
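/*
 * Put the QP on the memory wait list when a kmalloc for a send fails;
 * mem_timer() will restart the send when memory may be available again.
 * Returns -EBUSY (and clears RVT_S_BUSY) unless the QP has been moved to
 * an error or reset state.
 */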
782 static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
783 {
784 struct qib_qp_priv *priv = qp->priv;
785 unsigned long flags;
786 int ret = 0;
787
788 spin_lock_irqsave(&qp->s_lock, flags);
789 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
790 spin_lock(&dev->rdi.pending_lock);
791 if (list_empty(&priv->iowait)) {
792 if (list_empty(&dev->memwait))
793 mod_timer(&dev->mem_timer, jiffies + 1);
794 qp->s_flags |= RVT_S_WAIT_KMEM;
795 list_add_tail(&priv->iowait, &dev->memwait);
796 }
797 spin_unlock(&dev->rdi.pending_lock);
798 qp->s_flags &= ~RVT_S_BUSY;
799 ret = -EBUSY;
800 }
801 spin_unlock_irqrestore(&qp->s_lock, flags);
802
803 return ret;
804 }
805
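/*
 * Post a packet to the send DMA engine.  If a previously built request is
 * pending (priv->s_tx), it is resent as-is.  Otherwise a txreq is
 * allocated and either the header is DMA'd from dev->pio_hdrs with the
 * payload gathered from the SGE list, or, when the payload is unaligned
 * or would need too many descriptors, the header and payload are copied
 * into a single kmalloc'd bounce buffer that is mapped for DMA.
 */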
806 static int qib_verbs_send_dma(struct rvt_qp *qp, struct ib_header *hdr,
807 u32 hdrwords, struct rvt_sge_state *ss, u32 len,
808 u32 plen, u32 dwords)
809 {
810 struct qib_qp_priv *priv = qp->priv;
811 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
812 struct qib_devdata *dd = dd_from_dev(dev);
813 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
814 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
815 struct qib_verbs_txreq *tx;
816 struct qib_pio_header *phdr;
817 u32 control;
818 u32 ndesc;
819 int ret;
820
821 tx = priv->s_tx;
822 if (tx) {
823 priv->s_tx = NULL;
824 /* resend previously constructed packet */
825 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
826 goto bail;
827 }
828
829 tx = get_txreq(dev, qp);
830 if (IS_ERR(tx))
831 goto bail_tx;
832
833 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
834 be16_to_cpu(hdr->lrh[0]) >> 12);
835 tx->qp = qp;
836 tx->wqe = qp->s_wqe;
837 tx->mr = qp->s_rdma_mr;
838 if (qp->s_rdma_mr)
839 qp->s_rdma_mr = NULL;
840 tx->txreq.callback = sdma_complete;
841 if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
842 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
843 else
844 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
845 if (plen + 1 > dd->piosize2kmax_dwords)
846 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
847
848 if (len) {
849 /*
850 * Don't try to DMA if it takes more descriptors than
851 * the queue holds.
852 */
853 ndesc = qib_count_sge(ss, len);
854 if (ndesc >= ppd->sdma_descq_cnt)
855 ndesc = 0;
856 } else
857 ndesc = 1;
858 if (ndesc) {
859 phdr = &dev->pio_hdrs[tx->hdr_inx];
860 phdr->pbc[0] = cpu_to_le32(plen);
861 phdr->pbc[1] = cpu_to_le32(control);
862 memcpy(&phdr->hdr, hdr, hdrwords << 2);
863 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
864 tx->txreq.sg_count = ndesc;
865 tx->txreq.addr = dev->pio_hdrs_phys +
866 tx->hdr_inx * sizeof(struct qib_pio_header);
867 tx->hdr_dwords = hdrwords + 2; /* add PBC length */
868 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
869 goto bail;
870 }
871
872 /* Allocate a buffer and copy the header and payload to it. */
873 tx->hdr_dwords = plen + 1;
874 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
875 if (!phdr)
876 goto err_tx;
877 phdr->pbc[0] = cpu_to_le32(plen);
878 phdr->pbc[1] = cpu_to_le32(control);
879 memcpy(&phdr->hdr, hdr, hdrwords << 2);
880 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
881
882 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
883 tx->hdr_dwords << 2, DMA_TO_DEVICE);
884 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
885 goto map_err;
886 tx->align_buf = phdr;
887 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
888 tx->txreq.sg_count = 1;
889 ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
890 goto unaligned;
891
892 map_err:
893 kfree(phdr);
894 err_tx:
895 qib_put_txreq(tx);
896 ret = wait_kmem(dev, qp);
897 unaligned:
898 ibp->rvp.n_unaligned++;
899 bail:
900 return ret;
901 bail_tx:
902 ret = PTR_ERR(tx);
903 goto bail;
904 }
905
906 /*
907 * If we are now in the error state, return zero to flush the
908 * send work request.
909 */
910 static int no_bufs_available(struct rvt_qp *qp)
911 {
912 struct qib_qp_priv *priv = qp->priv;
913 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
914 struct qib_devdata *dd;
915 unsigned long flags;
916 int ret = 0;
917
918 /*
919 * Note that as soon as dd->f_wantpiobuf_intr() is called and
920 * possibly before it returns, qib_ib_piobufavail()
921 * could be called. Therefore, put QP on the I/O wait list before
922 * enabling the PIO avail interrupt.
923 */
924 spin_lock_irqsave(&qp->s_lock, flags);
925 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
926 spin_lock(&dev->rdi.pending_lock);
927 if (list_empty(&priv->iowait)) {
928 dev->n_piowait++;
929 qp->s_flags |= RVT_S_WAIT_PIO;
930 list_add_tail(&priv->iowait, &dev->piowait);
931 dd = dd_from_dev(dev);
932 dd->f_wantpiobuf_intr(dd, 1);
933 }
934 spin_unlock(&dev->rdi.pending_lock);
935 qp->s_flags &= ~RVT_S_BUSY;
936 ret = -EBUSY;
937 }
938 spin_unlock_irqrestore(&qp->s_lock, flags);
939 return ret;
940 }
941
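/*
 * Send a packet using programmed I/O.  Writes the PBC, header and payload
 * into a chip PIO buffer, inserting write-combining flushes around the
 * trigger word when QIB_PIO_FLUSH_WC is set, and generates the send
 * completion once the buffer has been handed to the chip.
 */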
942 static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
943 u32 hdrwords, struct rvt_sge_state *ss, u32 len,
944 u32 plen, u32 dwords)
945 {
946 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
947 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
948 u32 *hdr = (u32 *) ibhdr;
949 u32 __iomem *piobuf_orig;
950 u32 __iomem *piobuf;
951 u64 pbc;
952 unsigned long flags;
953 unsigned flush_wc;
954 u32 control;
955 u32 pbufn;
956
957 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
958 be16_to_cpu(ibhdr->lrh[0]) >> 12);
959 pbc = ((u64) control << 32) | plen;
960 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
961 if (unlikely(piobuf == NULL))
962 return no_bufs_available(qp);
963
964 /*
965 * Write the pbc.
966 * We have to flush after the PBC for correctness on some cpus
967 * or WC buffer can be written out of order.
968 */
969 writeq(pbc, piobuf);
970 piobuf_orig = piobuf;
971 piobuf += 2;
972
973 flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
974 if (len == 0) {
975 /*
976 * If there is just the header portion, must flush before
977 * writing last word of header for correctness, and after
978 * the last header word (trigger word).
979 */
980 if (flush_wc) {
981 qib_flush_wc();
982 qib_pio_copy(piobuf, hdr, hdrwords - 1);
983 qib_flush_wc();
984 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
985 qib_flush_wc();
986 } else
987 qib_pio_copy(piobuf, hdr, hdrwords);
988 goto done;
989 }
990
991 if (flush_wc)
992 qib_flush_wc();
993 qib_pio_copy(piobuf, hdr, hdrwords);
994 piobuf += hdrwords;
995
996 /* The common case is aligned and contained in one segment. */
997 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
998 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
999 u32 *addr = (u32 *) ss->sge.vaddr;
1000
1001 /* Update address before sending packet. */
1002 rvt_update_sge(ss, len, false);
1003 if (flush_wc) {
1004 qib_pio_copy(piobuf, addr, dwords - 1);
1005 /* must flush early everything before trigger word */
1006 qib_flush_wc();
1007 __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1008 /* be sure trigger word is written */
1009 qib_flush_wc();
1010 } else
1011 qib_pio_copy(piobuf, addr, dwords);
1012 goto done;
1013 }
1014 copy_io(piobuf, ss, len, flush_wc);
1015 done:
1016 if (dd->flags & QIB_USE_SPCL_TRIG) {
1017 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1018
1019 qib_flush_wc();
1020 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1021 }
1022 qib_sendbuf_done(dd, pbufn);
1023 if (qp->s_rdma_mr) {
1024 rvt_put_mr(qp->s_rdma_mr);
1025 qp->s_rdma_mr = NULL;
1026 }
1027 if (qp->s_wqe) {
1028 spin_lock_irqsave(&qp->s_lock, flags);
1029 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1030 spin_unlock_irqrestore(&qp->s_lock, flags);
1031 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1032 spin_lock_irqsave(&qp->s_lock, flags);
1033 qib_rc_send_complete(qp, ibhdr);
1034 spin_unlock_irqrestore(&qp->s_lock, flags);
1035 }
1036 return 0;
1037 }
1038
1039 /**
1040 * qib_verbs_send - send a packet
1041 * @qp: the QP to send on
1042 * @hdr: the packet header
1043 * @hdrwords: the number of 32-bit words in the header
1044 * @ss: the SGE to send
1045 * @len: the length of the packet in bytes
1046 *
1047 * Return zero if packet is sent or queued OK.
1048 * Return non-zero and clear the RVT_S_BUSY flag in qp->s_flags otherwise.
1049 */
1050 int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
1051 u32 hdrwords, struct rvt_sge_state *ss, u32 len)
1052 {
1053 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1054 u32 plen;
1055 int ret;
1056 u32 dwords = (len + 3) >> 2;
1057
1058 /*
1059 * Calculate the send buffer trigger address.
1060 * The +1 counts for the pbc control dword following the pbc length.
1061 */
1062 plen = hdrwords + dwords + 1;
1063
1064 /*
1065 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1066 * can defer SDMA restart until link goes ACTIVE without
1067 * worrying about just how we got there.
1068 */
1069 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1070 !(dd->flags & QIB_HAS_SEND_DMA))
1071 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1072 plen, dwords);
1073 else
1074 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
1075 plen, dwords);
1076
1077 return ret;
1078 }
1079
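/*
 * Snapshot the port's send/receive word and packet counters and the
 * transmit-wait counter.  Fails with -EINVAL if the hardware is not
 * present (e.g. during a freeze).
 */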
1080 int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
1081 u64 *rwords, u64 *spkts, u64 *rpkts,
1082 u64 *xmit_wait)
1083 {
1084 int ret;
1085 struct qib_devdata *dd = ppd->dd;
1086
1087 if (!(dd->flags & QIB_PRESENT)) {
1088 /* no hardware, freeze, etc. */
1089 ret = -EINVAL;
1090 goto bail;
1091 }
1092 *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
1093 *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
1094 *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
1095 *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
1096 *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
1097
1098 ret = 0;
1099
1100 bail:
1101 return ret;
1102 }
1103
1104 /**
1105 * qib_get_counters - get various chip counters
1106 * @ppd: the physical port of the qlogic_ib device
1107 * @cntrs: counters are placed here
1108 *
1109 * Return the counters needed by recv_pma_get_portcounters().
1110 */
1111 int qib_get_counters(struct qib_pportdata *ppd,
1112 struct qib_verbs_counters *cntrs)
1113 {
1114 int ret;
1115
1116 if (!(ppd->dd->flags & QIB_PRESENT)) {
1117 /* no hardware, freeze, etc. */
1118 ret = -EINVAL;
1119 goto bail;
1120 }
1121 cntrs->symbol_error_counter =
1122 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
1123 cntrs->link_error_recovery_counter =
1124 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
1125 /*
1126 * The link downed counter counts when the other side downs the
1127 * connection. We add in the number of times we downed the link
1128 * due to local link integrity errors to compensate.
1129 */
1130 cntrs->link_downed_counter =
1131 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
1132 cntrs->port_rcv_errors =
1133 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
1134 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
1135 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
1136 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
1137 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
1138 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
1139 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
1140 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
1141 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
1142 cntrs->port_rcv_errors +=
1143 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
1144 cntrs->port_rcv_errors +=
1145 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
1146 cntrs->port_rcv_remphys_errors =
1147 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
1148 cntrs->port_xmit_discards =
1149 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
1150 cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
1151 QIBPORTCNTR_WORDSEND);
1152 cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
1153 QIBPORTCNTR_WORDRCV);
1154 cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
1155 QIBPORTCNTR_PKTSEND);
1156 cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
1157 QIBPORTCNTR_PKTRCV);
1158 cntrs->local_link_integrity_errors =
1159 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
1160 cntrs->excessive_buffer_overrun_errors =
1161 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
1162 cntrs->vl15_dropped =
1163 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
1164
1165 ret = 0;
1166
1167 bail:
1168 return ret;
1169 }
1170
1171 /**
1172 * qib_ib_piobufavail - callback when a PIO buffer is available
1173 * @dd: the device pointer
1174 *
1175 * This is called from qib_intr() at interrupt level when a PIO buffer is
1176 * available after qib_verbs_send() returned an error that no buffers were
1177 * available. Disable the interrupt if there are no more QPs waiting.
1178 */
1179 void qib_ib_piobufavail(struct qib_devdata *dd)
1180 {
1181 struct qib_ibdev *dev = &dd->verbs_dev;
1182 struct list_head *list;
1183 struct rvt_qp *qps[5];
1184 struct rvt_qp *qp;
1185 unsigned long flags;
1186 unsigned i, n;
1187 struct qib_qp_priv *priv;
1188
1189 list = &dev->piowait;
1190 n = 0;
1191
1192 /*
1193 * Note: checking that the piowait list is empty and clearing
1194 * the buffer available interrupt needs to be atomic or we
1195 * could end up with QPs on the wait list with the interrupt
1196 * disabled.
1197 */
1198 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
1199 while (!list_empty(list)) {
1200 if (n == ARRAY_SIZE(qps))
1201 goto full;
1202 priv = list_entry(list->next, struct qib_qp_priv, iowait);
1203 qp = priv->owner;
1204 list_del_init(&priv->iowait);
1205 rvt_get_qp(qp);
1206 qps[n++] = qp;
1207 }
1208 dd->f_wantpiobuf_intr(dd, 0);
1209 full:
1210 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
1211
1212 for (i = 0; i < n; i++) {
1213 qp = qps[i];
1214
1215 spin_lock_irqsave(&qp->s_lock, flags);
1216 if (qp->s_flags & RVT_S_WAIT_PIO) {
1217 qp->s_flags &= ~RVT_S_WAIT_PIO;
1218 qib_schedule_send(qp);
1219 }
1220 spin_unlock_irqrestore(&qp->s_lock, flags);
1221
1222 /* Notify qib_destroy_qp() if it is waiting. */
1223 rvt_put_qp(qp);
1224 }
1225 }
1226
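/*
 * rdmavt query_port_state callback: report LID, LMC, link state, active
 * width/speed and MTU for the given port from cached chip state.
 */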
1227 static int qib_query_port(struct rvt_dev_info *rdi, u8 port_num,
1228 struct ib_port_attr *props)
1229 {
1230 struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
1231 struct qib_devdata *dd = dd_from_dev(ibdev);
1232 struct qib_pportdata *ppd = &dd->pport[port_num - 1];
1233 enum ib_mtu mtu;
1234 u16 lid = ppd->lid;
1235
1236 /* props is zeroed by the caller; avoid zeroing it again here */
1237 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1238 props->lmc = ppd->lmc;
1239 props->state = dd->f_iblink_state(ppd->lastibcstat);
1240 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
1241 props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1242 props->active_width = ppd->link_width_active;
1243 /* See rate_show() */
1244 props->active_speed = ppd->link_speed_active;
1245 props->max_vl_num = qib_num_vls(ppd->vls_supported);
1246
1247 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
1248 switch (ppd->ibmtu) {
1249 case 4096:
1250 mtu = IB_MTU_4096;
1251 break;
1252 case 2048:
1253 mtu = IB_MTU_2048;
1254 break;
1255 case 1024:
1256 mtu = IB_MTU_1024;
1257 break;
1258 case 512:
1259 mtu = IB_MTU_512;
1260 break;
1261 case 256:
1262 mtu = IB_MTU_256;
1263 break;
1264 default:
1265 mtu = IB_MTU_2048;
1266 }
1267 props->active_mtu = mtu;
1268
1269 return 0;
1270 }
1271
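/*
 * Handle device modifications: update the node description and/or the
 * system image GUID and notify each port's management agent of the change.
 */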
1272 static int qib_modify_device(struct ib_device *device,
1273 int device_modify_mask,
1274 struct ib_device_modify *device_modify)
1275 {
1276 struct qib_devdata *dd = dd_from_ibdev(device);
1277 unsigned i;
1278 int ret;
1279
1280 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1281 IB_DEVICE_MODIFY_NODE_DESC)) {
1282 ret = -EOPNOTSUPP;
1283 goto bail;
1284 }
1285
1286 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1287 memcpy(device->node_desc, device_modify->node_desc,
1288 IB_DEVICE_NODE_DESC_MAX);
1289 for (i = 0; i < dd->num_pports; i++) {
1290 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1291
1292 qib_node_desc_chg(ibp);
1293 }
1294 }
1295
1296 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1297 ib_qib_sys_image_guid =
1298 cpu_to_be64(device_modify->sys_image_guid);
1299 for (i = 0; i < dd->num_pports; i++) {
1300 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1301
1302 qib_sys_guid_chg(ibp);
1303 }
1304 }
1305
1306 ret = 0;
1307
1308 bail:
1309 return ret;
1310 }
1311
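/* rdmavt shut_down_port callback: force the IB link down. */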
1312 static int qib_shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
1313 {
1314 struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
1315 struct qib_devdata *dd = dd_from_dev(ibdev);
1316 struct qib_pportdata *ppd = &dd->pport[port_num - 1];
1317
1318 qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1319
1320 return 0;
1321 }
1322
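/*
 * Return the GUID for the given index on the port: index 0 is the port
 * GUID, higher indices come from the per-port GUID table.
 */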
1323 static int qib_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
1324 int guid_index, __be64 *guid)
1325 {
1326 struct qib_ibport *ibp = container_of(rvp, struct qib_ibport, rvp);
1327 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1328
1329 if (guid_index == 0)
1330 *guid = ppd->guid;
1331 else if (guid_index < QIB_GUIDS_PER_PORT)
1332 *guid = ibp->guids[guid_index - 1];
1333 else
1334 return -EINVAL;
1335
1336 return 0;
1337 }
1338
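/*
 * Validate address handle attributes for rdmavt; only the SL range
 * (0..15) is checked here.
 */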
1339 int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
1340 {
1341 if (rdma_ah_get_sl(ah_attr) > 15)
1342 return -EINVAL;
1343
1344 return 0;
1345 }
1346
1347 static void qib_notify_new_ah(struct ib_device *ibdev,
1348 struct rdma_ah_attr *ah_attr,
1349 struct rvt_ah *ah)
1350 {
1351 struct qib_ibport *ibp;
1352 struct qib_pportdata *ppd;
1353
1354 /*
1355 * Do not trust reading anything from rvt_ah at this point as it is not
1356 * done being setup. We can however modify things which we need to set.
1357 */
1358
1359 ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
1360 ppd = ppd_from_ibp(ibp);
1361 ah->vl = ibp->sl_to_vl[rdma_ah_get_sl(&ah->attr)];
1362 ah->log_pmtu = ilog2(ppd->ibmtu);
1363 }
1364
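/*
 * Build an address handle on QP0's PD for the given DLID on this port.
 */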
1365 struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
1366 {
1367 struct rdma_ah_attr attr;
1368 struct ib_ah *ah = ERR_PTR(-EINVAL);
1369 struct rvt_qp *qp0;
1370 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1371 u8 port_num = ppd->port;
1372
1373 memset(&attr, 0, sizeof(attr));
1374 rdma_ah_set_dlid(&attr, dlid);
1375 rdma_ah_set_port_num(&attr, port_num);
1376 rcu_read_lock();
1377 qp0 = rcu_dereference(ibp->rvp.qp[0]);
1378 if (qp0)
1379 ah = rdma_create_ah(qp0->ibqp.pd, &attr);
1380 rcu_read_unlock();
1381 return ah;
1382 }
1383
1384 /**
1385 * qib_get_npkeys - return the size of the PKEY table for context 0
1386 * @dd: the qlogic_ib device
1387 */
1388 unsigned qib_get_npkeys(struct qib_devdata *dd)
1389 {
1390 return ARRAY_SIZE(dd->rcd[0]->pkeys);
1391 }
1392
1393 /*
1394 * Return the indexed PKEY from the port PKEY table.
1395 * No need to validate rcd[ctxt]; the port is setup if we are here.
1396 */
1397 unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
1398 {
1399 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1400 struct qib_devdata *dd = ppd->dd;
1401 unsigned ctxt = ppd->hw_pidx;
1402 unsigned ret;
1403
1404 /* dd->rcd null if mini_init or some init failures */
1405 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
1406 ret = 0;
1407 else
1408 ret = dd->rcd[ctxt]->pkeys[index];
1409
1410 return ret;
1411 }
1412
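/*
 * Initialize the per-port IB data: GID prefix, capability flags, PMA
 * counter selects, and a snapshot of the hardware counters so later reads
 * report deltas from driver load.
 */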
1413 static void init_ibport(struct qib_pportdata *ppd)
1414 {
1415 struct qib_verbs_counters cntrs;
1416 struct qib_ibport *ibp = &ppd->ibport_data;
1417
1418 spin_lock_init(&ibp->rvp.lock);
1419 /* Set the prefix to the default value (see ch. 4.1.1) */
1420 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
1421 ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1422 ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
1423 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1424 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1425 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1426 IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1427 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
1428 ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1429 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1430 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1431 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1432 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1433 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1434
1435 /* Snapshot current HW counters to "clear" them. */
1436 qib_get_counters(ppd, &cntrs);
1437 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1438 ibp->z_link_error_recovery_counter =
1439 cntrs.link_error_recovery_counter;
1440 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1441 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1442 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
1443 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1444 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1445 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1446 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1447 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1448 ibp->z_local_link_integrity_errors =
1449 cntrs.local_link_integrity_errors;
1450 ibp->z_excessive_buffer_overrun_errors =
1451 cntrs.excessive_buffer_overrun_errors;
1452 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1453 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
1454 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
1455 }
1456
1457 /**
1458 * qib_fill_device_attr - Fill in rvt dev info device attributes.
1459 * @dd: the device data structure
1460 */
1461 static void qib_fill_device_attr(struct qib_devdata *dd)
1462 {
1463 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
1464
1465 memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
1466
1467 rdi->dparms.props.max_pd = ib_qib_max_pds;
1468 rdi->dparms.props.max_ah = ib_qib_max_ahs;
1469 rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1470 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1471 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1472 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1473 rdi->dparms.props.page_size_cap = PAGE_SIZE;
1474 rdi->dparms.props.vendor_id =
1475 QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1476 rdi->dparms.props.vendor_part_id = dd->deviceid;
1477 rdi->dparms.props.hw_ver = dd->minrev;
1478 rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
1479 rdi->dparms.props.max_mr_size = ~0ULL;
1480 rdi->dparms.props.max_qp = ib_qib_max_qps;
1481 rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
1482 rdi->dparms.props.max_sge = ib_qib_max_sges;
1483 rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
1484 rdi->dparms.props.max_cq = ib_qib_max_cqs;
1485 rdi->dparms.props.max_cqe = ib_qib_max_cqes;
1486 rdi->dparms.props.max_ah = ib_qib_max_ahs;
1487 rdi->dparms.props.max_mr = rdi->lkey_table.max;
1488 rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1489 rdi->dparms.props.max_map_per_fmr = 32767;
1490 rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1491 rdi->dparms.props.max_qp_init_rd_atom = 255;
1492 rdi->dparms.props.max_srq = ib_qib_max_srqs;
1493 rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
1494 rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
1495 rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
1496 rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
1497 rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
1498 rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1499 rdi->dparms.props.max_total_mcast_qp_attach =
1500 rdi->dparms.props.max_mcast_qp_attach *
1501 rdi->dparms.props.max_mcast_grp;
1502 /* post send table */
1503 dd->verbs_dev.rdi.post_parms = qib_post_parms;
1504 }
1505
1506 /**
1507 * qib_register_ib_device - register our device with the infiniband core
1508 * @dd: the device data structure
1509 * Return 0 on success or a negative errno on failure.
1510 */
1511 int qib_register_ib_device(struct qib_devdata *dd)
1512 {
1513 struct qib_ibdev *dev = &dd->verbs_dev;
1514 struct ib_device *ibdev = &dev->rdi.ibdev;
1515 struct qib_pportdata *ppd = dd->pport;
1516 unsigned i, ctxt;
1517 int ret;
1518
1519 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
1520 for (i = 0; i < dd->num_pports; i++)
1521 init_ibport(ppd + i);
1522
1523 /* Only need to initialize non-zero fields. */
1524 setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
1525
1526 INIT_LIST_HEAD(&dev->piowait);
1527 INIT_LIST_HEAD(&dev->dmawait);
1528 INIT_LIST_HEAD(&dev->txwait);
1529 INIT_LIST_HEAD(&dev->memwait);
1530 INIT_LIST_HEAD(&dev->txreq_free);
1531
1532 if (ppd->sdma_descq_cnt) {
1533 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
1534 ppd->sdma_descq_cnt *
1535 sizeof(struct qib_pio_header),
1536 &dev->pio_hdrs_phys,
1537 GFP_KERNEL);
1538 if (!dev->pio_hdrs) {
1539 ret = -ENOMEM;
1540 goto err_hdrs;
1541 }
1542 }
1543
1544 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
1545 struct qib_verbs_txreq *tx;
1546
1547 tx = kzalloc(sizeof(*tx), GFP_KERNEL);
1548 if (!tx) {
1549 ret = -ENOMEM;
1550 goto err_tx;
1551 }
1552 tx->hdr_inx = i;
1553 list_add(&tx->txreq.list, &dev->txreq_free);
1554 }
1555
1556 /*
1557 * The system image GUID is supposed to be the same for all
1558 * IB HCAs in a single system but since there can be other
1559 * device types in the system, we can't be sure this is unique.
1560 */
1561 if (!ib_qib_sys_image_guid)
1562 ib_qib_sys_image_guid = ppd->guid;
1563
1564 strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
1565 ibdev->owner = THIS_MODULE;
1566 ibdev->node_guid = ppd->guid;
1567 ibdev->phys_port_cnt = dd->num_pports;
1568 ibdev->dev.parent = &dd->pcidev->dev;
1569 ibdev->modify_device = qib_modify_device;
1570 ibdev->process_mad = qib_process_mad;
1571
1572 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
1573 "Intel Infiniband HCA %s", init_utsname()->nodename);
1574
1575 /*
1576 * Fill in rvt info object.
1577 */
1578 dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
1579 dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
1580 dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
1581 dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
1582 dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
1583 dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
1584 dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
1585 dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
1586 dd->verbs_dev.rdi.driver_f.qp_priv_free = qib_qp_priv_free;
1587 dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
1588 dd->verbs_dev.rdi.driver_f.notify_qp_reset = qib_notify_qp_reset;
1589 dd->verbs_dev.rdi.driver_f.do_send = qib_do_send;
1590 dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send;
1591 dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp;
1592 dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
1593 dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
1594 dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
1595 dd->verbs_dev.rdi.driver_f.notify_restart_rc = qib_restart_rc;
1596 dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
1597 dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
1598 dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;
1599 dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send;
1600 dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port;
1601 dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port;
1602 dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg;
1603 dd->verbs_dev.rdi.driver_f.notify_create_mad_agent =
1604 qib_notify_create_mad_agent;
1605 dd->verbs_dev.rdi.driver_f.notify_free_mad_agent =
1606 qib_notify_free_mad_agent;
1607
1608 dd->verbs_dev.rdi.dparms.max_rdma_atomic = QIB_MAX_RDMA_ATOMIC;
1609 dd->verbs_dev.rdi.driver_f.get_guid_be = qib_get_guid_be;
1610 dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
1611 dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
1612 dd->verbs_dev.rdi.dparms.qpn_start = 1;
1613 dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP;
1614 dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */
1615 dd->verbs_dev.rdi.dparms.qpn_inc = 1;
1616 dd->verbs_dev.rdi.dparms.qos_shift = 1;
1617 dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK;
1618 dd->verbs_dev.rdi.dparms.psn_shift = QIB_PSN_SHIFT;
1619 dd->verbs_dev.rdi.dparms.psn_modify_mask = QIB_PSN_MASK;
1620 dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
1621 dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
1622 dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
1623 dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
1624 dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
1625
1626 snprintf(dd->verbs_dev.rdi.dparms.cq_name,
1627 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
1628 "qib_cq%d", dd->unit);
1629
1630 qib_fill_device_attr(dd);
1631
1632 ppd = dd->pport;
1633 for (i = 0; i < dd->num_pports; i++, ppd++) {
1634 ctxt = ppd->hw_pidx;
1635 rvt_init_port(&dd->verbs_dev.rdi,
1636 &ppd->ibport_data.rvp,
1637 i,
1638 dd->rcd[ctxt]->pkeys);
1639 }
1640
1641 ret = rvt_register_device(&dd->verbs_dev.rdi);
1642 if (ret)
1643 goto err_tx;
1644
1645 ret = qib_verbs_register_sysfs(dd);
1646 if (ret)
1647 goto err_class;
1648
1649 return ret;
1650
1651 err_class:
1652 rvt_unregister_device(&dd->verbs_dev.rdi);
1653 err_tx:
1654 while (!list_empty(&dev->txreq_free)) {
1655 struct list_head *l = dev->txreq_free.next;
1656 struct qib_verbs_txreq *tx;
1657
1658 list_del(l);
1659 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
1660 kfree(tx);
1661 }
1662 if (ppd->sdma_descq_cnt)
1663 dma_free_coherent(&dd->pcidev->dev,
1664 ppd->sdma_descq_cnt *
1665 sizeof(struct qib_pio_header),
1666 dev->pio_hdrs, dev->pio_hdrs_phys);
1667 err_hdrs:
1668 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
1669 return ret;
1670 }
1671
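/*
 * Undo qib_register_ib_device(): unregister from sysfs and rdmavt, warn
 * about any QPs still on the wait lists, and free the txreq pool and PIO
 * header buffer.
 */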
1672 void qib_unregister_ib_device(struct qib_devdata *dd)
1673 {
1674 struct qib_ibdev *dev = &dd->verbs_dev;
1675
1676 qib_verbs_unregister_sysfs(dd);
1677
1678 rvt_unregister_device(&dd->verbs_dev.rdi);
1679
1680 if (!list_empty(&dev->piowait))
1681 qib_dev_err(dd, "piowait list not empty!\n");
1682 if (!list_empty(&dev->dmawait))
1683 qib_dev_err(dd, "dmawait list not empty!\n");
1684 if (!list_empty(&dev->txwait))
1685 qib_dev_err(dd, "txwait list not empty!\n");
1686 if (!list_empty(&dev->memwait))
1687 qib_dev_err(dd, "memwait list not empty!\n");
1688
1689 del_timer_sync(&dev->mem_timer);
1690 while (!list_empty(&dev->txreq_free)) {
1691 struct list_head *l = dev->txreq_free.next;
1692 struct qib_verbs_txreq *tx;
1693
1694 list_del(l);
1695 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
1696 kfree(tx);
1697 }
1698 if (dd->pport->sdma_descq_cnt)
1699 dma_free_coherent(&dd->pcidev->dev,
1700 dd->pport->sdma_descq_cnt *
1701 sizeof(struct qib_pio_header),
1702 dev->pio_hdrs, dev->pio_hdrs_phys);
1703 }
1704
1705 /**
1706 * _qib_schedule_send - schedule progress
1707 * @qp: the qp
1708 *
1709 * This schedules progress w/o regard to the s_flags.
1710 *
1711 * It is only used in post send, which doesn't hold
1712 * the s_lock.
1713 */
1714 void _qib_schedule_send(struct rvt_qp *qp)
1715 {
1716 struct qib_ibport *ibp =
1717 to_iport(qp->ibqp.device, qp->port_num);
1718 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1719 struct qib_qp_priv *priv = qp->priv;
1720
1721 queue_work(ppd->qib_wq, &priv->s_work);
1722 }
1723
1724 /**
1725 * qib_schedule_send - schedule progress
1726 * @qp: the qp
1727 *
1728 * This schedules qp progress. The s_lock
1729 * should be held.
1730 */
1731 void qib_schedule_send(struct rvt_qp *qp)
1732 {
1733 if (qib_send_ok(qp))
1734 _qib_schedule_send(qp);
1735 }