/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");
static unsigned int hfi1_max_pds = 0xFFFF;
module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int hfi1_max_ahs = 0xFFFF;
module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int hfi1_max_cqes = 0x2FFFF;
module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int hfi1_max_cqs = 0x1FFFF;
module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int hfi1_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int hfi1_max_qps = 16384;
module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int hfi1_max_sges = 0x60;
module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int hfi1_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int hfi1_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int hfi1_max_srqs = 1024;
module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int hfi1_max_srq_sges = 128;
module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
unsigned int hfi1_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status,
	int drained);
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; hfi1_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_hfi1_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_INIT] = HFI1_POST_RECV_OK,
	[IB_QPS_RTR] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK,
	[IB_QPS_RTS] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK |
	    HFI1_POST_SEND_OK | HFI1_PROCESS_SEND_OK |
	    HFI1_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK |
	    HFI1_POST_SEND_OK | HFI1_PROCESS_SEND_OK,
	[IB_QPS_SQE] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK |
	    HFI1_POST_SEND_OK | HFI1_FLUSH_SEND,
	[IB_QPS_ERR] = HFI1_POST_RECV_OK | HFI1_FLUSH_RECV |
	    HFI1_POST_SEND_OK | HFI1_FLUSH_SEND,
};
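/*
 * Each entry above is a bitmask consulted on the fast path; e.g.
 * post_send() below tests
 * (ib_hfi1_state_ops[qp->state] & HFI1_POST_SEND_OK) before queuing a
 * work request, so a QP state transition only has to update qp->state
 * for every posting and processing path to honor the new state.
 */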
struct hfi1_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct hfi1_ucontext *to_iucontext(struct ib_ucontext
						 *ibucontext)
{
	return container_of(ibucontext, struct hfi1_ucontext, ibucontext);
}
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
/*
 * Length of header by opcode, 0 --> not supported
 */
const u8 hdr_len_by_opcode[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST] = 12 + 8,
	[IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST] = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_SEND_ONLY] = 12 + 8,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	[IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4,
	[IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4,
	[IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
	[IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
	/* UC */
	[IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
	[IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST] = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_SEND_ONLY] = 12 + 8,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12
};
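/*
 * The sums above follow the InfiniBand wire header layout: 8 bytes of
 * LRH plus 12 bytes of BTH, plus whatever extension headers the opcode
 * carries (RETH 16, ImmDt 4, AETH 4, AtomicETH 28, DETH 8).  For
 * example, RDMA WRITE ONLY WITH IMMEDIATE is
 * 8 (LRH) + 12 (BTH) + 16 (RETH) + 4 (ImmDt) = 40 bytes = 12 + 8 + 20.
 */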
static const opcode_handler opcode_handler_tbl[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
	/* UC */
	[IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv,
	/* CNP */
	[IB_OPCODE_CNP] = &hfi1_cnp_rcv
};
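/*
 * Receive dispatch: hfi1_ib_rcv() below extracts the opcode from the
 * top byte of BTH dword 0 and, once qp_ok() has checked the QP state
 * and allowed opcode range, jumps through this table, so RC, UC, UD and
 * CNP packets all funnel through a single indirect call.
 */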
__be64 ib_hfi1_sys_image_guid;
/*
 * hfi1_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 */
void hfi1_copy_sge(
	struct hfi1_sge_state *ss,
	void *data, u32 length,
	int release)
{
	struct hfi1_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				hfi1_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= HFI1_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}
/*
 * hfi1_skip_sge - skip over SGE memory
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release)
{
	struct hfi1_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				hfi1_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= HFI1_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}
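/*
 * hfi1_copy_sge(), hfi1_skip_sge() and update_sge() all walk an SGE
 * list the same way: consume the current hfi1_sge and, when its
 * sge_length reaches zero, move on to the next list entry (releasing
 * the MR reference where the caller asked for it); when only the
 * current registered-memory segment is exhausted, step to the next
 * map[m]->segs[n] slot instead.
 */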
/*
 * post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
{
	struct hfi1_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct hfi1_lkey_table *rkt;
	struct hfi1_pd *pd;
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	ppd = &dd->pport[qp->port_num - 1];
	ibp = &ppd->ibport_data;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (wr->opcode == IB_WR_FAST_REG_MR) {
		return -EINVAL;
	} else if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
			return -EINVAL;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			return -EINVAL;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != wr->wr.ud.ah->pd)
			return -EINVAL;
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
		return -EINVAL;
	else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		 (wr->num_sge == 0 ||
		  wr->sg_list[0].length < sizeof(u64) ||
		  wr->sg_list[0].addr & (sizeof(u64) - 1)))
		return -EINVAL;
	else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
		return -EINVAL;

	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last)
		return -ENOMEM;

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.pd);
	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = hfi1_lkey_ok(rkt, pd, &wqe->sg_list[j],
					  &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
	} else {
		struct hfi1_ah *ah = to_iah(wr->wr.ud.ah);

		atomic_inc(&ah->refcount);
	}
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	return 0;

bail_inval_free:
	/* release mr holds */
	while (j) {
		struct hfi1_sge *sge = &wqe->sg_list[--j];

		hfi1_put_mr(sge->mr);
	}
	return -EINVAL;
}
/*
 * post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	struct hfi1_qp *qp = to_iqp(ibqp);
	int err = 0;
	int call_send;
	unsigned long flags;
	unsigned nreq = 0;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Check that state is OK to post send. */
	if (unlikely(!(ib_hfi1_state_ops[qp->state] & HFI1_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return -EINVAL;
	}

	/* sq empty and not list -> call send */
	call_send = qp->s_head == qp->s_last && !wr->next;

	for (; wr; wr = wr->next) {
		err = post_one_send(qp, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto bail;
		}
		nreq++;
	}
bail:
	if (nreq && !call_send)
		hfi1_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	if (nreq && call_send)
		hfi1_do_send(&qp->s_iowait.iowork);
	return err;
}
/*
 * post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			struct ib_recv_wr **bad_wr)
{
	struct hfi1_qp *qp = to_iqp(ibqp);
	struct hfi1_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int ret;

	/* Check that state is OK to post receive. */
	if (!(ib_hfi1_state_ops[qp->state] & HFI1_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		ret = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		struct hfi1_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
/*
 * Make sure the QP is ready and able to accept the given opcode.
 */
static inline int qp_ok(int opcode, struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp;

	if (!(ib_hfi1_state_ops[packet->qp->state] & HFI1_PROCESS_RECV_OK))
		goto dropit;
	if (((opcode & OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
	    (opcode == IB_OPCODE_CNP))
		return 1;
dropit:
	ibp = &packet->rcd->ppd->ibport_data;
	ibp->n_pkt_drops++;
	return 0;
}
/*
 * hfi1_ib_rcv - process an incoming packet
 * @packet: data packet information
 *
 * This is called to process an incoming packet at interrupt level.
 *
 * Tlen is the length of the header + data + CRC in bytes.
 */
void hfi1_ib_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_ib_header *hdr = packet->hdr;
	u32 tlen = packet->tlen;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == HFI1_LRH_BTH)
		packet->ohdr = &hdr->u.oth;
	else if (lnh == HFI1_LRH_GRH) {
		u32 vtf;

		packet->ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
		packet->rcv_flags |= HFI1_HAS_GRH;
	} else
		goto drop;

	trace_input_ibhdr(rcd->dd, hdr);

	opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
	inc_opstats(tlen, &rcd->opstats->stats[opcode]);

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(packet->ohdr->bth[1]) & HFI1_QPN_MASK;
	lid = be16_to_cpu(hdr->lrh[1]);
	if (unlikely((lid >= HFI1_MULTICAST_LID_BASE) &&
		     (lid != HFI1_PERMISSIVE_LID))) {
		struct hfi1_mcast *mcast;
		struct hfi1_mcast_qp *p;

		if (lnh != HFI1_LRH_GRH)
			goto drop;
		mcast = hfi1_mcast_find(ibp, &hdr->u.l.grh.dgid);
		if (mcast == NULL)
			goto drop;
		list_for_each_entry_rcu(p, &mcast->qp_list, list) {
			packet->qp = p->qp;
			spin_lock(&packet->qp->r_lock);
			if (likely((qp_ok(opcode, packet))))
				opcode_handler_tbl[opcode](packet);
			spin_unlock(&packet->qp->r_lock);
		}
		/*
		 * Notify hfi1_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		rcu_read_lock();
		packet->qp = hfi1_lookup_qpn(ibp, qp_num);
		if (!packet->qp) {
			rcu_read_unlock();
			goto drop;
		}
		spin_lock(&packet->qp->r_lock);
		if (likely((qp_ok(opcode, packet))))
			opcode_handler_tbl[opcode](packet);
		spin_unlock(&packet->qp->r_lock);
		rcu_read_unlock();
	}
	return;

drop:
	ibp->n_pkt_drops++;
}
/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
	struct list_head *list = &dev->memwait;
	struct hfi1_qp *qp = NULL;
	struct iowait *wait;
	unsigned long flags;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(list)) {
		wait = list_first_entry(list, struct iowait, list);
		qp = container_of(wait, struct hfi1_qp, s_iowait);
		list_del_init(&qp->s_iowait.list);
		/* refcount held until actual wake up */
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);

	if (qp)
		hfi1_qp_wakeup(qp, HFI1_S_WAIT_KMEM);
}
void update_sge(struct hfi1_sge_state *ss, u32 length)
{
	struct hfi1_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= HFI1_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
						struct hfi1_qp *qp)
{
	struct verbs_txreq *tx;
	unsigned long flags;

	tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
	if (!tx) {
		spin_lock_irqsave(&qp->s_lock, flags);
		write_seqlock(&dev->iowait_lock);
		if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK &&
		    list_empty(&qp->s_iowait.list)) {
			qp->s_flags |= HFI1_S_WAIT_TX;
			list_add_tail(&qp->s_iowait.list, &dev->txwait);
			trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TX);
			atomic_inc(&qp->refcount);
		}
		qp->s_flags &= ~HFI1_S_BUSY;
		write_sequnlock(&dev->iowait_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = ERR_PTR(-EBUSY);
	}
	return tx;
}
static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
					    struct hfi1_qp *qp)
{
	struct verbs_txreq *tx;

	tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
	if (!tx)
		/* call slow path to get the lock */
		tx = __get_txreq(dev, qp);
	return tx;
}
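/*
 * Allocation strategy for send descriptors: get_txreq() tries a
 * lock-free GFP_ATOMIC slab allocation first; only when that fails does
 * __get_txreq() take s_lock/iowait_lock, park the QP on dev->txwait and
 * hand back -EBUSY, with hfi1_put_txreq() below waking the first waiter
 * once a descriptor is freed.
 */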
void hfi1_put_txreq(struct verbs_txreq *tx)
{
	struct hfi1_ibdev *dev;
	struct hfi1_qp *qp;
	unsigned long flags;
	unsigned int seq;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (tx->mr) {
		hfi1_put_mr(tx->mr);
		tx->mr = NULL;
	}
	sdma_txclean(dd_from_dev(dev), &tx->txreq);

	/* Free verbs_txreq and return to slab cache */
	kmem_cache_free(dev->verbs_txreq_cache, tx);

	do {
		seq = read_seqbegin(&dev->iowait_lock);
		if (!list_empty(&dev->txwait)) {
			struct iowait *wait;

			write_seqlock_irqsave(&dev->iowait_lock, flags);
			/* Wake up first QP wanting a free struct */
			wait = list_first_entry(&dev->txwait, struct iowait,
						list);
			qp = container_of(wait, struct hfi1_qp, s_iowait);
			list_del_init(&qp->s_iowait.list);
			/* refcount held until actual wake up */
			write_sequnlock_irqrestore(&dev->iowait_lock, flags);
			hfi1_qp_wakeup(qp, HFI1_S_WAIT_TX);
			break;
		}
	} while (read_seqretry(&dev->iowait_lock, seq));
}
/*
 * This is called with progress side lock held.
 */
static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status,
	int drained)
{
	struct verbs_txreq *tx =
		container_of(cookie, struct verbs_txreq, txreq);
	struct hfi1_qp *qp = tx->qp;

	spin_lock(&qp->s_lock);
	if (tx->wqe)
		hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct hfi1_ib_header *hdr;

		hdr = &tx->phdr.hdr;
		hfi1_rc_send_complete(qp, hdr);
	}
	if (drained) {
		/*
		 * This happens when the send engine notes
		 * a QP in the error state and cannot
		 * do the flush work until that QP's
		 * sdma work has finished.
		 */
		if (qp->s_flags & HFI1_S_WAIT_DMA) {
			qp->s_flags &= ~HFI1_S_WAIT_DMA;
			hfi1_schedule_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);

	hfi1_put_txreq(tx);
}
static int wait_kmem(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
		write_seqlock(&dev->iowait_lock);
		if (list_empty(&qp->s_iowait.list)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= HFI1_S_WAIT_KMEM;
			list_add_tail(&qp->s_iowait.list, &dev->memwait);
			trace_hfi1_qpsleep(qp, HFI1_S_WAIT_KMEM);
			atomic_inc(&qp->refcount);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~HFI1_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}
/*
 * This routine calls txadds for each sg entry.
 *
 * Add failures will revert the sge cursor
 */
static int build_verbs_ulp_payload(
	struct sdma_engine *sde,
	struct hfi1_sge_state *ss,
	u32 length,
	struct verbs_txreq *tx)
{
	struct hfi1_sge *sg_list = ss->sg_list;
	struct hfi1_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 len;
	int ret = 0;

	while (length) {
		len = ss->sge.length;
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		WARN_ON_ONCE(len == 0);
		ret = sdma_txadd_kvaddr(
			sde->dd,
			&tx->txreq,
			ss->sge.vaddr,
			len);
		if (ret)
			goto bail_txadd;
		update_sge(ss, len);
		length -= len;
	}
	return ret;

bail_txadd:
	/* unwind cursor */
	ss->sge = sge;
	ss->num_sge = num_sge;
	ss->sg_list = sg_list;
	return ret;
}
/*
 * Build the number of DMA descriptors needed to send length bytes of data.
 *
 * NOTE: DMA mapping is held in the tx until completed in the ring or
 *       the tx desc is freed without having been submitted to the ring
 *
 * This routine insures the following all the helper routine
 * calls succeed.
 */
static int build_verbs_tx_desc(
	struct sdma_engine *sde,
	struct hfi1_sge_state *ss,
	u32 length,
	struct verbs_txreq *tx,
	struct ahg_ib_header *ahdr,
	u64 pbc)
{
	int ret = 0;
	struct hfi1_pio_header *phdr;
	u16 hdrbytes = tx->hdr_dwords << 2;

	phdr = &tx->phdr;
	if (!ahdr->ahgcount) {
		ret = sdma_txinit_ahg(
			&tx->txreq,
			ahdr->tx_flags,
			hdrbytes + length,
			ahdr->ahgidx,
			0,
			NULL,
			0,
			verbs_sdma_complete);
		if (ret)
			goto bail_txadd;
		phdr->pbc = cpu_to_le64(pbc);
		memcpy(&phdr->hdr, &ahdr->ibh, hdrbytes - sizeof(phdr->pbc));
		/* add the header */
		ret = sdma_txadd_kvaddr(
			sde->dd,
			&tx->txreq,
			phdr,
			tx->hdr_dwords << 2);
		if (ret)
			goto bail_txadd;
	} else {
		struct hfi1_other_headers *sohdr = &ahdr->ibh.u.oth;
		struct hfi1_other_headers *dohdr = &phdr->hdr.u.oth;

		/* needed in rc_send_complete() */
		phdr->hdr.lrh[0] = ahdr->ibh.lrh[0];
		if ((be16_to_cpu(phdr->hdr.lrh[0]) & 3) == HFI1_LRH_GRH) {
			sohdr = &ahdr->ibh.u.l.oth;
			dohdr = &phdr->hdr.u.l.oth;
		}
		/* opcode */
		dohdr->bth[0] = sohdr->bth[0];
		/* PSN/ACK */
		dohdr->bth[2] = sohdr->bth[2];
		ret = sdma_txinit_ahg(
			&tx->txreq,
			ahdr->tx_flags,
			length,
			ahdr->ahgidx,
			ahdr->ahgcount,
			ahdr->ahgdesc,
			hdrbytes,
			verbs_sdma_complete);
		if (ret)
			goto bail_txadd;
	}

	/* add the ulp payload - if any. ss can be NULL for acks */
	if (ss)
		ret = build_verbs_ulp_payload(sde, ss, length, tx);
bail_txadd:
	return ret;
}
int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
			u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
			u32 plen, u32 dwords, u64 pbc)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct verbs_txreq *tx;
	struct sdma_txreq *stx;
	u64 pbc_flags = 0;
	struct sdma_engine *sde;
	u8 sc5 = qp->s_sc;
	int ret;

	if (!list_empty(&qp->s_iowait.tx_head)) {
		stx = list_first_entry(
			&qp->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&stx->list);
		tx = container_of(stx, struct verbs_txreq, txreq);
		ret = sdma_send_txreq(tx->sde, &qp->s_iowait, stx);
		if (unlikely(ret == -ECOMM))
			goto bail_ecomm;
		return ret;
	}

	tx = get_txreq(dev, qp);
	if (IS_ERR(tx))
		goto bail_tx;

	if (!qp->s_hdr->sde) {
		tx->sde = sde = qp_to_sdma_engine(qp, sc5);
	} else
		tx->sde = sde = qp->s_hdr->sde;

	if (likely(pbc == 0)) {
		u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);

		/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
		pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
		pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	}
	tx->wqe = qp->s_wqe;
	tx->mr = qp->s_rdma_mr;
	if (qp->s_rdma_mr)
		qp->s_rdma_mr = NULL;
	tx->hdr_dwords = hdrwords + 2;
	ret = build_verbs_tx_desc(sde, ss, len, tx, ahdr, pbc);
	if (unlikely(ret))
		goto bail_build;
	trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh);
	ret = sdma_send_txreq(sde, &qp->s_iowait, &tx->txreq);
	if (unlikely(ret == -ECOMM))
		goto bail_ecomm;
	return ret;

bail_ecomm:
	/* The current one got "sent" */
	return 0;
bail_build:
	/* kmalloc or mapping fail */
	return wait_kmem(dev, qp);
bail_tx:
	return PTR_ERR(tx);
}
/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct hfi1_qp *qp, struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, sc_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
		write_seqlock(&dev->iowait_lock);
		if (list_empty(&qp->s_iowait.list)) {
			struct hfi1_ibdev *dev = &dd->verbs_dev;
			int was_empty;

			qp->s_flags |= HFI1_S_WAIT_PIO;
			was_empty = list_empty(&sc->piowait);
			list_add_tail(&qp->s_iowait.list, &sc->piowait);
			trace_hfi1_qpsleep(qp, HFI1_S_WAIT_PIO);
			atomic_inc(&qp->refcount);
			/* counting: only call wantpiobuf_intr if first user */
			if (was_empty)
				hfi1_sc_wantpiobuf_intr(sc, 1);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~HFI1_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_pportdata *ppd = dd->pport + (qp->port_num - 1);
	u8 vl;

	vl = sc_to_vlt(dd, sc5);
	if (vl >= ppd->vls_supported && vl != 15)
		return NULL;
	return dd->vld[vl].sc;
}
int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
			u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
			u32 plen, u32 dwords, u64 pbc)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 *hdr = (u32 *)&ahdr->ibh;
	u64 pbc_flags = 0;
	u32 sc5;
	unsigned long flags = 0;
	struct send_context *sc;
	struct pio_buf *pbuf;
	int wc_status = IB_WC_SUCCESS;

	/* vl15 special case taken care of in ud.c */
	sc5 = qp->s_sc;
	sc = qp_to_send_context(qp, sc5);

	if (!sc)
		return -EINVAL;
	if (likely(pbc == 0)) {
		u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);

		/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
		pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
		pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	}
	pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
	if (unlikely(pbuf == NULL)) {
		if (ppd->host_link_state != HLS_UP_ACTIVE) {
			/*
			 * If we have filled the PIO buffers to capacity and are
			 * not in an active state this request is not going to
			 * go out to so just complete it with an error or else a
			 * ULP or the core may be stuck waiting.
			 */
			hfi1_cdbg(PIO,
				  "alloc failed. state not active, completing");
			wc_status = IB_WC_GENERAL_ERR;
			goto pio_bail;
		} else {
			/*
			 * This is a normal occurrence. The PIO buffs are full
			 * up but we are still happily sending, well we could be
			 * so lets continue to queue the request.
			 */
			hfi1_cdbg(PIO, "alloc failed. state active, queuing");
			return no_bufs_available(qp, sc);
		}
	}

	if (len == 0) {
		pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
	} else {
		seg_pio_copy_start(pbuf, pbc, hdr, hdrwords*4);
		while (len) {
			void *addr = ss->sge.vaddr;
			u32 slen = ss->sge.length;

			if (slen > len)
				slen = len;
			update_sge(ss, slen);
			seg_pio_copy_mid(pbuf, addr, slen);
			len -= slen;
		}
		seg_pio_copy_end(pbuf);
	}

	trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh);

	if (qp->s_rdma_mr) {
		hfi1_put_mr(qp->s_rdma_mr);
		qp->s_rdma_mr = NULL;
	}

pio_bail:
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_send_complete(qp, qp->s_wqe, wc_status);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_rc_send_complete(qp, &ahdr->ibh);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	return 0;
}
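/*
 * PIO copy sequence used above: a header-only packet goes out with a
 * single pio_copy(); otherwise seg_pio_copy_start() writes the PBC and
 * header, seg_pio_copy_mid() streams each SGE segment as update_sge()
 * advances the cursor, and seg_pio_copy_end() flushes the buffer.
 */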
/*
 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the ingress partition key table), return 0
 * otherwise. Use the matching criteria for egress partition keys
 * specified in the OPAv1 spec., section 9.11.7.
 */
static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 ment = ent & PKEY_LOW_15_MASK;

	if (mkey == ment) {
		/*
		 * If pkey[15] is set (full partition member),
		 * is bit 15 in the corresponding table element
		 * clear (limited member)?
		 */
		if (pkey & PKEY_MEMBER_MASK)
			return !!(ent & PKEY_MEMBER_MASK);

		return 1;
	}
	return 0;
}
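/*
 * Concretely: bit 15 of a pkey marks full partition membership.  With
 * the check above, a full-member pkey such as 0x8001 only matches a
 * table entry that is also a full member (0x8001), while a limited
 * pkey such as 0x0001 is accepted against any entry whose low 15 bits
 * are 1.
 */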
/*
 * egress_pkey_check - return 0 if hdr's pkey matches according to the
 * criteria in the OPAv1 spec., section 9.11.7.
 */
static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
				    struct hfi1_ib_header *hdr,
				    struct hfi1_qp *qp)
{
	struct hfi1_other_headers *ohdr;
	struct hfi1_devdata *dd;
	int i = 0;
	u16 pkey;
	u8 lnh, sc5 = qp->s_sc;

	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
		return 0;

	/* locate the pkey within the headers */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == HFI1_LRH_GRH)
		ohdr = &hdr->u.l.oth;
	else
		ohdr = &hdr->u.oth;

	pkey = (u16)be32_to_cpu(ohdr->bth[0]);

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	/* Is the pkey = 0x0, or 0x8000? */
	if ((pkey & PKEY_LOW_15_MASK) == 0)
		goto bad;

	/* The most likely matching pkey has index qp->s_pkey_index */
	if (unlikely(!egress_pkey_matches_entry(pkey,
					ppd->pkeys[qp->s_pkey_index]))) {
		/* no match - try the entire table */
		for (; i < MAX_PKEY_VALUES; i++) {
			if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
				break;
		}
	}

	if (i < MAX_PKEY_VALUES)
		return 0;
bad:
	incr_cntr64(&ppd->port_xmit_constraint_errors);
	dd = ppd->dd;
	if (!(dd->err_info_xmit_constraint.status & OPA_EI_STATUS_SMASK)) {
		u16 slid = be16_to_cpu(hdr->lrh[3]);

		dd->err_info_xmit_constraint.status |= OPA_EI_STATUS_SMASK;
		dd->err_info_xmit_constraint.slid = slid;
		dd->err_info_xmit_constraint.pkey = pkey;
	}
	return 1;
}
/*
 * hfi1_verbs_send - send a packet
 * @qp: the QP to send on
 * @ahdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags HFI1_S_BUSY otherwise.
 */
int hfi1_verbs_send(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
		    u32 hdrwords, struct hfi1_sge_state *ss, u32 len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	u32 plen;
	int ret;
	int pio = 0;
	unsigned long flags = 0;
	u32 dwords = (len + 3) >> 2;

	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if ((qp->ibqp.qp_type == IB_QPT_SMI) ||
	    !(dd->flags & HFI1_HAS_SEND_DMA))
		pio = 1;

	ret = egress_pkey_check(dd->pport, &ahdr->ibh, qp);
	if (unlikely(ret)) {
		/*
		 * The value we are returning here does not get propagated to
		 * the verbs caller. Thus we need to complete the request with
		 * error otherwise the caller could be sitting waiting on the
		 * completion event. Only do this for PIO. SDMA has its own
		 * mechanism for handling the errors. So for SDMA we can just
		 * return.
		 */
		if (pio) {
			hfi1_cdbg(PIO, "%s() Failed. Completing with err",
				  __func__);
			spin_lock_irqsave(&qp->s_lock, flags);
			hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		return -EINVAL;
	}

	/*
	 * Calculate the send buffer trigger address.
	 * The +2 counts for the pbc control qword
	 */
	plen = hdrwords + dwords + 2;

	if (pio) {
		ret = dd->process_pio_send(
			qp, ahdr, hdrwords, ss, len, plen, dwords, 0);
	} else {
#ifdef CONFIG_SDMA_VERBOSITY
		dd_dev_err(dd, "CONFIG SDMA %s:%d %s()\n",
			   slashstrip(__FILE__), __LINE__, __func__);
		dd_dev_err(dd, "SDMA hdrwords = %u, len = %u\n", hdrwords, len);
#endif
		ret = dd->process_dma_send(
			qp, ahdr, hdrwords, ss, len, plen, dwords, 0);
	}

	return ret;
}
static int query_device(struct ib_device *ibdev,
			struct ib_device_attr *props,
			struct ib_udata *uhw)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_ibdev *dev = to_idev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;
	memset(props, 0, sizeof(*props));

	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;

	props->page_size_cap = PAGE_SIZE;
	props->vendor_id =
		dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
	props->vendor_part_id = dd->pcidev->device;
	props->hw_ver = dd->minrev;
	props->sys_image_guid = ib_hfi1_sys_image_guid;
	props->max_mr_size = ~0ULL;
	props->max_qp = hfi1_max_qps;
	props->max_qp_wr = hfi1_max_qp_wrs;
	props->max_sge = hfi1_max_sges;
	props->max_sge_rd = hfi1_max_sges;
	props->max_cq = hfi1_max_cqs;
	props->max_ah = hfi1_max_ahs;
	props->max_cqe = hfi1_max_cqes;
	props->max_mr = dev->lk_table.max;
	props->max_fmr = dev->lk_table.max;
	props->max_map_per_fmr = 32767;
	props->max_pd = hfi1_max_pds;
	props->max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
	props->max_qp_init_rd_atom = 255;
	/* props->max_res_rd_atom */
	props->max_srq = hfi1_max_srqs;
	props->max_srq_wr = hfi1_max_srq_wrs;
	props->max_srq_sge = hfi1_max_srq_sges;
	/* props->local_ca_ack_delay */
	props->atomic_cap = IB_ATOMIC_GLOB;
	props->max_pkeys = hfi1_get_npkeys(dd);
	props->max_mcast_grp = hfi1_max_mcast_grps;
	props->max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	return 0;
}
static inline u16 opa_speed_to_ib(u16 in)
{
	u16 out = 0;

	if (in & OPA_LINK_SPEED_25G)
		out |= IB_SPEED_EDR;
	if (in & OPA_LINK_SPEED_12_5G)
		out |= IB_SPEED_FDR;

	return out;
}
/*
 * Convert a single OPA link width (no multiple flags) to an IB value.
 * A zero OPA link width means link down, which means the IB width value
 * is a don't care.
 */
static inline u16 opa_width_to_ib(u16 in)
{
	switch (in) {
	case OPA_LINK_WIDTH_1X:
	/* map 2x and 3x to 1x as they don't exist in IB */
	case OPA_LINK_WIDTH_2X:
	case OPA_LINK_WIDTH_3X:
		return IB_WIDTH_1X;
	default: /* link down or unknown, return our largest width */
	case OPA_LINK_WIDTH_4X:
		return IB_WIDTH_4X;
	}
}
static int query_port(struct ib_device *ibdev, u8 port,
		      struct ib_port_attr *props)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u16 lid = ppd->lid;

	memset(props, 0, sizeof(*props));
	props->lid = lid ? lid : 0;
	props->lmc = ppd->lmc;
	props->sm_lid = ibp->sm_lid;
	props->sm_sl = ibp->sm_sl;
	/* OPA logical states match IB logical states */
	props->state = driver_lstate(ppd);
	props->phys_state = hfi1_ibphys_portstate(ppd);
	props->port_cap_flags = ibp->port_cap_flags;
	props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = hfi1_get_npkeys(dd);
	props->bad_pkey_cntr = ibp->pkey_violations;
	props->qkey_viol_cntr = ibp->qkey_violations;
	props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
	/* see rate_show() in ib core/sysfs.c */
	props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
	props->max_vl_num = ppd->vls_supported;
	props->init_type_reply = 0;

	/* Once we are a "first class" citizen and have added the OPA MTUs to
	 * the core we can advertise the larger MTU enum to the ULPs, for now
	 * advertise only 4K.
	 *
	 * Those applications which are either OPA aware or pass the MTU enum
	 * from the Path Records to us will get the new 8k MTU.  Those that
	 * attempt to process the MTU enum may fail in various ways.
	 */
	props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
				      4096 : hfi1_max_mtu), IB_MTU_4096);
	props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
		mtu_to_enum(ppd->ibmtu, IB_MTU_2048);
	props->subnet_timeout = ibp->subnet_timeout;

	return 0;
}
static int port_immutable(struct ib_device *ibdev, u8 port_num,
			  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	memset(immutable, 0, sizeof(*immutable));

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
	immutable->max_mad_size = OPA_MGMT_MAD_SIZE;

	return 0;
}
static int modify_device(struct ib_device *device,
			 int device_modify_mask,
			 struct ib_device_modify *device_modify)
{
	struct hfi1_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

			hfi1_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_hfi1_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

			hfi1_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}
static int modify_port(struct ib_device *ibdev, u8 port,
		       int port_modify_mask, struct ib_port_modify *props)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	int ret = 0;

	ibp->port_cap_flags |= props->set_port_cap_mask;
	ibp->port_cap_flags &= ~props->clr_port_cap_mask;
	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		hfi1_cap_mask_chg(ibp);
	if (port_modify_mask & IB_PORT_SHUTDOWN) {
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
				     OPA_LINKDOWN_REASON_UNKNOWN);
		ret = set_link_state(ppd, HLS_DN_DOWNDEF);
	}
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		ibp->qkey_violations = 0;
	return ret;
}
static int query_gid(struct ib_device *ibdev, u8 port,
		     int index, union ib_gid *gid)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	int ret = 0;

	if (!port || port > dd->num_pports)
		ret = -EINVAL;
	else {
		struct hfi1_ibport *ibp = to_iport(ibdev, port);
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		gid->global.subnet_prefix = ibp->gid_prefix;
		if (index == 0)
			gid->global.interface_id = cpu_to_be64(ppd->guid);
		else if (index < HFI1_GUIDS_PER_PORT)
			gid->global.interface_id = ibp->guids[index - 1];
		else
			ret = -EINVAL;
	}

	return ret;
}
static struct ib_pd *alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct hfi1_ibdev *dev = to_idev(ibdev);
	struct hfi1_pd *pd;
	struct ib_pd *ret;

	/*
	 * This is actually totally arbitrary. Some correctness tests
	 * assume there's a maximum number of PDs that can be allocated.
	 * We don't actually have this limit, but we fail the test if
	 * we allow allocations of more than we report for this value.
	 */

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock(&dev->n_pds_lock);
	if (dev->n_pds_allocated == hfi1_max_pds) {
		spin_unlock(&dev->n_pds_lock);
		kfree(pd);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_pds_allocated++;
	spin_unlock(&dev->n_pds_lock);

	/* ib_alloc_pd() will initialize pd->ibpd. */
	pd->user = udata != NULL;

	ret = &pd->ibpd;

bail:
	return ret;
}
static int dealloc_pd(struct ib_pd *ibpd)
{
	struct hfi1_pd *pd = to_ipd(ibpd);
	struct hfi1_ibdev *dev = to_idev(ibpd->device);

	spin_lock(&dev->n_pds_lock);
	dev->n_pds_allocated--;
	spin_unlock(&dev->n_pds_lock);

	kfree(pd);

	return 0;
}
/*
 * convert ah port,sl to sc
 */
u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, ah->port_num);

	return ibp->sl_to_sc[ah->sl];
}
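/*
 * The sl_to_sc table consulted here is per port and starts out as the
 * identity mapping in init_ibport() below; the resulting SC is what
 * qp_to_send_context() and the PBC construction use to select a VL and
 * send context for this address handle's traffic.
 */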
int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	u8 sc5;

	/* A multicast address requires a GRH (see ch. 8.4.1). */
	if (ah_attr->dlid >= HFI1_MULTICAST_LID_BASE &&
	    ah_attr->dlid != HFI1_PERMISSIVE_LID &&
	    !(ah_attr->ah_flags & IB_AH_GRH))
		goto bail;
	if ((ah_attr->ah_flags & IB_AH_GRH) &&
	    ah_attr->grh.sgid_index >= HFI1_GUIDS_PER_PORT)
		goto bail;
	if (ah_attr->dlid == 0)
		goto bail;
	if (ah_attr->port_num < 1 ||
	    ah_attr->port_num > ibdev->phys_port_cnt)
		goto bail;
	if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
	    ib_rate_to_mbps(ah_attr->static_rate) < 0)
		goto bail;
	if (ah_attr->sl >= OPA_MAX_SLS)
		goto bail;
	/* test the mapping for validity */
	ibp = to_iport(ibdev, ah_attr->port_num);
	ppd = ppd_from_ibp(ibp);
	sc5 = ibp->sl_to_sc[ah_attr->sl];
	dd = dd_from_ppd(ppd);
	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
		goto bail;
	return 0;
bail:
	return -EINVAL;
}
/*
 * create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *create_ah(struct ib_pd *pd,
			       struct ib_ah_attr *ah_attr)
{
	struct hfi1_ah *ah;
	struct ib_ah *ret;
	struct hfi1_ibdev *dev = to_idev(pd->device);
	unsigned long flags;

	if (hfi1_check_ah(pd->device, ah_attr)) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	if (dev->n_ahs_allocated == hfi1_max_ahs) {
		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
		kfree(ah);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_ahs_allocated++;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	/* ib_create_ah() will initialize ah->ibah. */
	ah->attr = *ah_attr;
	atomic_set(&ah->refcount, 0);

	ret = &ah->ibah;

bail:
	return ret;
}
struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
{
	struct ib_ah_attr attr;
	struct ib_ah *ah = ERR_PTR(-EINVAL);
	struct hfi1_qp *qp0;

	memset(&attr, 0, sizeof(attr));
	attr.dlid = dlid;
	attr.port_num = ppd_from_ibp(ibp)->port;
	rcu_read_lock();
	qp0 = rcu_dereference(ibp->qp[0]);
	if (qp0)
		ah = ib_create_ah(qp0->ibqp.pd, &attr);
	rcu_read_unlock();
	return ah;
}
/*
 * destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int destroy_ah(struct ib_ah *ibah)
{
	struct hfi1_ibdev *dev = to_idev(ibah->device);
	struct hfi1_ah *ah = to_iah(ibah);
	unsigned long flags;

	if (atomic_read(&ah->refcount) != 0)
		return -EBUSY;

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	dev->n_ahs_allocated--;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	kfree(ah);

	return 0;
}
static int modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct hfi1_ah *ah = to_iah(ibah);

	if (hfi1_check_ah(ibah->device, ah_attr))
		return -EINVAL;

	ah->attr = *ah_attr;

	return 0;
}
static int query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct hfi1_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;

	return 0;
}
/*
 * hfi1_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the hfi1_ib device
 */
unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
{
	return ARRAY_SIZE(dd->pport[0].pkeys);
}
static int query_pkey(struct ib_device *ibdev, u8 port, u16 index,
		      u16 *pkey)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	if (index >= hfi1_get_npkeys(dd)) {
		ret = -EINVAL;
		goto bail;
	}

	*pkey = hfi1_get_pkey(to_iport(ibdev, port), index);
	ret = 0;

bail:
	return ret;
}
/*
 * alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the driver
 */
static struct ib_ucontext *alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	struct hfi1_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	ret = &context->ibucontext;

bail:
	return ret;
}

static int dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}
static void init_ibport(struct hfi1_pportdata *ppd)
{
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
	int i;

	for (i = 0; i < sz; i++) {
		ibp->sl_to_sc[i] = i;
		ibp->sc_to_sl[i] = i;
	}

	spin_lock_init(&ibp->lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;

	/* Below should only set bits defined in OPA PortInfo.CapabilityMask */
	ibp->port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_CAP_MASK_NOTICE_SUP;
	ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	RCU_INIT_POINTER(ibp->qp[0], NULL);
	RCU_INIT_POINTER(ibp->qp[1], NULL);
}
static void verbs_txreq_kmem_cache_ctor(void *obj)
{
	struct verbs_txreq *tx = (struct verbs_txreq *)obj;

	memset(tx, 0, sizeof(*tx));
}
/*
 * hfi1_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return 0 if successful, errno if unsuccessful.
 */
int hfi1_register_ib_device(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->ibdev;
	struct hfi1_pportdata *ppd = dd->pport;
	unsigned i, lk_tab_size;
	int ret;
	size_t lcpysz = IB_DEVICE_NAME_MAX;
	u16 descq_cnt;

	ret = hfi1_qp_init(dev);
	if (ret)
		goto err_qp_init;

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */
	spin_lock_init(&dev->n_pds_lock);
	spin_lock_init(&dev->n_ahs_lock);
	spin_lock_init(&dev->n_cqs_lock);
	spin_lock_init(&dev->n_qps_lock);
	spin_lock_init(&dev->n_srqs_lock);
	spin_lock_init(&dev->n_mcast_grps_lock);
	init_timer(&dev->mem_timer);
	dev->mem_timer.function = mem_timer;
	dev->mem_timer.data = (unsigned long) dev;

	/*
	 * The top hfi1_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
	spin_lock_init(&dev->lk_table.lock);
	dev->lk_table.max = 1 << hfi1_lkey_table_size;
	/* ensure generation is at least 4 bits (keys.c) */
	if (hfi1_lkey_table_size > MAX_LKEY_TABLE_BITS) {
		dd_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
			    hfi1_lkey_table_size, MAX_LKEY_TABLE_BITS);
		hfi1_lkey_table_size = MAX_LKEY_TABLE_BITS;
	}
	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
	dev->lk_table.table = (struct hfi1_mregion __rcu **)
		vmalloc(lk_tab_size);
	if (dev->lk_table.table == NULL) {
		ret = -ENOMEM;
		goto err_lk;
	}
	RCU_INIT_POINTER(dev->dma_mr, NULL);
	for (i = 0; i < dev->lk_table.max; i++)
		RCU_INIT_POINTER(dev->lk_table.table[i], NULL);
	INIT_LIST_HEAD(&dev->pending_mmaps);
	spin_lock_init(&dev->pending_lock);
	seqlock_init(&dev->iowait_lock);
	dev->mmap_offset = PAGE_SIZE;
	spin_lock_init(&dev->mmap_offset_lock);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);

	descq_cnt = sdma_get_descq_cnt();

	/* SLAB_HWCACHE_ALIGN for AHG */
	dev->verbs_txreq_cache = kmem_cache_create("hfi1_vtxreq_cache",
						   sizeof(struct verbs_txreq),
						   0, SLAB_HWCACHE_ALIGN,
						   verbs_txreq_kmem_cache_ctor);
	if (!dev->verbs_txreq_cache) {
		ret = -ENOMEM;
		goto err_verbs_txreq;
	}

	/*
	 * The system image GUID is supposed to be the same for all
	 * HFIs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_hfi1_sys_image_guid)
		ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid);
	lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
	strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = cpu_to_be64(ppd->guid);
	ibdev->uverbs_abi_ver = HFI1_UVERBS_ABI_VERSION;
	ibdev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	ibdev->node_type = RDMA_NODE_IB_CA;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->num_comp_vectors = 1;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->query_device = query_device;
	ibdev->modify_device = modify_device;
	ibdev->query_port = query_port;
	ibdev->modify_port = modify_port;
	ibdev->query_pkey = query_pkey;
	ibdev->query_gid = query_gid;
	ibdev->alloc_ucontext = alloc_ucontext;
	ibdev->dealloc_ucontext = dealloc_ucontext;
	ibdev->alloc_pd = alloc_pd;
	ibdev->dealloc_pd = dealloc_pd;
	ibdev->create_ah = create_ah;
	ibdev->destroy_ah = destroy_ah;
	ibdev->modify_ah = modify_ah;
	ibdev->query_ah = query_ah;
	ibdev->create_srq = hfi1_create_srq;
	ibdev->modify_srq = hfi1_modify_srq;
	ibdev->query_srq = hfi1_query_srq;
	ibdev->destroy_srq = hfi1_destroy_srq;
	ibdev->create_qp = hfi1_create_qp;
	ibdev->modify_qp = hfi1_modify_qp;
	ibdev->query_qp = hfi1_query_qp;
	ibdev->destroy_qp = hfi1_destroy_qp;
	ibdev->post_send = post_send;
	ibdev->post_recv = post_receive;
	ibdev->post_srq_recv = hfi1_post_srq_receive;
	ibdev->create_cq = hfi1_create_cq;
	ibdev->destroy_cq = hfi1_destroy_cq;
	ibdev->resize_cq = hfi1_resize_cq;
	ibdev->poll_cq = hfi1_poll_cq;
	ibdev->req_notify_cq = hfi1_req_notify_cq;
	ibdev->get_dma_mr = hfi1_get_dma_mr;
	ibdev->reg_phys_mr = hfi1_reg_phys_mr;
	ibdev->reg_user_mr = hfi1_reg_user_mr;
	ibdev->dereg_mr = hfi1_dereg_mr;
	ibdev->alloc_mr = hfi1_alloc_mr;
	ibdev->alloc_fast_reg_page_list = hfi1_alloc_fast_reg_page_list;
	ibdev->free_fast_reg_page_list = hfi1_free_fast_reg_page_list;
	ibdev->alloc_fmr = hfi1_alloc_fmr;
	ibdev->map_phys_fmr = hfi1_map_phys_fmr;
	ibdev->unmap_fmr = hfi1_unmap_fmr;
	ibdev->dealloc_fmr = hfi1_dealloc_fmr;
	ibdev->attach_mcast = hfi1_multicast_attach;
	ibdev->detach_mcast = hfi1_multicast_detach;
	ibdev->process_mad = hfi1_process_mad;
	ibdev->mmap = hfi1_mmap;
	ibdev->dma_ops = &hfi1_dma_mapping_ops;
	ibdev->get_port_immutable = port_immutable;

	strncpy(ibdev->node_desc, init_utsname()->nodename,
		sizeof(ibdev->node_desc));

	ret = ib_register_device(ibdev, hfi1_create_port_files);
	if (ret)
		goto err_reg;

	ret = hfi1_create_agents(dev);
	if (ret)
		goto err_agents;

	ret = hfi1_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	goto bail;

err_class:
	hfi1_free_agents(dev);
err_agents:
	ib_unregister_device(ibdev);
err_reg:
err_verbs_txreq:
	kmem_cache_destroy(dev->verbs_txreq_cache);
	vfree(dev->lk_table.table);
err_lk:
err_qp_init:
	dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
bail:
	return ret;
}
void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->ibdev;

	hfi1_verbs_unregister_sysfs(dd);

	hfi1_free_agents(dev);

	ib_unregister_device(ibdev);

	if (!list_empty(&dev->txwait))
		dd_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		dd_dev_err(dd, "memwait list not empty!\n");
	if (dev->dma_mr)
		dd_dev_err(dd, "DMA MR not NULL!\n");

	del_timer_sync(&dev->mem_timer);
	kmem_cache_destroy(dev->verbs_txreq_cache);
	vfree(dev->lk_table.table);
}
/*
 * This must be called with s_lock held.
 */
void hfi1_schedule_send(struct hfi1_qp *qp)
{
	if (hfi1_send_ok(qp)) {
		struct hfi1_ibport *ibp =
			to_iport(qp->ibqp.device, qp->port_num);
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		iowait_schedule(&qp->s_iowait, ppd->hfi1_wq);
	}
}
void hfi1_cnp_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;

	if (packet->qp->ibqp.qp_type == IB_QPT_UC)
		hfi1_uc_rcv(packet);
	else if (packet->qp->ibqp.qp_type == IB_QPT_UD)
		hfi1_ud_rcv(packet);
	else
		ibp->n_pkt_drops++;
}