/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <rdma/opa_addr.h>

#include "verbs_txreq.h"
static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");
static unsigned int hfi1_max_pds = 0xFFFF;
module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int hfi1_max_ahs = 0xFFFF;
module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int hfi1_max_cqes = 0x2FFFFF;
module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int hfi1_max_cqs = 0x1FFFF;
module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int hfi1_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int hfi1_max_qps = 32768;
module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int hfi1_max_sges = 0x60;
module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int hfi1_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int hfi1_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int hfi1_max_srqs = 1024;
module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int hfi1_max_srq_sges = 128;
module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
unsigned int hfi1_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
unsigned short piothreshold = 256;
module_param(piothreshold, ushort, S_IRUGO);
MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");

static unsigned int sge_copy_mode;
module_param(sge_copy_mode, uint, S_IRUGO);
MODULE_PARM_DESC(sge_copy_mode,
		 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");
static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status);

static int pio_wait(struct rvt_qp *qp,
		    struct send_context *sc,
		    struct hfi1_pkt_state *ps,
		    u32 flag);

/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24
/* 16B trailing buffer */
static const u8 trail_buf[MAX_16B_PADDING];
static uint wss_threshold = 80;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold,
		 "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
static uint wss_clean_period = 256;
module_param(wss_clean_period, uint, S_IRUGO);
MODULE_PARM_DESC(wss_clean_period,
		 "Count of verbs copies before an entry in the page copy table is cleaned");
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
	[IB_WR_REG_MR] = IB_WC_REG_MR
};
/*
 * Length of header by opcode, 0 --> not supported
 */
const u8 hdr_len_by_opcode[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST]                     = 12 + 8,
	[IB_OPCODE_RC_SEND_MIDDLE]                    = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST]                      = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE]       = 12 + 8 + 4,
	[IB_OPCODE_RC_SEND_ONLY]                      = 12 + 8,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST]               = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE]              = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST]                = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY]                = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	[IB_OPCODE_RC_RDMA_READ_REQUEST]              = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST]       = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE]      = 12 + 8,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST]        = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY]        = 12 + 8 + 4,
	[IB_OPCODE_RC_ACKNOWLEDGE]                    = 12 + 8 + 4,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]             = 12 + 8 + 4 + 8,
	[IB_OPCODE_RC_COMPARE_SWAP]                   = 12 + 8 + 28,
	[IB_OPCODE_RC_FETCH_ADD]                      = 12 + 8 + 28,
	[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE]      = 12 + 8 + 4,
	[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE]      = 12 + 8 + 4,
	/* UC */
	[IB_OPCODE_UC_SEND_FIRST]                     = 12 + 8,
	[IB_OPCODE_UC_SEND_MIDDLE]                    = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST]                      = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE]       = 12 + 8 + 4,
	[IB_OPCODE_UC_SEND_ONLY]                      = 12 + 8,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST]               = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE]              = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST]                = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY]                = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY]                      = 12 + 8 + 8,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 12
};
static const opcode_handler opcode_handler_tbl[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST]                     = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_MIDDLE]                    = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST]                      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE]       = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY]                      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST]               = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE]              = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST]                = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY]                = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_REQUEST]              = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST]       = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE]      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST]        = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY]        = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ACKNOWLEDGE]                    = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]             = &hfi1_rc_rcv,
	[IB_OPCODE_RC_COMPARE_SWAP]                   = &hfi1_rc_rcv,
	[IB_OPCODE_RC_FETCH_ADD]                      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE]      = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE]      = &hfi1_rc_rcv,
	/* UC */
	[IB_OPCODE_UC_SEND_FIRST]                     = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_MIDDLE]                    = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST]                      = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE]       = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY]                      = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST]               = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE]              = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST]                = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY]                = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY]                      = &hfi1_ud_rcv,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_ud_rcv,
	/* CNP */
	[IB_OPCODE_CNP]                               = &hfi1_cnp_rcv
};
static const u32 pio_opmask[BIT(3)] = {
	/* RC */
	[IB_OPCODE_RC >> 5] =
		BIT(RC_OP(SEND_ONLY) & OPMASK) |
		BIT(RC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
		BIT(RC_OP(RDMA_WRITE_ONLY) & OPMASK) |
		BIT(RC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK) |
		BIT(RC_OP(RDMA_READ_REQUEST) & OPMASK) |
		BIT(RC_OP(ACKNOWLEDGE) & OPMASK) |
		BIT(RC_OP(ATOMIC_ACKNOWLEDGE) & OPMASK) |
		BIT(RC_OP(COMPARE_SWAP) & OPMASK) |
		BIT(RC_OP(FETCH_ADD) & OPMASK),
	/* UC */
	[IB_OPCODE_UC >> 5] =
		BIT(UC_OP(SEND_ONLY) & OPMASK) |
		BIT(UC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
		BIT(UC_OP(RDMA_WRITE_ONLY) & OPMASK) |
		BIT(UC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK),
};
/*
 * System image GUID.
 */
__be64 ib_hfi1_sys_image_guid;
/*
 * Make sure the QP is ready and able to accept the given opcode.
 */
static inline opcode_handler qp_ok(struct hfi1_packet *packet)
{
	if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
		goto dropit;
	if (((packet->opcode & RVT_OPCODE_QP_MASK) ==
	     packet->qp->allowed_ops) ||
	    (packet->opcode == IB_OPCODE_CNP))
		return opcode_handler_tbl[packet->opcode];
dropit:
	return NULL;
}
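/*
 * For example, an RC QP has allowed_ops == IB_OPCODE_RC (0x00), so any
 * opcode whose top three bits are zero passes the transport check
 * above, while a UC opcode (0x20 - 0x3f) on the same QP falls through
 * to dropit.  CNPs are accepted on any QP that is ready to receive.
 */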
static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
{
#ifdef CONFIG_FAULT_INJECTION
	if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP)
		/*
		 * In order to drop non-IB traffic we
		 * set PbcInsertHrc to NONE (0x2).
		 * The packet will still be delivered
		 * to the receiving node but a
		 * KHdrHCRCErr (KDETH packet with a bad
		 * HCRC) will be triggered and the
		 * packet will not be delivered to the
		 * receive context.
		 */
		pbc |= (u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT;
	else
		/*
		 * In order to drop regular verbs
		 * traffic we set the PbcTestEbp
		 * flag. The packet will still be
		 * delivered to the receiving node but
		 * a 'late ebp error' will be
		 * triggered and will be dropped.
		 */
		pbc |= PBC_TEST_EBP;
#endif
	return pbc;
}
static int hfi1_do_pkey_check(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_16b_header *hdr = packet->hdr;
	u16 pkey;

	/* Pkey check needed only for bypass packets */
	if (packet->etype != RHF_RCV_TYPE_BYPASS)
		return 0;

	/* Perform pkey check */
	pkey = hfi1_16B_get_pkey(hdr);
	return ingress_pkey_check(ppd, pkey, packet->sc,
				  packet->qp->s_pkey_index,
				  packet->slid, true);
}
static inline void hfi1_handle_packet(struct hfi1_packet *packet,
				      bool is_mcast)
{
	u32 qp_num;
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
	opcode_handler packet_handler;
	unsigned long flags;

	inc_opstats(packet->tlen, &rcd->opstats->stats[packet->opcode]);

	if (unlikely(is_mcast)) {
		struct rvt_mcast *mcast;
		struct rvt_mcast_qp *p;

		if (!packet->grh)
			goto drop;
		mcast = rvt_mcast_find(&ibp->rvp,
				       &packet->grh->dgid,
				       opa_get_lid(packet->dlid, 9B));
		if (!mcast)
			goto drop;
		list_for_each_entry_rcu(p, &mcast->qp_list, list) {
			packet->qp = p->qp;
			if (hfi1_do_pkey_check(packet))
				goto drop;
			spin_lock_irqsave(&packet->qp->r_lock, flags);
			packet_handler = qp_ok(packet);
			if (likely(packet_handler))
				packet_handler(packet);
			else
				ibp->rvp.n_pkt_drops++;
			spin_unlock_irqrestore(&packet->qp->r_lock, flags);
		}
		/*
		 * Notify rvt_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		/* Get the destination QP number. */
		if (packet->etype == RHF_RCV_TYPE_BYPASS &&
		    hfi1_16B_get_l4(packet->hdr) == OPA_16B_L4_FM)
			qp_num = hfi1_16B_get_dest_qpn(packet->mgmt);
		else
			qp_num = ib_bth_get_qpn(packet->ohdr);

		rcu_read_lock();
		packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
		if (!packet->qp)
			goto unlock_drop;

		if (hfi1_do_pkey_check(packet))
			goto unlock_drop;

		spin_lock_irqsave(&packet->qp->r_lock, flags);
		packet_handler = qp_ok(packet);
		if (likely(packet_handler))
			packet_handler(packet);
		else
			ibp->rvp.n_pkt_drops++;
		spin_unlock_irqrestore(&packet->qp->r_lock, flags);
		rcu_read_unlock();
	}
	return;
unlock_drop:
	rcu_read_unlock();
drop:
	ibp->rvp.n_pkt_drops++;
}
/**
 * hfi1_ib_rcv - process an incoming packet
 * @packet: data packet information
 *
 * This is called to process an incoming packet at interrupt level.
 */
void hfi1_ib_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
	hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
}

void hfi1_16B_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	trace_input_ibhdr(rcd->dd, packet, false);
	hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
}
/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(struct timer_list *t)
{
	struct hfi1_ibdev *dev = from_timer(dev, t, mem_timer);
	struct list_head *list = &dev->memwait;
	struct rvt_qp *qp = NULL;
	struct iowait *wait;
	unsigned long flags;
	struct hfi1_qp_priv *priv;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(list)) {
		wait = list_first_entry(list, struct iowait, list);
		qp = iowait_to_qp(wait);
		priv = qp->priv;
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		/* refcount held until actual wake up */
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);

	if (qp)
		hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
}
/*
 * This is called with progress side lock held.
 */
static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status)
{
	struct verbs_txreq *tx =
		container_of(cookie, struct verbs_txreq, txreq);
	struct rvt_qp *qp = tx->qp;

	spin_lock(&qp->s_lock);
	if (tx->wqe) {
		rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct hfi1_opa_header *hdr;

		hdr = &tx->phdr.hdr;
		hfi1_rc_send_complete(qp, hdr);
	}
	spin_unlock(&qp->s_lock);

	hfi1_put_txreq(tx);
}
static int wait_kmem(struct hfi1_ibdev *dev,
		     struct rvt_qp *qp,
		     struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		write_seqlock(&dev->iowait_lock);
		list_add_tail(&ps->s_txreq->txreq.list,
			      &ps->wait->tx_head);
		if (list_empty(&priv->s_iowait.list)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= RVT_S_WAIT_KMEM;
			list_add_tail(&priv->s_iowait.list, &dev->memwait);
			priv->s_iowait.lock = &dev->iowait_lock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
			rvt_get_qp(qp);
		}
		write_sequnlock(&dev->iowait_lock);
		hfi1_qp_unbusy(qp, ps->wait);
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}
/*
 * This routine calls txadds for each sg entry.
 *
 * Add failures will revert the sge cursor
 */
static noinline int build_verbs_ulp_payload(
	struct sdma_engine *sde,
	u32 length,
	struct verbs_txreq *tx)
{
	struct rvt_sge_state *ss = tx->ss;
	struct rvt_sge *sg_list = ss->sg_list;
	struct rvt_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 len;
	int ret = 0;

	while (length) {
		len = ss->sge.length;
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		WARN_ON_ONCE(len == 0);
		ret = sdma_txadd_kvaddr(
			sde->dd,
			&tx->txreq,
			ss->sge.vaddr,
			len);
		if (ret)
			goto bail_txadd;
		rvt_update_sge(ss, len, false);
		length -= len;
	}
	return ret;
bail_txadd:
	/* unwind cursor */
	ss->sge = sge;
	ss->num_sge = num_sge;
	ss->sg_list = sg_list;
	return ret;
}
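/*
 * For example, a 12288-byte send backed by two 8192-byte buffers is
 * added as two descriptors (8192, then 4096): len is clipped first to
 * the bytes remaining in the packet and then to the bytes remaining
 * in the current buffer, and rvt_update_sge() advances the cursor.
 * On a failed txadd the saved sge/num_sge/sg_list restore the cursor
 * so the same payload can be resubmitted later.
 */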
/**
 * update_tx_opstats - record stats by opcode
 * @qp: the qp
 * @ps: transmit packet state
 * @plen: the plen in dwords
 *
 * This is a routine to record the tx opstats after a
 * packet has been presented to the egress mechanism.
 */
static void update_tx_opstats(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			      u32 plen)
{
#ifdef CONFIG_DEBUG_FS
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats);

	inc_opstats(plen * 4, &s->stats[ps->opcode]);
	put_cpu_ptr(dd->tx_opstats);
#endif
}
/*
 * Build the number of DMA descriptors needed to send length bytes of data.
 *
 * NOTE: DMA mapping is held in the tx until completed in the ring or
 *       the tx desc is freed without having been submitted to the ring
 *
 * This routine ensures all the helper routine calls succeed.
 */
static int build_verbs_tx_desc(
	struct sdma_engine *sde,
	u32 length,
	struct verbs_txreq *tx,
	struct hfi1_ahg_info *ahg_info,
	u64 pbc)
{
	int ret = 0;
	struct hfi1_sdma_header *phdr = &tx->phdr;
	u16 hdrbytes = (tx->hdr_dwords + sizeof(pbc) / 4) << 2;
	u8 extra_bytes = 0;

	if (tx->phdr.hdr.hdr_type) {
		/*
		 * hdrbytes accounts for PBC. Need to subtract 8 bytes
		 * before calculating padding.
		 */
		extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) +
			      (SIZE_OF_CRC << 2) + SIZE_OF_LT;
	}
	if (!ahg_info->ahgcount) {
		ret = sdma_txinit_ahg(
			&tx->txreq,
			ahg_info->tx_flags,
			hdrbytes + length +
			extra_bytes,
			ahg_info->ahgidx,
			0,
			NULL,
			0,
			verbs_sdma_complete);
		if (ret)
			goto bail_txadd;
		phdr->pbc = cpu_to_le64(pbc);
		ret = sdma_txadd_kvaddr(
			sde->dd,
			&tx->txreq,
			phdr,
			hdrbytes);
		if (ret)
			goto bail_txadd;
	} else {
		ret = sdma_txinit_ahg(
			&tx->txreq,
			ahg_info->tx_flags,
			length,
			ahg_info->ahgidx,
			ahg_info->ahgcount,
			ahg_info->ahgdesc,
			hdrbytes,
			verbs_sdma_complete);
		if (ret)
			goto bail_txadd;
	}

	/* add the ulp payload - if any. tx->ss can be NULL for acks */
	if (tx->ss) {
		ret = build_verbs_ulp_payload(sde, length, tx);
		if (ret)
			goto bail_txadd;
	}

	/* add icrc, lt byte, and padding to flit */
	if (extra_bytes)
		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
					(void *)trail_buf, extra_bytes);

bail_txadd:
	return ret;
}
int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			u64 pbc)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ahg_info *ahg_info = priv->s_ahg;
	u32 hdrwords = ps->s_txreq->hdr_dwords;
	u32 len = ps->s_txreq->s_cur_size;
	u32 plen;
	struct hfi1_ibdev *dev = ps->dev;
	struct hfi1_pportdata *ppd = ps->ppd;
	struct verbs_txreq *tx;
	u8 sc5 = priv->s_sc;
	int ret;
	u32 dwords;

	if (ps->s_txreq->phdr.hdr.hdr_type) {
		u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len);

		dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) +
			  SIZE_OF_LT) >> 2;
	} else {
		dwords = (len + 3) >> 2;
	}
	plen = hdrwords + dwords + sizeof(pbc) / 4;

	tx = ps->s_txreq;
	if (!sdma_txreq_built(&tx->txreq)) {
		if (likely(pbc == 0)) {
			u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);

			/* No vl15 here */
			/* set PBC_DC_INFO bit (aka SC[4]) in pbc */
			if (ps->s_txreq->phdr.hdr.hdr_type)
				pbc |= PBC_PACKET_BYPASS |
				       PBC_INSERT_BYPASS_ICRC;
			else
				pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);

			if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
				pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
			pbc = create_pbc(ppd,
					 pbc,
					 qp->srate_mbps,
					 vl,
					 plen);
		}
		tx->wqe = qp->s_wqe;
		ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
		if (unlikely(ret))
			goto bail_build;
	}
	ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent);
	if (unlikely(ret < 0)) {
		if (ret == -ECOMM)
			goto bail_ecomm;
		return ret;
	}

	update_tx_opstats(qp, ps, plen);
	trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
				&ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
	return ret;

bail_ecomm:
	/* The current one got "sent" */
	return 0;
bail_build:
	ret = wait_kmem(dev, qp, ps);
	if (!ret) {
		/* free txreq - bad state */
		hfi1_put_txreq(ps->s_txreq);
		ps->s_txreq = NULL;
	}
	return ret;
}
/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int pio_wait(struct rvt_qp *qp,
		    struct send_context *sc,
		    struct hfi1_pkt_state *ps,
		    u32 flag)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_devdata *dd = sc->dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, sc_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		write_seqlock(&sc->waitlock);
		list_add_tail(&ps->s_txreq->txreq.list,
			      &ps->wait->tx_head);
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibdev *dev = &dd->verbs_dev;
			int was_empty;

			dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
			dev->n_piodrain += !!(flag & HFI1_S_WAIT_PIO_DRAIN);
			qp->s_flags |= flag;
			was_empty = list_empty(&sc->piowait);
			iowait_queue(ps->pkts_sent, &priv->s_iowait,
				     &sc->piowait);
			priv->s_iowait.lock = &sc->waitlock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
			rvt_get_qp(qp);
			/* counting: only call wantpiobuf_intr if first user */
			if (was_empty)
				hfi1_sc_wantpiobuf_intr(sc, 1);
		}
		write_sequnlock(&sc->waitlock);
		hfi1_qp_unbusy(qp, ps->wait);
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
static void verbs_pio_complete(void *arg, int code)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_qp_priv *priv = qp->priv;

	if (iowait_pio_dec(&priv->s_iowait))
		iowait_drain_wakeup(&priv->s_iowait);
}
int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			u64 pbc)
{
	struct hfi1_qp_priv *priv = qp->priv;
	u32 hdrwords = ps->s_txreq->hdr_dwords;
	struct rvt_sge_state *ss = ps->s_txreq->ss;
	u32 len = ps->s_txreq->s_cur_size;
	u32 dwords;
	u32 plen;
	struct hfi1_pportdata *ppd = ps->ppd;
	u32 *hdr;
	u8 sc5;
	unsigned long flags = 0;
	struct send_context *sc;
	struct pio_buf *pbuf;
	int wc_status = IB_WC_SUCCESS;
	int ret = 0;
	pio_release_cb cb = NULL;
	u8 extra_bytes = 0;

	if (ps->s_txreq->phdr.hdr.hdr_type) {
		u8 pad_size = hfi1_get_16b_padding((hdrwords << 2), len);

		extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT;
		dwords = (len + extra_bytes) >> 2;
		hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah;
	} else {
		dwords = (len + 3) >> 2;
		hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh;
	}
	plen = hdrwords + dwords + sizeof(pbc) / 4;

	/* only RC/UC use complete */
	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		cb = verbs_pio_complete;
		break;
	default:
		break;
	}

	/* vl15 special case taken care of in ud.c */
	sc5 = priv->s_sc;
	sc = ps->s_txreq->psc;

	if (likely(pbc == 0)) {
		u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);

		/* set PBC_DC_INFO bit (aka SC[4]) in pbc */
		if (ps->s_txreq->phdr.hdr.hdr_type)
			pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
		else
			pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);

		if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
			pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
		pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
	}
	if (cb)
		iowait_pio_inc(&priv->s_iowait);
	pbuf = sc_buffer_alloc(sc, plen, cb, qp);
	if (unlikely(!pbuf)) {
		if (cb)
			verbs_pio_complete(qp, 0);
		if (ppd->host_link_state != HLS_UP_ACTIVE) {
			/*
			 * If we have filled the PIO buffers to capacity and are
			 * not in an active state this request is not going to
			 * go out to so just complete it with an error or else a
			 * ULP or the core may be stuck waiting.
			 */
			hfi1_cdbg(
				PIO,
				"alloc failed. state not active, completing");
			wc_status = IB_WC_GENERAL_ERR;
			goto pio_bail;
		} else {
			/*
			 * This is a normal occurrence. The PIO buffs are full
			 * up but we are still happily sending, well we could be
			 * so lets continue to queue the request.
			 */
			hfi1_cdbg(PIO, "alloc failed. state active, queuing");
			ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
			if (!ret)
				/* txreq not queued - free */
				goto bail;
			/* tx consumed in wait */
			return ret;
		}
	}

	if (dwords == 0) {
		pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
	} else {
		seg_pio_copy_start(pbuf, pbc,
				   hdr, hdrwords * 4);
		if (ss) {
			while (len) {
				void *addr = ss->sge.vaddr;
				u32 slen = ss->sge.length;

				if (slen > len)
					slen = len;
				if (slen > ss->sge.sge_length)
					slen = ss->sge.sge_length;
				rvt_update_sge(ss, slen, false);
				seg_pio_copy_mid(pbuf, addr, slen);
				len -= slen;
			}
		}
		/* add icrc, lt byte, and padding to flit */
		if (extra_bytes)
			seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);

		seg_pio_copy_end(pbuf);
	}

	update_tx_opstats(qp, ps, plen);
	trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
			       &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));

pio_bail:
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		rvt_send_complete(qp, qp->s_wqe, wc_status);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}

	ret = 0;

bail:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return ret;
}
/*
 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the partition key table), return 0
 * otherwise. Use the matching criteria for egress partition keys
 * specified in the OPAv1 spec., section 9.1l.7.
 */
static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 mentry = ent & PKEY_LOW_15_MASK;

	if (mkey == mentry) {
		/*
		 * If pkey[15] is set (full partition member),
		 * is bit 15 in the corresponding table element
		 * clear (limited member)?
		 */
		if (pkey & PKEY_MEMBER_MASK)
			return !!(ent & PKEY_MEMBER_MASK);
		return 1;
	}
	return 0;
}
/**
 * egress_pkey_check - check P_KEY of a packet
 * @ppd: Physical IB port data
 * @slid: SLID for packet
 * @pkey: PKEY for header
 * @sc5: SC for packet
 * @s_pkey_index: It will be used for look up optimization for kernel contexts
 * only. If it is negative value, then it means a user context is calling this
 * function.
 *
 * It checks if hdr's pkey is valid.
 *
 * Return: 0 on success, otherwise, 1
 */
int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
		      u8 sc5, int8_t s_pkey_index)
{
	struct hfi1_devdata *dd;
	int i;
	int is_user_ctxt_mechanism = (s_pkey_index < 0);

	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
		return 0;

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	/* Is the pkey = 0x0, or 0x8000? */
	if ((pkey & PKEY_LOW_15_MASK) == 0)
		goto bad;

	/*
	 * For the kernel contexts only, if a qp is passed into the function,
	 * the most likely matching pkey has index qp->s_pkey_index
	 */
	if (!is_user_ctxt_mechanism &&
	    egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) {
		return 0;
	}

	for (i = 0; i < MAX_PKEY_VALUES; i++) {
		if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
			return 0;
	}
bad:
	/*
	 * For the user-context mechanism, the P_KEY check would only happen
	 * once per SDMA request, not once per packet. Therefore, there's no
	 * need to increment the counter for the user-context mechanism.
	 */
	if (!is_user_ctxt_mechanism) {
		incr_cntr64(&ppd->port_xmit_constraint_errors);
		dd = ppd->dd;
		if (!(dd->err_info_xmit_constraint.status &
		      OPA_EI_STATUS_SMASK)) {
			dd->err_info_xmit_constraint.status |=
				OPA_EI_STATUS_SMASK;
			dd->err_info_xmit_constraint.slid = slid;
			dd->err_info_xmit_constraint.pkey = pkey;
		}
	}
	return 1;
}
/*
 * get_send_routine - choose an egress routine
 *
 * Choose an egress routine based on QP type
 * and size
 */
static inline send_routine get_send_routine(struct rvt_qp *qp,
					    struct hfi1_pkt_state *ps)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;
	struct verbs_txreq *tx = ps->s_txreq;

	if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
		return dd->process_pio_send;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return dd->process_pio_send;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		break;
	case IB_QPT_UC:
	case IB_QPT_RC:
		if (piothreshold &&
		    tx->s_cur_size <= min(piothreshold, qp->pmtu) &&
		    (BIT(ps->opcode & OPMASK) & pio_opmask[ps->opcode >> 5]) &&
		    iowait_sdma_pending(&priv->s_iowait) == 0 &&
		    !sdma_txreq_built(&tx->txreq))
			return dd->process_pio_send;
		break;
	default:
		break;
	}
	return dd->process_dma_send;
}
/**
 * hfi1_verbs_send - send a packet
 * @qp: the QP to send on
 * @ps: the state of the packet to send
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
 */
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr = NULL;
	send_routine sr;
	int ret;
	u16 pkey;
	u32 slid;
	u8 l4 = 0;

	/* locate the pkey within the headers */
	if (ps->s_txreq->phdr.hdr.hdr_type) {
		struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah;

		l4 = hfi1_16B_get_l4(hdr);
		if (l4 == OPA_16B_L4_IB_LOCAL)
			ohdr = &hdr->u.oth;
		else if (l4 == OPA_16B_L4_IB_GLOBAL)
			ohdr = &hdr->u.l.oth;

		slid = hfi1_16B_get_slid(hdr);
		pkey = hfi1_16B_get_pkey(hdr);
	} else {
		struct ib_header *hdr = &ps->s_txreq->phdr.hdr.ibh;
		u8 lnh = ib_get_lnh(hdr);

		if (lnh == HFI1_LRH_GRH)
			ohdr = &hdr->u.l.oth;
		else
			ohdr = &hdr->u.oth;
		slid = ib_get_slid(hdr);
		pkey = ib_bth_get_pkey(ohdr);
	}

	if (likely(l4 != OPA_16B_L4_FM))
		ps->opcode = ib_bth_get_opcode(ohdr);
	else
		ps->opcode = IB_OPCODE_UD_SEND_ONLY;

	sr = get_send_routine(qp, ps);
	ret = egress_pkey_check(dd->pport, slid, pkey,
				priv->s_sc, qp->s_pkey_index);
	if (unlikely(ret)) {
		/*
		 * The value we are returning here does not get propagated to
		 * the verbs caller. Thus we need to complete the request with
		 * error otherwise the caller could be sitting waiting on the
		 * completion event. Only do this for PIO. SDMA has its own
		 * mechanism for handling the errors. So for SDMA we can just
		 * return.
		 */
		if (sr == dd->process_pio_send) {
			unsigned long flags;

			hfi1_cdbg(PIO, "%s() Failed. Completing with err",
				  __func__);
			spin_lock_irqsave(&qp->s_lock, flags);
			rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		return -EINVAL;
	}
	if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
		return pio_wait(qp,
				ps->s_txreq->psc,
				ps,
				HFI1_S_WAIT_PIO_DRAIN);
	return sr(qp, ps, 0);
}
/**
 * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
{
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	u32 ver = dd->dc8051_ver;

	memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));

	rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 32) |
		((u64)(dc8051_ver_min(ver)) << 16) |
		(u64)dc8051_ver_patch(ver);

	rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
		IB_DEVICE_MEM_MGT_EXTENSIONS |
		IB_DEVICE_RDMA_NETDEV_OPA_VNIC;
	rdi->dparms.props.page_size_cap = PAGE_SIZE;
	rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
	rdi->dparms.props.vendor_part_id = dd->pcidev->device;
	rdi->dparms.props.hw_ver = dd->minrev;
	rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
	rdi->dparms.props.max_mr_size = U64_MAX;
	rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
	rdi->dparms.props.max_qp = hfi1_max_qps;
	rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
	rdi->dparms.props.max_send_sge = hfi1_max_sges;
	rdi->dparms.props.max_recv_sge = hfi1_max_sges;
	rdi->dparms.props.max_sge_rd = hfi1_max_sges;
	rdi->dparms.props.max_cq = hfi1_max_cqs;
	rdi->dparms.props.max_ah = hfi1_max_ahs;
	rdi->dparms.props.max_cqe = hfi1_max_cqes;
	rdi->dparms.props.max_mr = rdi->lkey_table.max;
	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
	rdi->dparms.props.max_map_per_fmr = 32767;
	rdi->dparms.props.max_pd = hfi1_max_pds;
	rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
	rdi->dparms.props.max_qp_init_rd_atom = 255;
	rdi->dparms.props.max_srq = hfi1_max_srqs;
	rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
	rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
	rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
	rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
	rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
	rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
	rdi->dparms.props.max_total_mcast_qp_attach =
					rdi->dparms.props.max_mcast_qp_attach *
					rdi->dparms.props.max_mcast_grp;
}
static inline u16 opa_speed_to_ib(u16 in)
{
	u16 out = 0;

	if (in & OPA_LINK_SPEED_25G)
		out |= IB_SPEED_EDR;
	if (in & OPA_LINK_SPEED_12_5G)
		out |= IB_SPEED_FDR;

	return out;
}

/*
 * Convert a single OPA link width (no multiple flags) to an IB value.
 * A zero OPA link width means link down, which means the IB width value
 * is a don't care.
 */
static inline u16 opa_width_to_ib(u16 in)
{
	switch (in) {
	case OPA_LINK_WIDTH_1X:
	/* map 2x and 3x to 1x as they don't exist in IB */
	case OPA_LINK_WIDTH_2X:
	case OPA_LINK_WIDTH_3X:
		return IB_WIDTH_1X;
	default: /* link down or unknown, return our largest width */
	case OPA_LINK_WIDTH_4X:
		return IB_WIDTH_4X;
	}
}
static int query_port(struct rvt_dev_info *rdi, u8 port_num,
		      struct ib_port_attr *props)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
	u32 lid = ppd->lid;

	/* props being zeroed by the caller, avoid zeroing it here */
	props->lid = lid ? lid : 0;
	props->lmc = ppd->lmc;
	/* OPA logical states match IB logical states */
	props->state = driver_lstate(ppd);
	props->phys_state = driver_pstate(ppd);
	props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
	props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
	/* see rate_show() in ib core/sysfs.c */
	props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
	props->max_vl_num = ppd->vls_supported;

	/* Once we are a "first class" citizen and have added the OPA MTUs to
	 * the core we can advertise the larger MTU enum to the ULPs, for now
	 * advertise only 4K.
	 *
	 * Those applications which are either OPA aware or pass the MTU enum
	 * from the Path Records to us will get the new 8k MTU.  Those that
	 * attempt to process the MTU enum may fail in various ways.
	 */
	props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
				      4096 : hfi1_max_mtu), IB_MTU_4096);
	props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
		mtu_to_enum(ppd->ibmtu, IB_MTU_4096);

	return 0;
}
static int modify_device(struct ib_device *device,
			 int device_modify_mask,
			 struct ib_device_modify *device_modify)
{
	struct hfi1_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

			hfi1_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_hfi1_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

			hfi1_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}
static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
	int ret;

	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
			     OPA_LINKDOWN_REASON_UNKNOWN);
	ret = set_link_state(ppd, HLS_DN_DOWNDEF);
	return ret;
}
static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			    int guid_index, __be64 *guid)
{
	struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);

	if (guid_index >= HFI1_GUIDS_PER_PORT)
		return -EINVAL;

	*guid = get_sguid(ibp, guid_index);
	return 0;
}
/*
 * convert ah port,sl to sc
 */
u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, rdma_ah_get_port_num(ah));

	return ibp->sl_to_sc[rdma_ah_get_sl(ah)];
}
static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
{
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	u8 sc5;
	u8 sl;

	if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
	    !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
		return -EINVAL;

	/* test the mapping for validity */
	ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
	ppd = ppd_from_ibp(ibp);
	dd = dd_from_ppd(ppd);

	sl = rdma_ah_get_sl(ah_attr);
	if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
		return -EINVAL;

	sc5 = ibp->sl_to_sc[sl];
	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
		return -EINVAL;
	return 0;
}
static void hfi1_notify_new_ah(struct ib_device *ibdev,
			       struct rdma_ah_attr *ah_attr,
			       struct rvt_ah *ah)
{
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	u8 sc5;
	struct rdma_ah_attr *attr = &ah->attr;

	/*
	 * Do not trust reading anything from rvt_ah at this point as it is not
	 * done being setup. We can however modify things which we need to set.
	 */

	ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
	ppd = ppd_from_ibp(ibp);
	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)];
	hfi1_update_ah_attr(ibdev, attr);
	hfi1_make_opa_lid(attr);
	dd = dd_from_ppd(ppd);
	ah->vl = sc_to_vlt(dd, sc5);
	if (ah->vl < num_vls || ah->vl == 15)
		ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
}
/**
 * hfi1_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the hfi1_ib device
 */
unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
{
	return ARRAY_SIZE(dd->pport[0].pkeys);
}
static void init_ibport(struct hfi1_pportdata *ppd)
{
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
	int i;

	for (i = 0; i < sz; i++) {
		ibp->sl_to_sc[i] = i;
		ibp->sc_to_sl[i] = i;
	}

	for (i = 0; i < RVT_MAX_TRAP_LISTS; i++)
		INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list);
	timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0);

	spin_lock_init(&ibp->rvp.lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->rvp.sm_lid = 0;
	/*
	 * Below should only set bits defined in OPA PortInfo.CapabilityMask
	 * and PortInfo.CapabilityMask3
	 */
	ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_CAP_MASK_NOTICE_SUP;
	ibp->rvp.port_cap3_flags = OPA_CAP_MASK3_IsSharedSpaceSupported;
	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}
static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct hfi1_ibdev *dev = dev_from_rdi(rdi);
	u32 ver = dd_from_dev(dev)->dc8051_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u", dc8051_ver_maj(ver),
		 dc8051_ver_min(ver), dc8051_ver_patch(ver));
}
static const char * const driver_cntr_names[] = {
	/* must be element 0*/
	"DRIVER_KernIntr",
	"DRIVER_ErrorIntr",
	"DRIVER_Tx_Errs",
	"DRIVER_Rcv_Errs",
	"DRIVER_HW_Errs",
	"DRIVER_NoPIOBufs",
	"DRIVER_CtxtsOpen",
	"DRIVER_RcvLen_Errs",
	"DRIVER_EgrBufFull",
	"DRIVER_EgrHdrFull"
};

static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names buffers */
static const char **dev_cntr_names;
static const char **port_cntr_names;
int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
static int num_dev_cntrs;
static int num_port_cntrs;
static int cntr_names_initialized;
/*
 * Convert a list of names separated by '\n' into an array of NULL terminated
 * strings. Optionally some entries can be reserved in the array to hold extra
 * external strings.
 */
static int init_cntr_names(const char *names_in,
			   const size_t names_len,
			   int num_extra_names,
			   int *num_cntrs,
			   const char ***cntr_names)
{
	char *names_out, *p, **q;
	int i, n;

	n = 0;
	for (i = 0; i < names_len; i++)
		if (names_in[i] == '\n')
			n++;

	names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len,
			    GFP_KERNEL);
	if (!names_out) {
		*num_cntrs = 0;
		*cntr_names = NULL;
		return -ENOMEM;
	}

	p = names_out + (n + num_extra_names) * sizeof(char *);
	memcpy(p, names_in, names_len);

	q = (char **)names_out;
	for (i = 0; i < n; i++) {
		q[i] = p;
		p = strchr(p, '\n');
		*p++ = '\0';
	}

	*num_cntrs = n;
	*cntr_names = (const char **)names_out;
	return 0;
}
static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
					    u8 port_num)
{
	int i, err;

	mutex_lock(&cntr_names_lock);
	if (!cntr_names_initialized) {
		struct hfi1_devdata *dd = dd_from_ibdev(ibdev);

		err = init_cntr_names(dd->cntrnames,
				      dd->cntrnameslen,
				      num_driver_cntrs,
				      &num_dev_cntrs,
				      &dev_cntr_names);
		if (err) {
			mutex_unlock(&cntr_names_lock);
			return NULL;
		}

		for (i = 0; i < num_driver_cntrs; i++)
			dev_cntr_names[num_dev_cntrs + i] =
				driver_cntr_names[i];

		err = init_cntr_names(dd->portcntrnames,
				      dd->portcntrnameslen,
				      0,
				      &num_port_cntrs,
				      &port_cntr_names);
		if (err) {
			kfree(dev_cntr_names);
			dev_cntr_names = NULL;
			mutex_unlock(&cntr_names_lock);
			return NULL;
		}
		cntr_names_initialized = 1;
	}
	mutex_unlock(&cntr_names_lock);

	if (!port_num)
		return rdma_alloc_hw_stats_struct(
				dev_cntr_names,
				num_dev_cntrs + num_driver_cntrs,
				RDMA_HW_STATS_DEFAULT_LIFESPAN);
	else
		return rdma_alloc_hw_stats_struct(
				port_cntr_names,
				num_port_cntrs,
				RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
static u64 hfi1_sps_ints(void)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	u64 sps_ints = 0;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	list_for_each_entry(dd, &hfi1_dev_list, list) {
		sps_ints += get_all_cpu_total(dd->int_counter);
	}
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	return sps_ints;
}
static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
			u8 port, int index)
{
	u64 *values;
	int count;

	if (!port) {
		u64 *stats = (u64 *)&hfi1_stats;
		int i;

		hfi1_read_cntrs(dd_from_ibdev(ibdev), NULL, &values);
		values[num_dev_cntrs] = hfi1_sps_ints();
		for (i = 1; i < num_driver_cntrs; i++)
			values[num_dev_cntrs + i] = stats[i];
		count = num_dev_cntrs + num_driver_cntrs;
	} else {
		struct hfi1_ibport *ibp = to_iport(ibdev, port);

		hfi1_read_portcntrs(ppd_from_ibp(ibp), NULL, &values);
		count = num_port_cntrs;
	}

	memcpy(stats->value, values, count * sizeof(u64));
	return count;
}
static const struct ib_device_ops hfi1_dev_ops = {
	.alloc_hw_stats = alloc_hw_stats,
	.alloc_rdma_netdev = hfi1_vnic_alloc_rn,
	.get_dev_fw_str = hfi1_get_dev_fw_str,
	.get_hw_stats = get_hw_stats,
	.modify_device = modify_device,
	/* keep process mad in the driver */
	.process_mad = hfi1_process_mad,
};
/**
 * hfi1_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return 0 if successful, errno if unsuccessful.
 */
int hfi1_register_ib_device(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->rdi.ibdev;
	struct hfi1_pportdata *ppd = dd->pport;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	unsigned i;
	int ret;

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */

	timer_setup(&dev->mem_timer, mem_timer, 0);

	seqlock_init(&dev->iowait_lock);
	seqlock_init(&dev->txwait_lock);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);

	ret = verbs_txreq_init(dev);
	if (ret)
		goto err_verbs_txreq;

	/* Use first-port GUID as node guid */
	ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX);

	/*
	 * The system image GUID is supposed to be the same for all
	 * HFIs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_hfi1_sys_image_guid)
		ib_hfi1_sys_image_guid = ibdev->node_guid;
	ibdev->owner = THIS_MODULE;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->dev.parent = &dd->pcidev->dev;

	ib_set_device_ops(ibdev, &hfi1_dev_ops);

	strlcpy(ibdev->node_desc, init_utsname()->nodename,
		sizeof(ibdev->node_desc));

	/*
	 * Fill in rvt info object.
	 */
	dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
	dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
	dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
	dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
	dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
	dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
	dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
	dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
	/*
	 * Fill in rvt info device attributes.
	 */
	hfi1_fill_device_attr(dd);

	/* queue pair */
	dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
	dd->verbs_dev.rdi.dparms.qpn_start = 0;
	dd->verbs_dev.rdi.dparms.qpn_inc = 1;
	dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
	dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
	dd->verbs_dev.rdi.dparms.qpn_res_end =
		dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
	dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
	dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
	dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
	dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA |
						RDMA_CORE_CAP_OPA_AH;
	dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;

	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
	dd->verbs_dev.rdi.driver_f.qp_priv_init = hfi1_qp_priv_init;
	dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
	dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
	dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
	dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
	dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
	dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
	dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
	dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
	dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe;
	dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
		hfi1_comp_vect_mappings_lookup;

	/* completion queue */
	dd->verbs_dev.rdi.ibdev.num_comp_vectors = dd->comp_vect_possible_cpus;
	dd->verbs_dev.rdi.dparms.node = dd->node;

	/* misc settings */
	dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
	dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
	dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
	dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode;
	dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold;
	dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period;

	/* post send table */
	dd->verbs_dev.rdi.post_parms = hfi1_post_parms;

	/* opcode translation table */
	dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode;

	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++)
		rvt_init_port(&dd->verbs_dev.rdi,
			      &ppd->ibport_data.rvp,
			      i,
			      ppd->pkeys);

	rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev,
				    &ib_hfi1_attr_group);

	ret = rvt_register_device(&dd->verbs_dev.rdi, RDMA_DRIVER_HFI1);
	if (ret)
		goto err_verbs_txreq;

	ret = hfi1_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	return ret;

err_class:
	rvt_unregister_device(&dd->verbs_dev.rdi);
err_verbs_txreq:
	verbs_txreq_exit(dev);
	dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
	return ret;
}
void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;

	hfi1_verbs_unregister_sysfs(dd);

	rvt_unregister_device(&dd->verbs_dev.rdi);

	if (!list_empty(&dev->txwait))
		dd_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		dd_dev_err(dd, "memwait list not empty!\n");

	del_timer_sync(&dev->mem_timer);
	verbs_txreq_exit(dev);

	mutex_lock(&cntr_names_lock);
	kfree(dev_cntr_names);
	kfree(port_cntr_names);
	dev_cntr_names = NULL;
	port_cntr_names = NULL;
	cntr_names_initialized = 0;
	mutex_unlock(&cntr_names_lock);
}
void hfi1_cnp_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_header *hdr = packet->hdr;
	struct rvt_qp *qp = packet->qp;
	u32 lqpn, rqpn = 0;
	u16 rlid = 0;
	u8 sl, sc5, svc_type;

	switch (packet->qp->ibqp.qp_type) {
	case IB_QPT_UC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	default:
		ibp->rvp.n_pkt_drops++;
		return;
	}

	sc5 = hfi1_9B_get_sc5(hdr, packet->rhf);
	sl = ibp->sc_to_sl[sc5];
	lqpn = qp->ibqp.qp_num;

	process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}