/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include "hfi.h"
#include "common.h"
#include "device.h"
#include "trace.h"
#include "qp.h"
#include "verbs_txreq.h"
static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
                   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
                 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int hfi1_max_pds = 0xFFFF;
module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
                 "Maximum number of protection domains to support");

static unsigned int hfi1_max_ahs = 0xFFFF;
module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int hfi1_max_cqes = 0x2FFFF;
module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
                 "Maximum number of completion queue entries to support");

unsigned int hfi1_max_cqs = 0x1FFFF;
module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int hfi1_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int hfi1_max_qps = 16384;
module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int hfi1_max_sges = 0x60;
module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int hfi1_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
                 "Maximum number of multicast groups to support");

unsigned int hfi1_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
                   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
                 "Maximum number of attached QPs to support");

unsigned int hfi1_max_srqs = 1024;
module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int hfi1_max_srq_sges = 128;
module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int hfi1_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

unsigned short piothreshold = 256;
module_param(piothreshold, ushort, S_IRUGO);
MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");

#define COPY_CACHELESS 1
#define COPY_ADAPTIVE  2
static unsigned int sge_copy_mode;
module_param(sge_copy_mode, uint, S_IRUGO);
MODULE_PARM_DESC(sge_copy_mode,
                 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");
static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status);

static int pio_wait(struct rvt_qp *qp,
                    struct send_context *sc,
                    struct hfi1_pkt_state *ps,
                    u32 flag);

/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24
static uint wss_threshold;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold,
                 "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
static uint wss_clean_period = 256;
module_param(wss_clean_period, uint, S_IRUGO);
MODULE_PARM_DESC(wss_clean_period,
                 "Count of verbs copies before an entry in the page copy table is cleaned");

/* memory working set size */
struct hfi1_wss {
        unsigned long *entries;
        atomic_t total_count;
        atomic_t clean_counter;
        atomic_t clean_entry;

        int threshold;
        int num_entries;
        long pages_mask;
};

static struct hfi1_wss wss;
int hfi1_wss_init(void)
{
        long llc_size;
        long llc_bits;
        long table_size;
        long table_bits;

        /* check for a valid percent range - default to 80 if none or invalid */
        if (wss_threshold < 1 || wss_threshold > 100)
                wss_threshold = 80;
        /* reject a wildly large period */
        if (wss_clean_period > 1000000)
                wss_clean_period = 256;
        /* reject a zero period */
        if (wss_clean_period == 0)
                wss_clean_period = 1;

        /*
         * Calculate the table size - the next power of 2 larger than the
         * LLC size.  LLC size is in KiB.
         */
        llc_size = wss_llc_size() * 1024;
        table_size = roundup_pow_of_two(llc_size);

        /* one bit per page in rounded up table */
        llc_bits = llc_size / PAGE_SIZE;
        table_bits = table_size / PAGE_SIZE;
        wss.pages_mask = table_bits - 1;
        wss.num_entries = table_bits / BITS_PER_LONG;

        wss.threshold = (llc_bits * wss_threshold) / 100;
        if (wss.threshold == 0)
                wss.threshold = 1;

        atomic_set(&wss.clean_counter, wss_clean_period);

        wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries),
                              GFP_KERNEL);
        if (!wss.entries) {
                hfi1_wss_exit();
                return -ENOMEM;
        }

        return 0;
}
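/*
 * Worked sizing example (illustrative numbers, assuming a 30 MiB LLC and
 * 4 KiB pages): llc_size = 30720 * 1024 = 31457280 bytes, so table_size =
 * roundup_pow_of_two(31457280) = 33554432.  Then llc_bits = 31457280 /
 * 4096 = 7680 and table_bits = 33554432 / 4096 = 8192, giving pages_mask
 * = 0x1fff and num_entries = 8192 / 64 = 128 longs.  With the default
 * wss_threshold of 80, threshold = (7680 * 80) / 100 = 6144 pages, i.e.
 * the cacheless copy kicks in once roughly 80% of an LLC worth of pages
 * is "hot".
 */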
void hfi1_wss_exit(void)
{
        /* coded to handle partially initialized and repeat callers */
        kfree(wss.entries);
        wss.entries = NULL;
}
/*
 * Advance the clean counter.  When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking.  Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information.  Since this is only a heuristic, this is
 * OK.  Any inaccuracies will clean themselves out as the counter
 * advances.  That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero.  When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(void)
{
        int entry;
        int weight;
        unsigned long bits;

        /* become the cleaner if we decrement the counter to zero */
        if (atomic_dec_and_test(&wss.clean_counter)) {
                /*
                 * Set, not add, the clean period.  This avoids an issue
                 * where the counter could decrement below the clean period.
                 * Doing a set can result in lost decrements, slowing the
                 * clean advance.  Since this is a heuristic, this possible
                 * slowdown is OK.
                 *
                 * An alternative is to loop, advancing the counter by a
                 * clean period until the result is > 0. However, this could
                 * lead to several threads keeping another in the clean loop.
                 * This could be mitigated by limiting the number of times
                 * we stay in the loop.
                 */
                atomic_set(&wss.clean_counter, wss_clean_period);

                /*
                 * Uniquely grab the entry to clean and move to next.
                 * The current entry is always the lower bits of
                 * wss.clean_entry.  The table size, wss.num_entries,
                 * is always a power-of-2.
                 */
                entry = (atomic_inc_return(&wss.clean_entry) - 1)
                        & (wss.num_entries - 1);

                /* clear the entry and count the bits */
                bits = xchg(&wss.entries[entry], 0);
                weight = hweight64((u64)bits);
                /* only adjust the contended total count if needed */
                if (weight)
                        atomic_sub(weight, &wss.total_count);
        }
}
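/*
 * Cleaning cadence example (illustrative, using the sizing example
 * above): with wss_clean_period = 256 and num_entries = 128, every
 * 256th call zeroes one 64-bit entry, so a full sweep of the table
 * takes 256 * 128 = 32768 verbs copies.  Pages that are not
 * re-inserted within a sweep age out of total_count.
 */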
/*
 * Insert the given address into the working set array.
 */
static void wss_insert(void *address)
{
        u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask;
        u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
        u32 nr = page & (BITS_PER_LONG - 1);

        if (!test_and_set_bit(nr, &wss.entries[entry]))
                atomic_inc(&wss.total_count);

        wss_advance_clean_counter();
}
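/*
 * Mapping example (illustrative, assuming PAGE_SHIFT = 12 and
 * pages_mask = 0x1fff): address 0x7f2a12345678 hashes to page
 * (0x7f2a12345678 >> 12) & 0x1fff = 0x345 (837), which is bit
 * nr = 837 & 63 = 5 of entry 837 / 64 = 13 in wss.entries[].
 */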
/*
 * Is the working set larger than the threshold?
 */
static inline int wss_exceeds_threshold(void)
{
        return atomic_read(&wss.total_count) >= wss.threshold;
}
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
        [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
        [IB_WR_SEND] = IB_WC_SEND,
        [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
        [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
/*
 * Length of header by opcode, 0 --> not supported
 */
const u8 hdr_len_by_opcode[256] = {
        /* RC */
        [IB_OPCODE_RC_SEND_FIRST]                     = 12 + 8,
        [IB_OPCODE_RC_SEND_MIDDLE]                    = 12 + 8,
        [IB_OPCODE_RC_SEND_LAST]                      = 12 + 8,
        [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE]       = 12 + 8 + 4,
        [IB_OPCODE_RC_SEND_ONLY]                      = 12 + 8,
        [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 4,
        [IB_OPCODE_RC_RDMA_WRITE_FIRST]               = 12 + 8 + 16,
        [IB_OPCODE_RC_RDMA_WRITE_MIDDLE]              = 12 + 8,
        [IB_OPCODE_RC_RDMA_WRITE_LAST]                = 12 + 8,
        [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_RC_RDMA_WRITE_ONLY]                = 12 + 8 + 16,
        [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
        [IB_OPCODE_RC_RDMA_READ_REQUEST]              = 12 + 8 + 16,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST]       = 12 + 8 + 4,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE]      = 12 + 8,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST]        = 12 + 8 + 4,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY]        = 12 + 8 + 4,
        [IB_OPCODE_RC_ACKNOWLEDGE]                    = 12 + 8 + 4,
        [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]             = 12 + 8 + 4,
        [IB_OPCODE_RC_COMPARE_SWAP]                   = 12 + 8 + 28,
        [IB_OPCODE_RC_FETCH_ADD]                      = 12 + 8 + 28,
        /* UC */
        [IB_OPCODE_UC_SEND_FIRST]                     = 12 + 8,
        [IB_OPCODE_UC_SEND_MIDDLE]                    = 12 + 8,
        [IB_OPCODE_UC_SEND_LAST]                      = 12 + 8,
        [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE]       = 12 + 8 + 4,
        [IB_OPCODE_UC_SEND_ONLY]                      = 12 + 8,
        [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 4,
        [IB_OPCODE_UC_RDMA_WRITE_FIRST]               = 12 + 8 + 16,
        [IB_OPCODE_UC_RDMA_WRITE_MIDDLE]              = 12 + 8,
        [IB_OPCODE_UC_RDMA_WRITE_LAST]                = 12 + 8,
        [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_UC_RDMA_WRITE_ONLY]                = 12 + 8 + 16,
        [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
        /* UD */
        [IB_OPCODE_UD_SEND_ONLY]                      = 12 + 8 + 8,
        [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 12
};
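/*
 * How to read the table above (explanatory note): "12 + 8" is the
 * 12-byte BTH plus the 8-byte LRH that every entry carries.  The
 * trailing term is the opcode-specific extension header, e.g. a
 * 16-byte RETH for RDMA_WRITE_FIRST, 4 bytes of immediate data for
 * the *_WITH_IMMEDIATE variants, 16 + 4 = 20 for
 * RDMA_WRITE_ONLY_WITH_IMMEDIATE (RETH plus immediate), an 8-byte
 * DETH for UD sends, and a 28-byte AtomicETH for COMPARE_SWAP and
 * FETCH_ADD.
 */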
static const opcode_handler opcode_handler_tbl[256] = {
        /* RC */
        [IB_OPCODE_RC_SEND_FIRST]                     = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_MIDDLE]                    = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_LAST]                      = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE]       = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_ONLY]                      = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_FIRST]               = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_MIDDLE]              = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_LAST]                = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_ONLY]                = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_REQUEST]              = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST]       = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE]      = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST]        = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY]        = &hfi1_rc_rcv,
        [IB_OPCODE_RC_ACKNOWLEDGE]                    = &hfi1_rc_rcv,
        [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]             = &hfi1_rc_rcv,
        [IB_OPCODE_RC_COMPARE_SWAP]                   = &hfi1_rc_rcv,
        [IB_OPCODE_RC_FETCH_ADD]                      = &hfi1_rc_rcv,
        /* UC */
        [IB_OPCODE_UC_SEND_FIRST]                     = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_MIDDLE]                    = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_LAST]                      = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE]       = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_ONLY]                      = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_FIRST]               = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_MIDDLE]              = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_LAST]                = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_ONLY]                = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
        /* UD */
        [IB_OPCODE_UD_SEND_ONLY]                      = &hfi1_ud_rcv,
        [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_ud_rcv,
        /* CNP */
        [IB_OPCODE_CNP]                               = &hfi1_cnp_rcv
};
__be64 ib_hfi1_sys_image_guid;
/**
 * hfi1_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @copy_last: do a separate copy of the last 8 bytes
 */
void hfi1_copy_sge(
        struct rvt_sge_state *ss,
        void *data, u32 length,
        int release,
        int copy_last)
{
        struct rvt_sge *sge = &ss->sge;
        int in_last = 0;
        int i;
        int cacheless_copy = 0;

        if (sge_copy_mode == COPY_CACHELESS) {
                cacheless_copy = length >= PAGE_SIZE;
        } else if (sge_copy_mode == COPY_ADAPTIVE) {
                if (length >= PAGE_SIZE) {
                        /*
                         * NOTE: this *assumes*:
                         * o The first vaddr is the dest.
                         * o If multiple pages, then vaddr is sequential.
                         */
                        wss_insert(sge->vaddr);
                        if (length >= (2 * PAGE_SIZE))
                                wss_insert(sge->vaddr + PAGE_SIZE);

                        cacheless_copy = wss_exceeds_threshold();
                } else {
                        wss_advance_clean_counter();
                }
        }

        if (copy_last) {
                if (length > 8) {
                        length -= 8;
                } else {
                        copy_last = 0;
                        in_last = 1;
                }
        }

again:
        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                WARN_ON_ONCE(len == 0);
                if (unlikely(in_last)) {
                        /* enforce byte transfer ordering */
                        for (i = 0; i < len; i++)
                                ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
                } else if (cacheless_copy) {
                        cacheless_memcpy(sge->vaddr, data, len);
                } else {
                        memcpy(sge->vaddr, data, len);
                }
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (release)
                                rvt_put_mr(sge->mr);
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= RVT_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                data += len;
                length -= len;
        }

        if (copy_last) {
                copy_last = 0;
                in_last = 1;
                length = 8;
                goto again;
        }
}
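/*
 * Note on copy_last (explanatory, based on the logic above): when it is
 * set and length > 8, the bulk of the payload is copied first and the
 * final 8 bytes are then copied one byte at a time.  The byte loop keeps
 * the tail of the buffer from becoming visible before the rest of the
 * data, which matters to ULPs that poll a trailing flag word to detect
 * completion of the transfer.
 */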
/**
 * hfi1_skip_sge - skip over SGE memory
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
        struct rvt_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                WARN_ON_ONCE(len == 0);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (release)
                                rvt_put_mr(sge->mr);
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= RVT_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                length -= len;
        }
}
/*
 * Make sure the QP is ready and able to accept the given opcode.
 */
static inline int qp_ok(int opcode, struct hfi1_packet *packet)
{
        struct hfi1_ibport *ibp;

        if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
                goto dropit;
        if (((opcode & RVT_OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
            (opcode == IB_OPCODE_CNP))
                return 1;
dropit:
        ibp = &packet->rcd->ppd->ibport_data;
        ibp->rvp.n_pkt_drops++;
        return 0;
}
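/*
 * Dispatch example (illustrative): the IB opcode space is carved into
 * 32-opcode transport blocks, so (opcode & RVT_OPCODE_QP_MASK) keeps
 * only the transport bits.  An RC QP has allowed_ops set to the RC
 * block, so e.g. IB_OPCODE_RC_SEND_ONLY passes while a UC opcode
 * arriving on the same QP falls through to dropit.  CNPs are
 * transport-agnostic and are always allowed.
 */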
/**
 * hfi1_ib_rcv - process an incoming packet
 * @packet: data packet information
 *
 * This is called to process an incoming packet at interrupt level.
 *
 * Tlen is the length of the header + data + CRC in bytes.
 */
void hfi1_ib_rcv(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;
        struct hfi1_ib_header *hdr = packet->hdr;
        u32 tlen = packet->tlen;
        struct hfi1_pportdata *ppd = rcd->ppd;
        struct hfi1_ibport *ibp = &ppd->ibport_data;
        struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
        unsigned long flags;
        u32 qp_num;
        int lnh;
        u8 opcode;
        u16 lid;

        /* Check for GRH */
        lnh = be16_to_cpu(hdr->lrh[0]) & 3;
        if (lnh == HFI1_LRH_BTH) {
                packet->ohdr = &hdr->u.oth;
        } else if (lnh == HFI1_LRH_GRH) {
                u32 vtf;

                packet->ohdr = &hdr->u.l.oth;
                if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
                        goto drop;
                vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
                if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
                        goto drop;
                packet->rcv_flags |= HFI1_HAS_GRH;
        } else {
                goto drop;
        }

        trace_input_ibhdr(rcd->dd, hdr);

        opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
        inc_opstats(tlen, &rcd->opstats->stats[opcode]);

        /* Get the destination QP number. */
        qp_num = be32_to_cpu(packet->ohdr->bth[1]) & RVT_QPN_MASK;
        lid = be16_to_cpu(hdr->lrh[1]);
        if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
                     (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) {
                struct rvt_mcast *mcast;
                struct rvt_mcast_qp *p;

                if (lnh != HFI1_LRH_GRH)
                        goto drop;
                mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
                if (!mcast)
                        goto drop;
                list_for_each_entry_rcu(p, &mcast->qp_list, list) {
                        packet->qp = p->qp;
                        spin_lock_irqsave(&packet->qp->r_lock, flags);
                        if (likely((qp_ok(opcode, packet))))
                                opcode_handler_tbl[opcode](packet);
                        spin_unlock_irqrestore(&packet->qp->r_lock, flags);
                }
                /*
                 * Notify rvt_multicast_detach() if it is waiting for us
                 * to finish.
                 */
                if (atomic_dec_return(&mcast->refcount) <= 1)
                        wake_up(&mcast->wait);
        } else {
                rcu_read_lock();
                packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
                if (!packet->qp) {
                        rcu_read_unlock();
                        goto drop;
                }
                spin_lock_irqsave(&packet->qp->r_lock, flags);
                if (likely((qp_ok(opcode, packet))))
                        opcode_handler_tbl[opcode](packet);
                spin_unlock_irqrestore(&packet->qp->r_lock, flags);
                rcu_read_unlock();
        }
        return;

drop:
        ibp->rvp.n_pkt_drops++;
}
/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
        struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
        struct list_head *list = &dev->memwait;
        struct rvt_qp *qp = NULL;
        struct iowait *wait;
        unsigned long flags;
        struct hfi1_qp_priv *priv;

        write_seqlock_irqsave(&dev->iowait_lock, flags);
        if (!list_empty(list)) {
                wait = list_first_entry(list, struct iowait, list);
                qp = iowait_to_qp(wait);
                priv = qp->priv;
                list_del_init(&priv->s_iowait.list);
                /* refcount held until actual wake up */
                if (!list_empty(list))
                        mod_timer(&dev->mem_timer, jiffies + 1);
        }
        write_sequnlock_irqrestore(&dev->iowait_lock, flags);

        if (qp)
                hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
}
void update_sge(struct rvt_sge_state *ss, u32 length)
{
        struct rvt_sge *sge = &ss->sge;

        sge->vaddr += length;
        sge->length -= length;
        sge->sge_length -= length;
        if (sge->sge_length == 0) {
                if (--ss->num_sge)
                        *sge = *ss->sg_list++;
        } else if (sge->length == 0 && sge->mr->lkey) {
                if (++sge->n >= RVT_SEGSZ) {
                        if (++sge->m >= sge->mr->mapsz)
                                return;
                        sge->n = 0;
                }
                sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
        }
}
/*
 * This is called with progress side lock held.
 */
static void verbs_sdma_complete(
        struct sdma_txreq *cookie,
        int status)
{
        struct verbs_txreq *tx =
                container_of(cookie, struct verbs_txreq, txreq);
        struct rvt_qp *qp = tx->qp;

        spin_lock(&qp->s_lock);
        if (tx->wqe) {
                hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
        } else if (qp->ibqp.qp_type == IB_QPT_RC) {
                struct hfi1_ib_header *hdr;

                hdr = &tx->phdr.hdr;
                hfi1_rc_send_complete(qp, hdr);
        }
        spin_unlock(&qp->s_lock);

        hfi1_put_txreq(tx);
}
static int wait_kmem(struct hfi1_ibdev *dev,
                     struct rvt_qp *qp,
                     struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                write_seqlock(&dev->iowait_lock);
                list_add_tail(&ps->s_txreq->txreq.list,
                              &priv->s_iowait.tx_head);
                if (list_empty(&priv->s_iowait.list)) {
                        if (list_empty(&dev->memwait))
                                mod_timer(&dev->mem_timer, jiffies + 1);
                        qp->s_flags |= RVT_S_WAIT_KMEM;
                        list_add_tail(&priv->s_iowait.list, &dev->memwait);
                        trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
                        atomic_inc(&qp->refcount);
                }
                write_sequnlock(&dev->iowait_lock);
                qp->s_flags &= ~RVT_S_BUSY;
                ret = -EBUSY;
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);

        return ret;
}
/*
 * This routine calls txadds for each sg entry.
 *
 * Add failures will revert the sge cursor
 */
static noinline int build_verbs_ulp_payload(
        struct sdma_engine *sde,
        struct rvt_sge_state *ss,
        u32 length,
        struct verbs_txreq *tx)
{
        struct rvt_sge *sg_list = ss->sg_list;
        struct rvt_sge sge = ss->sge;
        u8 num_sge = ss->num_sge;
        u32 len;
        int ret = 0;

        while (length) {
                len = ss->sge.length;
                if (len > length)
                        len = length;
                if (len > ss->sge.sge_length)
                        len = ss->sge.sge_length;
                WARN_ON_ONCE(len == 0);
                ret = sdma_txadd_kvaddr(
                        sde->dd,
                        &tx->txreq,
                        ss->sge.vaddr,
                        len);
                if (ret)
                        goto bail_txadd;
                update_sge(ss, len);
                length -= len;
        }
        return ret;

bail_txadd:
        /* unwind cursor */
        ss->sge = sge;
        ss->num_sge = num_sge;
        ss->sg_list = sg_list;
        return ret;
}
/*
 * Build the number of DMA descriptors needed to send length bytes of data.
 *
 * NOTE: DMA mapping is held in the tx until completed in the ring or
 *       the tx desc is freed without having been submitted to the ring
 *
 * This routine ensures all the helper routine calls succeed.
 */
static int build_verbs_tx_desc(
        struct sdma_engine *sde,
        struct rvt_sge_state *ss,
        u32 length,
        struct verbs_txreq *tx,
        struct ahg_ib_header *ahdr,
        u64 pbc)
{
        int ret = 0;
        struct hfi1_pio_header *phdr = &tx->phdr;
        u16 hdrbytes = tx->hdr_dwords << 2;

        if (!ahdr->ahgcount) {
                ret = sdma_txinit_ahg(
                        &tx->txreq,
                        ahdr->tx_flags,
                        hdrbytes + length,
                        ahdr->ahgidx,
                        0,
                        NULL,
                        0,
                        verbs_sdma_complete);
                if (ret)
                        goto bail_txadd;
                phdr->pbc = cpu_to_le64(pbc);
                ret = sdma_txadd_kvaddr(
                        sde->dd,
                        &tx->txreq,
                        phdr,
                        hdrbytes);
                if (ret)
                        goto bail_txadd;
        } else {
                ret = sdma_txinit_ahg(
                        &tx->txreq,
                        ahdr->tx_flags,
                        length,
                        ahdr->ahgidx,
                        ahdr->ahgcount,
                        ahdr->ahgdesc,
                        hdrbytes,
                        verbs_sdma_complete);
                if (ret)
                        goto bail_txadd;
        }

        /* add the ulp payload - if any.  ss can be NULL for acks */
        if (ss)
                ret = build_verbs_ulp_payload(sde, ss, length, tx);
bail_txadd:
        return ret;
}
int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                        u64 pbc)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct ahg_ib_header *ahdr = priv->s_hdr;
        u32 hdrwords = qp->s_hdrwords;
        struct rvt_sge_state *ss = qp->s_cur_sge;
        u32 len = qp->s_cur_size;
        u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
        struct hfi1_ibdev *dev = ps->dev;
        struct hfi1_pportdata *ppd = ps->ppd;
        struct verbs_txreq *tx;
        u64 pbc_flags = 0;
        u8 sc5 = priv->s_sc;
        int ret;

        tx = ps->s_txreq;
        if (!sdma_txreq_built(&tx->txreq)) {
                if (likely(pbc == 0)) {
                        u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);

                        /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
                        pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;

                        pbc = create_pbc(ppd,
                                         pbc_flags,
                                         qp->srate_mbps,
                                         vl,
                                         plen);
                }
                tx->wqe = qp->s_wqe;
                ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
                if (unlikely(ret))
                        goto bail_build;
        }
        ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq);
        if (unlikely(ret < 0)) {
                if (ret == -ECOMM)
                        goto bail_ecomm;
                return ret;
        }
        trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
                                &ps->s_txreq->phdr.hdr);
        return ret;

bail_ecomm:
        /* The current one got "sent" */
        return 0;
bail_build:
        ret = wait_kmem(dev, qp, ps);
        if (!ret) {
                /* free txreq - bad state */
                hfi1_put_txreq(ps->s_txreq);
                ps->s_txreq = NULL;
        }
        return ret;
}
/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int pio_wait(struct rvt_qp *qp,
                    struct send_context *sc,
                    struct hfi1_pkt_state *ps,
                    u32 flag)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_devdata *dd = sc->dd;
        struct hfi1_ibdev *dev = &dd->verbs_dev;
        unsigned long flags;
        int ret = 0;

        /*
         * Note that as soon as want_buffer() is called and
         * possibly before it returns, sc_piobufavail()
         * could be called. Therefore, put QP on the I/O wait list before
         * enabling the PIO avail interrupt.
         */
        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                write_seqlock(&dev->iowait_lock);
                list_add_tail(&ps->s_txreq->txreq.list,
                              &priv->s_iowait.tx_head);
                if (list_empty(&priv->s_iowait.list)) {
                        struct hfi1_ibdev *dev = &dd->verbs_dev;
                        int was_empty;

                        dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
                        dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
                        qp->s_flags |= flag;
                        was_empty = list_empty(&sc->piowait);
                        list_add_tail(&priv->s_iowait.list, &sc->piowait);
                        trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
                        atomic_inc(&qp->refcount);
                        /* counting: only call wantpiobuf_intr if first user */
                        if (was_empty)
                                hfi1_sc_wantpiobuf_intr(sc, 1);
                }
                write_sequnlock(&dev->iowait_lock);
                qp->s_flags &= ~RVT_S_BUSY;
                ret = -EBUSY;
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}
static void verbs_pio_complete(void *arg, int code)
{
        struct rvt_qp *qp = (struct rvt_qp *)arg;
        struct hfi1_qp_priv *priv = qp->priv;

        if (iowait_pio_dec(&priv->s_iowait))
                iowait_drain_wakeup(&priv->s_iowait);
}
int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                        u64 pbc)
{
        struct hfi1_qp_priv *priv = qp->priv;
        u32 hdrwords = qp->s_hdrwords;
        struct rvt_sge_state *ss = qp->s_cur_sge;
        u32 len = qp->s_cur_size;
        u32 dwords = (len + 3) >> 2;
        u32 plen = hdrwords + dwords + 2; /* includes pbc */
        struct hfi1_pportdata *ppd = ps->ppd;
        u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
        u64 pbc_flags = 0;
        u8 sc5;
        unsigned long flags = 0;
        struct send_context *sc;
        struct pio_buf *pbuf;
        int wc_status = IB_WC_SUCCESS;
        int ret = 0;
        pio_release_cb cb = NULL;

        /* only RC/UC use complete */
        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
                cb = verbs_pio_complete;
                break;
        default:
                break;
        }

        /* vl15 special case taken care of in ud.c */
        sc5 = priv->s_sc;
        sc = ps->s_txreq->psc;

        if (likely(pbc == 0)) {
                u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
                /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
                pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
                pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
        }
        if (cb)
                iowait_pio_inc(&priv->s_iowait);
        pbuf = sc_buffer_alloc(sc, plen, cb, qp);
        if (unlikely(!pbuf)) {
                if (cb)
                        verbs_pio_complete(qp, 0);
                if (ppd->host_link_state != HLS_UP_ACTIVE) {
                        /*
                         * If we have filled the PIO buffers to capacity and
                         * are not in an active state, this request is not
                         * going to go out, so just complete it with an error,
                         * or else a ULP or the core may be stuck waiting.
                         */
                        hfi1_cdbg(
                                PIO,
                                "alloc failed. state not active, completing");
                        wc_status = IB_WC_GENERAL_ERR;
                        goto pio_bail;
                } else {
                        /*
                         * This is a normal occurrence. The PIO buffers are
                         * full, but we are still happily sending, so let's
                         * continue to queue the request.
                         */
                        hfi1_cdbg(PIO, "alloc failed. state active, queuing");
                        ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
                        if (!ret)
                                /* txreq not queued - free */
                                goto bail;
                        /* tx consumed in wait */
                        return ret;
                }
        }

        if (len == 0) {
                pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
        } else {
                if (ss) {
                        seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4);
                        while (len) {
                                void *addr = ss->sge.vaddr;
                                u32 slen = ss->sge.length;

                                if (slen > len)
                                        slen = len;
                                update_sge(ss, slen);
                                seg_pio_copy_mid(pbuf, addr, slen);
                                len -= slen;
                        }
                        seg_pio_copy_end(pbuf);
                }
        }

        trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
                               &ps->s_txreq->phdr.hdr);

pio_bail:
        if (qp->s_wqe) {
                spin_lock_irqsave(&qp->s_lock, flags);
                hfi1_send_complete(qp, qp->s_wqe, wc_status);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        } else if (qp->ibqp.qp_type == IB_QPT_RC) {
                spin_lock_irqsave(&qp->s_lock, flags);
                hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        }

        ret = 0;

bail:
        hfi1_put_txreq(ps->s_txreq);
        ps->s_txreq = NULL;
        return ret;
}
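/*
 * PBC construction note (explanatory): both send paths above derive the
 * Per-Buffer Control word the same way.  Bit 4 of sc5 (sc5 & 0x10)
 * becomes the PBC's DC_INFO bit so the link layer can recover SC[4],
 * since the LRH itself only carries SC[3:0]; the low SC bits select the
 * VL via sc_to_vlt().  A caller-supplied nonzero pbc bypasses this and
 * is used as-is.
 */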
/*
 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the partition key table), return 0
 * otherwise. Use the matching criteria for egress partition keys
 * specified in the OPAv1 spec., section 9.1l.7.
 */
static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
{
        u16 mkey = pkey & PKEY_LOW_15_MASK;
        u16 mentry = ent & PKEY_LOW_15_MASK;

        if (mkey == mentry) {
                /*
                 * If pkey[15] is set (full partition member),
                 * is bit 15 in the corresponding table element
                 * clear (limited member)?
                 */
                if (pkey & PKEY_MEMBER_MASK)
                        return !!(ent & PKEY_MEMBER_MASK);
                return 1;
        }
        return 0;
}
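/*
 * Matching examples (illustrative): pkey 0x8001 (full member of
 * partition 1) matches table entry 0x8001 but not entry 0x0001,
 * because a full-member pkey requires a full-member entry.  A
 * limited-member pkey 0x0001 matches either 0x0001 or 0x8001, since
 * only the low 15 bits are compared in that case.
 */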
/**
 * egress_pkey_check - check P_KEY of a packet
 * @ppd: Physical IB port data
 * @lrh: Local route header
 * @bth: Base transport header
 * @sc5: SC for packet
 * @s_pkey_index: used as a lookup optimization for kernel contexts only.
 * A negative value means a user context is calling this function.
 *
 * Checks whether the header's pkey is valid.
 *
 * Return: 0 on success, otherwise, 1
 */
int egress_pkey_check(struct hfi1_pportdata *ppd, __be16 *lrh, __be32 *bth,
                      u8 sc5, int8_t s_pkey_index)
{
        struct hfi1_devdata *dd;
        int i;
        u16 pkey;
        int is_user_ctxt_mechanism = (s_pkey_index < 0);

        if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
                return 0;

        pkey = (u16)be32_to_cpu(bth[0]);

        /* If SC15, pkey[0:14] must be 0x7fff */
        if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
                goto bad;

        /* Is the pkey = 0x0, or 0x8000? */
        if ((pkey & PKEY_LOW_15_MASK) == 0)
                goto bad;

        /*
         * For the kernel contexts only, if a qp is passed into the function,
         * the most likely matching pkey has index qp->s_pkey_index
         */
        if (!is_user_ctxt_mechanism &&
            egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) {
                return 0;
        }

        for (i = 0; i < MAX_PKEY_VALUES; i++) {
                if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
                        return 0;
        }
bad:
        /*
         * For the user-context mechanism, the P_KEY check would only happen
         * once per SDMA request, not once per packet.  Therefore, there's no
         * need to increment the counter for the user-context mechanism.
         */
        if (!is_user_ctxt_mechanism) {
                incr_cntr64(&ppd->port_xmit_constraint_errors);
                dd = ppd->dd;
                if (!(dd->err_info_xmit_constraint.status &
                      OPA_EI_STATUS_SMASK)) {
                        u16 slid = be16_to_cpu(lrh[3]);

                        dd->err_info_xmit_constraint.status |=
                                OPA_EI_STATUS_SMASK;
                        dd->err_info_xmit_constraint.slid = slid;
                        dd->err_info_xmit_constraint.pkey = pkey;
                }
        }
        return 1;
}
/**
 * get_send_routine - choose an egress routine
 *
 * Choose an egress routine based on QP type
 * and size
 */
static inline send_routine get_send_routine(struct rvt_qp *qp,
                                            struct verbs_txreq *tx)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ib_header *h = &tx->phdr.hdr;

        if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
                return dd->process_pio_send;
        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
                return dd->process_pio_send;
        case IB_QPT_GSI:
        case IB_QPT_UD:
                break;
        case IB_QPT_RC:
                if (piothreshold &&
                    qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
                    (BIT(get_opcode(h) & 0x1f) & rc_only_opcode) &&
                    iowait_sdma_pending(&priv->s_iowait) == 0 &&
                    !sdma_txreq_built(&tx->txreq))
                        return dd->process_pio_send;
                break;
        case IB_QPT_UC:
                if (piothreshold &&
                    qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
                    (BIT(get_opcode(h) & 0x1f) & uc_only_opcode) &&
                    iowait_sdma_pending(&priv->s_iowait) == 0 &&
                    !sdma_txreq_built(&tx->txreq))
                        return dd->process_pio_send;
                break;
        default:
                break;
        }
        return dd->process_dma_send;
}
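/*
 * Decision example (illustrative): with the default piothreshold of 256,
 * a 128-byte RC SEND_ONLY on an idle QP (no SDMA descriptors pending and
 * no txreq already built) goes out via process_pio_send, while a 4 KiB
 * write on the same QP exceeds min(piothreshold, pmtu) and is handed to
 * process_dma_send.  SMI packets always use PIO, as does the whole
 * device when SDMA is unavailable.
 */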
/**
 * hfi1_verbs_send - send a packet
 * @qp: the QP to send on
 * @ps: the state of the packet to send
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
 */
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_other_headers *ohdr;
        struct hfi1_ib_header *hdr;
        send_routine sr;
        int ret;
        u8 lnh;

        hdr = &ps->s_txreq->phdr.hdr;
        /* locate the pkey within the headers */
        lnh = be16_to_cpu(hdr->lrh[0]) & 3;
        if (lnh == HFI1_LRH_GRH)
                ohdr = &hdr->u.l.oth;
        else
                ohdr = &hdr->u.oth;

        sr = get_send_routine(qp, ps->s_txreq);
        ret = egress_pkey_check(dd->pport,
                                hdr->lrh,
                                ohdr->bth,
                                priv->s_sc,
                                qp->s_pkey_index);
        if (unlikely(ret)) {
                /*
                 * The value we are returning here does not get propagated to
                 * the verbs caller. Thus we need to complete the request with
                 * error otherwise the caller could be sitting waiting on the
                 * completion event. Only do this for PIO. SDMA has its own
                 * mechanism for handling the errors. So for SDMA we can just
                 * return.
                 */
                if (sr == dd->process_pio_send) {
                        unsigned long flags;

                        hfi1_cdbg(PIO, "%s() Failed. Completing with err",
                                  __func__);
                        spin_lock_irqsave(&qp->s_lock, flags);
                        hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
                        spin_unlock_irqrestore(&qp->s_lock, flags);
                }
                return -EINVAL;
        }
        if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
                return pio_wait(qp,
                                ps->s_txreq->psc,
                                ps,
                                RVT_S_WAIT_PIO_DRAIN);
        return sr(qp, ps, 0);
}
/**
 * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
{
        struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

        memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));

        rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
                IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
                IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
                IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
        rdi->dparms.props.page_size_cap = PAGE_SIZE;
        rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
        rdi->dparms.props.vendor_part_id = dd->pcidev->device;
        rdi->dparms.props.hw_ver = dd->minrev;
        rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
        rdi->dparms.props.max_mr_size = ~0ULL;
        rdi->dparms.props.max_qp = hfi1_max_qps;
        rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
        rdi->dparms.props.max_sge = hfi1_max_sges;
        rdi->dparms.props.max_sge_rd = hfi1_max_sges;
        rdi->dparms.props.max_cq = hfi1_max_cqs;
        rdi->dparms.props.max_ah = hfi1_max_ahs;
        rdi->dparms.props.max_cqe = hfi1_max_cqes;
        rdi->dparms.props.max_mr = rdi->lkey_table.max;
        rdi->dparms.props.max_fmr = rdi->lkey_table.max;
        rdi->dparms.props.max_map_per_fmr = 32767;
        rdi->dparms.props.max_pd = hfi1_max_pds;
        rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
        rdi->dparms.props.max_qp_init_rd_atom = 255;
        rdi->dparms.props.max_srq = hfi1_max_srqs;
        rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
        rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
        rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
        rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
        rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
        rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
        rdi->dparms.props.max_total_mcast_qp_attach =
                                        rdi->dparms.props.max_mcast_qp_attach *
                                        rdi->dparms.props.max_mcast_grp;
}
static inline u16 opa_speed_to_ib(u16 in)
{
        u16 out = 0;

        if (in & OPA_LINK_SPEED_25G)
                out |= IB_SPEED_EDR;
        if (in & OPA_LINK_SPEED_12_5G)
                out |= IB_SPEED_FDR;

        return out;
}
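/*
 * Mapping example (illustrative): an OPA port running at 25 Gb/s per
 * lane reports IB_SPEED_EDR (25.78125 Gb/s), and 12.5 Gb/s maps to
 * IB_SPEED_FDR (14.0625 Gb/s), the closest IB enums, since OPA rates
 * have no exact IB equivalents.
 */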
/*
 * Convert a single OPA link width (no multiple flags) to an IB value.
 * A zero OPA link width means link down, which means the IB width value
 * is a don't care.
 */
static inline u16 opa_width_to_ib(u16 in)
{
        switch (in) {
        case OPA_LINK_WIDTH_1X:
        /* map 2x and 3x to 1x as they don't exist in IB */
        case OPA_LINK_WIDTH_2X:
        case OPA_LINK_WIDTH_3X:
                return IB_WIDTH_1X;
        default: /* link down or unknown, return our largest width */
        case OPA_LINK_WIDTH_4X:
                return IB_WIDTH_4X;
        }
}
static int query_port(struct rvt_dev_info *rdi, u8 port_num,
                      struct ib_port_attr *props)
{
        struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
        struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
        struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
        u16 lid = ppd->lid;

        props->lid = lid ? lid : 0;
        props->lmc = ppd->lmc;
        /* OPA logical states match IB logical states */
        props->state = driver_lstate(ppd);
        props->phys_state = hfi1_ibphys_portstate(ppd);
        props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
        props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
        /* see rate_show() in ib core/sysfs.c */
        props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
        props->max_vl_num = ppd->vls_supported;

        /* Once we are a "first class" citizen and have added the OPA MTUs to
         * the core we can advertise the larger MTU enum to the ULPs, for now
         * advertise only 4K.
         *
         * Those applications which are either OPA aware or pass the MTU enum
         * from the Path Records to us will get the new 8k MTU.  Those that
         * attempt to process the MTU enum may fail in various ways.
         */
        props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
                                      4096 : hfi1_max_mtu), IB_MTU_4096);
        props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
                mtu_to_enum(ppd->ibmtu, IB_MTU_2048);

        return 0;
}
static int modify_device(struct ib_device *device,
                         int device_modify_mask,
                         struct ib_device_modify *device_modify)
{
        struct hfi1_devdata *dd = dd_from_ibdev(device);
        unsigned i;
        int ret;

        if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
                                   IB_DEVICE_MODIFY_NODE_DESC)) {
                ret = -EOPNOTSUPP;
                goto bail;
        }

        if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
                memcpy(device->node_desc, device_modify->node_desc, 64);
                for (i = 0; i < dd->num_pports; i++) {
                        struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

                        hfi1_node_desc_chg(ibp);
                }
        }

        if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
                ib_hfi1_sys_image_guid =
                        cpu_to_be64(device_modify->sys_image_guid);
                for (i = 0; i < dd->num_pports; i++) {
                        struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

                        hfi1_sys_guid_chg(ibp);
                }
        }

        ret = 0;

bail:
        return ret;
}
static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
{
        struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
        struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
        struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
        int ret;

        set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
                             OPA_LINKDOWN_REASON_UNKNOWN);
        ret = set_link_state(ppd, HLS_DN_DOWNDEF);
        return ret;
}
*rdi
, struct rvt_ibport
*rvp
,
1449 int guid_index
, __be64
*guid
)
1451 struct hfi1_ibport
*ibp
= container_of(rvp
, struct hfi1_ibport
, rvp
);
1452 struct hfi1_pportdata
*ppd
= ppd_from_ibp(ibp
);
1454 if (guid_index
== 0)
1455 *guid
= cpu_to_be64(ppd
->guid
);
1456 else if (guid_index
< HFI1_GUIDS_PER_PORT
)
1457 *guid
= ibp
->guids
[guid_index
- 1];
/*
 * convert ah port,sl to sc
 */
u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah)
{
        struct hfi1_ibport *ibp = to_iport(ibdev, ah->port_num);

        return ibp->sl_to_sc[ah->sl];
}
static int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
        struct hfi1_ibport *ibp;
        struct hfi1_pportdata *ppd;
        struct hfi1_devdata *dd;
        u8 sc5;

        /* test the mapping for validity */
        ibp = to_iport(ibdev, ah_attr->port_num);
        ppd = ppd_from_ibp(ibp);
        sc5 = ibp->sl_to_sc[ah_attr->sl];
        dd = dd_from_ppd(ppd);
        if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
                return -EINVAL;
        return 0;
}
static void hfi1_notify_new_ah(struct ib_device *ibdev,
                               struct ib_ah_attr *ah_attr,
                               struct rvt_ah *ah)
{
        struct hfi1_ibport *ibp;
        struct hfi1_pportdata *ppd;
        struct hfi1_devdata *dd;
        u8 sc5;

        /*
         * Do not trust reading anything from rvt_ah at this point as it is not
         * done being setup. We can however modify things which we need to set.
         */

        ibp = to_iport(ibdev, ah_attr->port_num);
        ppd = ppd_from_ibp(ibp);
        sc5 = ibp->sl_to_sc[ah->attr.sl];
        dd = dd_from_ppd(ppd);
        ah->vl = sc_to_vlt(dd, sc5);
        if (ah->vl < num_vls || ah->vl == 15)
                ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
}
struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
{
        struct ib_ah_attr attr;
        struct ib_ah *ah = ERR_PTR(-EINVAL);
        struct rvt_qp *qp0;

        memset(&attr, 0, sizeof(attr));
        attr.dlid = dlid;
        attr.port_num = ppd_from_ibp(ibp)->port;
        rcu_read_lock();
        qp0 = rcu_dereference(ibp->rvp.qp[0]);
        if (qp0)
                ah = ib_create_ah(qp0->ibqp.pd, &attr);
        rcu_read_unlock();
        return ah;
}
/**
 * hfi1_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the hfi1_ib device
 */
unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
{
        return ARRAY_SIZE(dd->pport[0].pkeys);
}
static void init_ibport(struct hfi1_pportdata *ppd)
{
        struct hfi1_ibport *ibp = &ppd->ibport_data;
        size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
        int i;

        for (i = 0; i < sz; i++) {
                ibp->sl_to_sc[i] = i;
                ibp->sc_to_sl[i] = i;
        }

        spin_lock_init(&ibp->rvp.lock);
        /* Set the prefix to the default value (see ch. 4.1.1) */
        ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
        ibp->rvp.sm_lid = 0;
        /* Below should only set bits defined in OPA PortInfo.CapabilityMask */
        ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
                IB_PORT_CAP_MASK_NOTICE_SUP;
        ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
        ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
        ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
        ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
        ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

        RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
        RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}
/**
 * hfi1_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return 0 if successful, errno if unsuccessful.
 */
int hfi1_register_ib_device(struct hfi1_devdata *dd)
{
        struct hfi1_ibdev *dev = &dd->verbs_dev;
        struct ib_device *ibdev = &dev->rdi.ibdev;
        struct hfi1_pportdata *ppd = dd->pport;
        unsigned i;
        int ret;
        size_t lcpysz = IB_DEVICE_NAME_MAX;

        for (i = 0; i < dd->num_pports; i++)
                init_ibport(ppd + i);

        /* Only need to initialize non-zero fields. */

        setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);

        seqlock_init(&dev->iowait_lock);
        INIT_LIST_HEAD(&dev->txwait);
        INIT_LIST_HEAD(&dev->memwait);

        ret = verbs_txreq_init(dev);
        if (ret)
                goto err_verbs_txreq;

        /*
         * The system image GUID is supposed to be the same for all
         * HFIs in a single system but since there can be other
         * device types in the system, we can't be sure this is unique.
         */
        if (!ib_hfi1_sys_image_guid)
                ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid);
        lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
        strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
        ibdev->owner = THIS_MODULE;
        ibdev->node_guid = cpu_to_be64(ppd->guid);
        ibdev->phys_port_cnt = dd->num_pports;
        ibdev->dma_device = &dd->pcidev->dev;
        ibdev->modify_device = modify_device;

        /* keep process mad in the driver */
        ibdev->process_mad = hfi1_process_mad;

        strncpy(ibdev->node_desc, init_utsname()->nodename,
                sizeof(ibdev->node_desc));

        /*
         * Fill in rvt info object.
         */
        dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
        dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
        dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
        dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
        dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
        dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
        dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
        dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
        dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
        /*
         * Fill in rvt info device attributes.
         */
        hfi1_fill_device_attr(dd);

        /* queue pair */
        dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
        dd->verbs_dev.rdi.dparms.qpn_start = 0;
        dd->verbs_dev.rdi.dparms.qpn_inc = 1;
        dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
        dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
        dd->verbs_dev.rdi.dparms.qpn_res_end =
        dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
        dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
        dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
        dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
        dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
        dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
        dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;

        dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
        dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
        dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
        dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
        dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
        dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
        dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
        dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
        dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
        dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
        dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
        dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
        dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
        dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
        dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
        dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
        dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;

        /* completion queue */
        snprintf(dd->verbs_dev.rdi.dparms.cq_name,
                 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
                 "hfi1_cq%d", dd->unit);
        dd->verbs_dev.rdi.dparms.node = dd->node;

        /* misc settings */
        dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
        dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
        dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
        dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);

        ppd = dd->pport;
        for (i = 0; i < dd->num_pports; i++, ppd++)
                rvt_init_port(&dd->verbs_dev.rdi,
                              &ppd->ibport_data.rvp,
                              i,
                              ppd->pkeys);

        ret = rvt_register_device(&dd->verbs_dev.rdi);
        if (ret)
                goto err_verbs_txreq;

        ret = hfi1_verbs_register_sysfs(dd);
        if (ret)
                goto err_class;

        return ret;

err_class:
        rvt_unregister_device(&dd->verbs_dev.rdi);
err_verbs_txreq:
        verbs_txreq_exit(dev);
        dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
        return ret;
}
void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
{
        struct hfi1_ibdev *dev = &dd->verbs_dev;

        hfi1_verbs_unregister_sysfs(dd);

        rvt_unregister_device(&dd->verbs_dev.rdi);

        if (!list_empty(&dev->txwait))
                dd_dev_err(dd, "txwait list not empty!\n");
        if (!list_empty(&dev->memwait))
                dd_dev_err(dd, "memwait list not empty!\n");

        del_timer_sync(&dev->mem_timer);
        verbs_txreq_exit(dev);
}
void hfi1_cnp_rcv(struct hfi1_packet *packet)
{
        struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        struct hfi1_ib_header *hdr = packet->hdr;
        struct rvt_qp *qp = packet->qp;
        u32 lqpn, rqpn = 0;
        u16 rlid = 0;
        u8 sl, sc5, sc4_bit, svc_type;
        bool sc4_set = has_sc4_bit(packet);

        switch (packet->qp->ibqp.qp_type) {
        case IB_QPT_UC:
                rlid = qp->remote_ah_attr.dlid;
                rqpn = qp->remote_qpn;
                svc_type = IB_CC_SVCTYPE_UC;
                break;
        case IB_QPT_RC:
                rlid = qp->remote_ah_attr.dlid;
                rqpn = qp->remote_qpn;
                svc_type = IB_CC_SVCTYPE_RC;
                break;
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_UD:
                svc_type = IB_CC_SVCTYPE_UD;
                break;
        default:
                ibp->rvp.n_pkt_drops++;
                return;
        }

        sc4_bit = sc4_set << 4;
        sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
        sc5 |= sc4_bit;
        sl = ibp->sc_to_sl[sc5];
        lqpn = qp->ibqp.qp_num;

        process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}