/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <linux/numa.h>
#include <rdma/rdma_vt.h>

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

#define NUM_IB_PORTS 1
/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(num_user_contexts,
		 "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
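
/*
 * Illustrative usage (an assumption, not from the original source): these
 * parameters are set at module load time, e.g.
 *
 *	modprobe hfi1 krcvqs=2,2,2 rcvhdrcnt=4096 hdrq_entsize=32
 *
 * which requests two non-control kernel receive queues on each of the first
 * three data VLs, a 4096-entry receive header queue, and 128B header queue
 * entries.
 */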
static inline u64 encode_rcv_header_entry_size(u16 size);

static struct idr hfi1_unit_table;
static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* Control context has to be always 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* Control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;

	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}
/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}
/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}
/**
 * hfi1_rcd_free - When reference is zero clean up.
 * @kref: pointer to an initialized rcd data structure
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	kfree(rcd);
}
/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 *
 * Return : reflect kref_get_unless_zero(), which returns non-zero on
 * increment, otherwise 0.
 */
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	return kref_get_unless_zero(&rcd->kref);
}
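
/*
 * Illustrative get/put pattern (a sketch, not from the original source):
 * transient users take a reference before touching an rcd and drop it when
 * done, so the final hfi1_free_ctxt() put cannot free the structure while
 * it is still in use:
 *
 *	struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index(dd, i);
 *
 *	if (rcd) {
 *		// ... use rcd ...
 *		hfi1_rcd_put(rcd);
 *	}
 */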
/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, we are EBUSY.
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}
/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * rcd array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt)
{
	if (ctxt < dd->num_rcv_contexts)
		return hfi1_rcd_get_by_index(dd, ctxt);

	return NULL;
}
/**
 * hfi1_rcd_get_by_index
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more index, get the protecting spinlock and then increment the
 * kref.
 *
 * The caller is responsible for making the _put().
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		if (!hfi1_rcd_get(rcd))
			rcd = NULL;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}
/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts -
				  dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(rcd);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;
		rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;

		mutex_init(&rcd->exp_mutex);
		spin_lock_init(&rcd->exp_lock);
		INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
		INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous context would have
		 * taken and we have to account for any extra groups assigned
		 * to the static (kernel) or dynamic (vnic/user) contexts.
		 */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;
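
		/*
		 * Worked example (illustrative numbers, not from the
		 * original source): with ngroups = 8, group_size = 8 and
		 * kctxt_ngroups = 2, kernel contexts 0 and 1 each get one
		 * extra group (bases 0 and 9), and context 2 starts at
		 * base = 2 + 2 * 8 = 18 groups, i.e. eager_base = 18 * 8 =
		 * 144 RcvArray entries.
		 */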
		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		rcd->rhf_offset =
			rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
		/*
		 * Simple Eager buffer allocation: we have already
		 * pre-allocated the number of RcvArray entry groups.
		 * Each ctxtdata structure holds the number of groups for
		 * that context.
		 *
		 * To follow CSR requirements and maintain cacheline
		 * alignment, make sure all sizes and bases are multiples of
		 * group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);
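
		/*
		 * Worked example (illustrative, not from the original
		 * source): with rcv_array_groups = 16, group_size = 8 and
		 * the default rcvarr_split of 25%, max_entries = 128,
		 * rcvtids = 32 and egrbufs.count = round_down(32, 8) = 32
		 * eager entries, leaving 96 entries for expected receives.
		 */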
		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.buffers),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.rcvtids),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size =
				__roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;

			/* Initialize TID flow generations for the context */
			hfi1_kern_init_ctxt_generations(rcd);
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}
/**
 * hfi1_free_ctxt - free context
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}
/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return a zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0; /* invalid */
}
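
/*
 * Mapping recap (per the hdrq_entsize module parameter; illustrative, not
 * from the original source): an entry size of 2 DWORDs (8B) encodes to 1,
 * 16 DWORDs (64B) to 2 and 32 DWORDs (128B) to 4; any other size encodes
 * to 0 and is rejected.
 */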
/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits/sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}
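
/*
 * Worked example (illustrative, not from the original source): a CCT entry
 * of 0x8064 decodes to shift = (0x8064 & 0xc000) >> 14 = 2 and
 * mult = 0x8064 & 0x3fff = 100, so max_pkt_time = 4096 yields
 * src = (4096 >> 2) * 100 = 102400 before the mask and shift into
 * SEND_STATIC_RATE_CONTROL.
 */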
static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */
	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}
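
/*
 * Unit note (illustrative, not from the original source): ccti_timer is in
 * units of 1.024 usec, so nsec = 1024 * ccti_timer converts it exactly to
 * nanoseconds; e.g. ccti_timer = 500 re-arms the hrtimer 512000 ns
 * (512 usec) after its last expiry.
 */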
/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */
	ppd->prev_link_width = LINK_WIDTH_DEFAULT;
	/*
	 * There are C_VL_COUNT number of PortVLXmitWait counters.
	 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
	 */
	for (i = 0; i < C_VL_COUNT + 1; i++) {
		ppd->port_vl_xmit_wait_last[i] = 0;
		ppd->vl_xmit_flit_cnt[i] = 0;
	}

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

	if (loopback) {
		dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
			   !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;
	ppd->link_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:
	dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
}
/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}
/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;
	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}
static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u32 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		if (HFI1_CAP_IS_KSET(TID_RDMA))
			rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}
/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
				    "hfi%d_%d",
				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
				    HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
				    dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
				    "hfi_link_%d_%d",
				    WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
				    1, /* max_active */
				    dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}
/**
 * enable_general_intr() - Enable the IRQs that will be handled by the
 * general interrupt handler.
 * @dd: valid devdata
 */
static void enable_general_intr(struct hfi1_devdata *dd)
{
	set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
	set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
	set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
	set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
	set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
	set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
	set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
}
/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
							 sizeof(u64),
							 &dd->rcvhdrtail_dummy_dma,
							 GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (!lastfail)
			lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		enable_general_intr(dd);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}
static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}
/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}
/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ctxtdata *rcd;
	unsigned pidx;
	int i;

	if (dd->flags & HFI1_SHUTDOWN)
		return;
	dd->flags |= HFI1_SHUTDOWN;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask and clean up interrupts */
	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
	msix_clean_up_interrupts(dd);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++) {
			rcd = hfi1_rcd_get_by_index(dd, i);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
			hfi1_rcd_put(rcd);
		}
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	sdma_exit(dd);
}
/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u32 e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);
	rcd->egrbufs.rcvtids = NULL;

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);
	rcd->egrbufs.alloced = 0;
	rcd->egrbufs.buffers = NULL;

	sc_free(rcd->sc);
	rcd->sc = NULL;

	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);

	rcd->subctxt_uregbase = NULL;
	rcd->subctxt_rcvegrbuf = NULL;
	rcd->subctxt_rcvhdr_base = NULL;
	rcd->opstats = NULL;
}
/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_devs_lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}
/**
 * hfi1_clean_devdata - cleans up per-unit data structure
 * @dd: pointer to a valid devdata structure
 *
 * It cleans up all data structures set up by hfi1_alloc_devdata().
 */
static void hfi1_clean_devdata(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	if (!list_empty(&dd->list)) {
		idr_remove(&hfi1_unit_table, dd->unit);
		list_del_init(&dd->list);
	}
	ad = release_asic_data(dd);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	free_percpu(dd->tx_opstats);
	dd->int_counter = NULL;
	dd->rcv_limit = NULL;
	dd->send_schedule = NULL;
	dd->tx_opstats = NULL;
	kfree(dd->comp_vect);
	dd->comp_vect = NULL;
	sdma_clean(dd, dd->num_sdma);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}
static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);

	hfi1_clean_devdata(dd);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}
/**
 * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
 * @pdev: Valid PCI device
 * @extra: How many bytes to alloc past the default
 *
 * Must be done via verbs allocator, because the verbs cleanup process
 * both does cleanup and free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
					       size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is * number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);
	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}
	dd->node = NUMA_NO_NODE;

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);

	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	mutex_init(&dd->dc8051_lock);
	init_waitqueue_head(&dd->event_queue);
	spin_lock_init(&dd->irq_src_lock);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
	if (!dd->tx_opstats) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
	if (!dd->comp_vect) {
		ret = -ENOMEM;
		goto bail;
	}

	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	hfi1_clean_devdata(dd);
	return ERR_PTR(ret);
}
/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}
static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
static void shutdown_one(struct pci_dev *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.shutdown = shutdown_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};
static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}
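
/*
 * Example (illustrative, not from the original source): loading with
 * krcvqs=2,2,2 gives krcvqsset = 3 and n_krcvqs = 2 + 2 + 2 = 6 non-control
 * kernel receive queues summed over the first three data VLs.
 */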
/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * sanitize receive interrupt count, time must wait until after
	 * the hardware type is known
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count greater than 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	ret = opfn_init();
	if (ret < 0) {
		pr_err("Failed to allocate opfn_wq");
		goto bail_dev;
	}

	hfi1_compute_tid_rdma_flow_wt();
	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}
module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	opfn_exit();
	node_affinity_destroy_all();
	hfi1_dbg_exit();

	idr_destroy(&hfi1_unit_table);
	dispose_firmware(); /* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);
/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 */
	for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];

		if (rcd) {
			hfi1_free_ctxt_rcv_groups(rcd);
			hfi1_free_ctxt(rcd);
		}
	}

	kfree(dd->rcd);
	dd->rcd = NULL;

	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}
/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);
	hfi1_comp_vectors_clean_up(dd);
	hfi1_dev_affinity_clean_up(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}
static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
{
	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		dd_dev_err(dd, "Receive header queue count too small\n");
		return -EINVAL;
	}

	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		dd_dev_err(dd,
			   "Receive header queue count cannot be greater than %u\n",
			   HFI1_MAX_HDRQ_EGRBUF_CNT);
		return -EINVAL;
	}

	if (thecnt % HDRQ_INCREMENT) {
		dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
			   thecnt, HDRQ_INCREMENT);
		return -EINVAL;
	}

	return 0;
}
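
/*
 * Example (illustrative, not from the original source): the default
 * rcvhdrcnt of 2048 passes all three checks, while a count of 2 fails the
 * minimum bound and any count that is not a multiple of HDRQ_INCREMENT
 * fails the divisibility check.
 */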
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate dev ids */
	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
			ent->device);
		ret = -ENODEV;
		goto bail;
	}

	/* Allocate the dd so we can get to work */
	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto bail;
	}

	/* Validate some global module parameters */
	ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
			   hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/* The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		dd_dev_info(dd, "Eager buffer size %u\n",
			    eager_buffer_size);
	} else {
		dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(dd);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	ret = hfi1_init_dd(dd);
	if (ret)
		goto clean_bail; /* error already printed */

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	/* setup vnic */
	hfi1_vnic_setup(dd);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  this should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		msix_clean_up_interrupts(dd);
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
			if (ppd->link_wq) {
				destroy_workqueue(ppd->link_wq);
				ppd->link_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		hfi1_vnic_cleanup(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail; /* everything already cleaned */
	}

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}
static void wait_for_clients(struct hfi1_devdata *dd)
{
	/*
	 * Remove the device init value and complete the device if there are
	 * no clients or wait for active clients to finish.
	 */
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	wait_for_completion(&dd->user_comp);
}
static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);

	/* remove the /dev hfi1 interface */
	hfi1_device_remove(dd);

	/* wait for existing user space clients to finish */
	wait_for_clients(dd);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/* cleanup vnic */
	hfi1_vnic_cleanup(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	postinit_cleanup(dd);
}
static void shutdown_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	shutdown_device(dd);
}
/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		gfp_t gfp_flags;

		amt = rcvhdrq_size(rcd);

		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			gfp_flags = GFP_KERNEL;
		else
			gfp_flags = GFP_USER;
		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
						  &rcd->rcvhdrq_dma,
						  gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
		    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr =
				dma_alloc_coherent(&dd->pcidev->dev,
						   PAGE_SIZE,
						   &rcd->rcvhdrqtailaddr_dma,
						   gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
		}
	}
	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
			& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_dma);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_dma);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}
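
/*
 * CSR packing example (illustrative, not from the original source): with
 * the default rcvhdrcnt of 2048, RCV_HDR_CNT is written with
 * (2048 >> HDRQ_SIZE_SHIFT) masked into its CNT field, and a 32-DWORD
 * entry size is written to RCV_HDR_ENT_SIZE as its encoding, 4.
 */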
/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user
 * contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0;
	gfp_t gfp_flags;
	u16 order, idx = 0;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value. Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffers sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));
	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_alloc_coherent(&dd->pcidev->dev,
					   rcd->egrbufs.rcvtid_size,
					   &rcd->egrbufs.buffers[idx].dma,
					   gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
				rcd->egrbufs.buffers[idx].dma;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 * - we are already using the lowest acceptable size
			 * - we are using one-pkt-per-egr-buffer (this implies
			 *   that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				ret = -ENOMEM;
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed,
			 * don't fail everything but continue with the next
			 * lower size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].dma =
					rcd->egrbufs.buffers[j].dma + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].dma + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].dma +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;
	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced,
		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

	/*
	 * Set the contexts rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
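
	/*
	 * Worked example (illustrative, not from the original source): with
	 * 48 allocated eager entries, the threshold becomes
	 * rounddown_pow_of_two(48 / 2) = 16, so head updates can be batched
	 * with a simple mask rather than a modulo.
	 */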
	/*
	 * Compute the expected RcvArray entry base. This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail_rcvegrbuf_phys;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].dma, order);
	}

	return 0;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].dma);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].dma = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}

	return ret;
}