/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <rdma/rdma_vt.h>

#define pr_fmt(fmt) DRIVER_NAME ": " fmt
/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
MODULE_PARM_DESC(
        num_user_contexts, "Set max number of user contexts to use");
uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;
static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");
static uint eager_buffer_size = (2 << 20); /* 2MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB");
static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");
static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");
unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
static inline u64 encode_rcv_header_entry_size(u16);

static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
unsigned long *hfi1_cpulist;
/*
 * Common code for creating the receive context array.
 */
int hfi1_create_ctxts(struct hfi1_devdata *dd)
{
        unsigned i;
        int ret;

        /* Control context has to be always 0 */
        BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

        dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
                               GFP_KERNEL, dd->node);
        if (!dd->rcd)
                goto nomem;

        /* create one or more kernel contexts */
        for (i = 0; i < dd->first_user_ctxt; ++i) {
                struct hfi1_pportdata *ppd;
                struct hfi1_ctxtdata *rcd;

                ppd = dd->pport + (i % dd->num_pports);
                rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
                if (!rcd) {
                        dd_dev_err(dd,
                                   "Unable to allocate kernel receive context, failing\n");
                        goto nomem;
                }
                /*
                 * Set up the kernel context flags here and now because they
                 * use default values for all receive side memories.  User
                 * contexts will be handled as they are created.
                 */
                rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
                        HFI1_CAP_KGET(NODROP_RHQ_FULL) |
                        HFI1_CAP_KGET(NODROP_EGR_FULL) |
                        HFI1_CAP_KGET(DMA_RTAIL);

                /* Control context must use DMA_RTAIL */
                if (rcd->ctxt == HFI1_CTRL_CTXT)
                        rcd->flags |= HFI1_CAP_DMA_RTAIL;

                rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
                if (!rcd->sc) {
                        dd_dev_err(dd,
                                   "Unable to allocate kernel send context, failing\n");
                        dd->rcd[rcd->ctxt] = NULL;
                        hfi1_free_ctxtdata(dd, rcd);
                        goto nomem;
                }

                ret = hfi1_init_ctxt(rcd->sc);
                if (ret < 0) {
                        dd_dev_err(dd,
                                   "Failed to setup kernel receive context, failing\n");
                        dd->rcd[rcd->ctxt] = NULL;
                        hfi1_free_ctxtdata(dd, rcd);
                        ret = -EFAULT;
                        goto bail;
                }
        }

        /*
         * Initialize aspm, to be done after gen3 transition and setting up
         * contexts and before enabling interrupts
         */
        aspm_init(dd);

        return 0;
nomem:
        ret = -ENOMEM;
bail:
        kfree(dd->rcd);
        dd->rcd = NULL;
        return ret;
}

/*
 * Common code for user and kernel context setup.
 */
struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
                                           int numa)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct hfi1_ctxtdata *rcd;
        unsigned kctxt_ngroups = 0;
        u32 base;

        if (dd->rcv_entries.nctxt_extra >
            dd->num_rcv_contexts - dd->first_user_ctxt)
                kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
                                 (dd->num_rcv_contexts - dd->first_user_ctxt));
        rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
        if (rcd) {
                u32 rcvtids, max_entries;

                hfi1_cdbg(PROC, "setting up context %u\n", ctxt);

                INIT_LIST_HEAD(&rcd->qp_wait_list);

                rcd->rcv_array_groups = dd->rcv_entries.ngroups;

                mutex_init(&rcd->exp_lock);

                /*
                 * Calculate the context's RcvArray entry starting point.
                 * We do this here because we have to take into account all
                 * the RcvArray entries that previous context would have
                 * taken and we have to account for any extra groups
                 * assigned to the kernel or user contexts.
                 */
                if (ctxt < dd->first_user_ctxt) {
                        if (ctxt < kctxt_ngroups) {
                                base = ctxt * (dd->rcv_entries.ngroups + 1);
                                rcd->rcv_array_groups++;
                        } else
                                base = kctxt_ngroups +
                                        (ctxt * dd->rcv_entries.ngroups);
                } else {
                        u16 ct = ctxt - dd->first_user_ctxt;

                        base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
                                kctxt_ngroups);
                        if (ct < dd->rcv_entries.nctxt_extra) {
                                base += ct * (dd->rcv_entries.ngroups + 1);
                                rcd->rcv_array_groups++;
                        } else
                                base += dd->rcv_entries.nctxt_extra +
                                        (ct * dd->rcv_entries.ngroups);
                }
                rcd->eager_base = base * dd->rcv_entries.group_size;
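                /*
                 * Worked example with hypothetical numbers: if
                 * rcv_entries.ngroups = 16, group_size = 8 and there are no
                 * extra groups (kctxt_ngroups = 0), kernel context 2 gets
                 * base = 2 * 16 = 32 and eager_base = 32 * 8 = 256, i.e. its
                 * eager RcvArray entries start right after the 2 * 128
                 * entries owned by contexts 0 and 1.
                 */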
                /* Validate and initialize Rcv Hdr Q variables */
                if (rcvhdrcnt % HDRQ_INCREMENT) {
                        dd_dev_err(dd,
                                   "ctxt%u: header queue count %d must be divisible by %lu\n",
                                   rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
                        goto bail;
                }
                rcd->rcvhdrq_cnt = rcvhdrcnt;
                rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
                /*
                 * Simple Eager buffer allocation: we have already pre-allocated
                 * the number of RcvArray entry groups. Each ctxtdata structure
                 * holds the number of groups for that context.
                 *
                 * To follow CSR requirements and maintain cacheline alignment,
                 * make sure all sizes and bases are multiples of group_size.
                 *
                 * The expected entry count is what is left after assigning
                 * eager.
                 */
                max_entries = rcd->rcv_array_groups *
                        dd->rcv_entries.group_size;
                rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
                rcd->egrbufs.count = round_down(rcvtids,
                                                dd->rcv_entries.group_size);
                if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
                        dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
                                   rcd->ctxt);
                        rcd->egrbufs.count = MAX_EAGER_ENTRIES;
                }
                hfi1_cdbg(PROC,
                          "ctxt%u: max Eager buffer RcvArray entries: %u\n",
                          rcd->ctxt, rcd->egrbufs.count);
                /*
                 * Allocate array that will hold the eager buffer accounting
                 * data.
                 * This will allocate the maximum possible buffer count based
                 * on the value of the RcvArray split parameter.
                 * The resulting value will be rounded down to the closest
                 * multiple of dd->rcv_entries.group_size.
                 */
                rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
                                               sizeof(*rcd->egrbufs.buffers),
                                               GFP_KERNEL);
                if (!rcd->egrbufs.buffers)
                        goto bail;
                rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
                                               sizeof(*rcd->egrbufs.rcvtids),
                                               GFP_KERNEL);
                if (!rcd->egrbufs.rcvtids)
                        goto bail;
                rcd->egrbufs.size = eager_buffer_size;
                /*
                 * The size of the buffers programmed into the RcvArray
                 * entries needs to be big enough to handle the highest
                 * MTU supported.
                 */
                if (rcd->egrbufs.size < hfi1_max_mtu) {
                        rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
                        hfi1_cdbg(PROC,
                                  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
                                  rcd->ctxt, rcd->egrbufs.size);
                }
                rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

                if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
                        rcd->opstats = kzalloc(sizeof(*rcd->opstats),
                                               GFP_KERNEL);
                        if (!rcd->opstats)
                                goto bail;
                }
        }
        return rcd;
bail:
        kfree(rcd->egrbufs.rcvtids);
        kfree(rcd->egrbufs.buffers);
        kfree(rcd);
        return NULL;
}

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return a zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
        /* there are only 3 valid receive header entry sizes */
        if (size == 2)
                return 1;
        if (size == 16)
                return 2;
        if (size == 32)
                return 4;
        return 0; /* invalid */
}
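
/*
 * Per the hdrq_entsize module parameter description above, the three
 * accepted entry sizes are 2 DWs (8 bytes), 16 DWs (64 bytes) and
 * 32 DWs (128 bytes); anything else makes this helper return 0, which
 * init_one() uses to reject an invalid hdrq_entsize value.
 */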
/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct cc_state *cc_state;
        int i;
        u16 cce, ccti_limit, max_ccti = 0;
        u16 shift, mult;
        u64 src;
        u32 current_egress_rate; /* Mbits /sec */
        u32 max_pkt_time;
        /*
         * max_pkt_time is the maximum packet egress time in units
         * of the fabric clock period 1/(805 MHz).
         */

        cc_state = get_cc_state(ppd);

        if (!cc_state)
                /*
                 * This should _never_ happen - rcu_read_lock() is held,
                 * and set_link_ipg() should not be called if cc_state
                 * is NULL.
                 */
                return;

        for (i = 0; i < OPA_MAX_SLS; i++) {
                u16 ccti = ppd->cca_timer[i].ccti;

                if (ccti > max_ccti)
                        max_ccti = ccti;
        }

        ccti_limit = cc_state->cct.ccti_limit;
        if (max_ccti > ccti_limit)
                max_ccti = ccti_limit;

        cce = cc_state->cct.entries[max_ccti].entry;
        shift = (cce & 0xc000) >> 14;
        mult = (cce & 0x3fff);
        current_egress_rate = active_egress_rate(ppd);

        max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

        src = (max_pkt_time >> shift) * mult;

        src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
        src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

        write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
        struct cca_timer *cca_timer;
        struct hfi1_pportdata *ppd;
        int sl;
        u16 ccti_timer, ccti_min;
        struct cc_state *cc_state;
        unsigned long flags;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        cca_timer = container_of(t, struct cca_timer, hrtimer);
        ppd = cca_timer->ppd;
        sl = cca_timer->sl;

        cc_state = get_cc_state(ppd);

        if (!cc_state)
                return HRTIMER_NORESTART;

        /*
         * 1) decrement ccti for SL
         * 2) calculate IPG for link (set_link_ipg())
         * 3) restart timer, unless ccti is at min value
         */
        ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
        ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

        spin_lock_irqsave(&ppd->cca_timer_lock, flags);

        if (cca_timer->ccti > ccti_min) {
                cca_timer->ccti--;
                set_link_ipg(ppd);
        }

        if (cca_timer->ccti > ccti_min) {
                unsigned long nsec = 1024 * ccti_timer;
                /* ccti_timer is in units of 1.024 usec */
                hrtimer_forward_now(t, ns_to_ktime(nsec));
                ret = HRTIMER_RESTART;
        }

        spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
        return ret;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
                         struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
        int i, size;
        uint default_pkey_idx;

        ppd->dd = dd;
        ppd->hw_pidx = hw_pidx;
        ppd->port = port; /* IB port number, not index */

        default_pkey_idx = 1;

        ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
        if (loopback) {
                hfi1_early_err(&pdev->dev,
                               "Faking data partition 0x8001 in idx %u\n",
                               !default_pkey_idx);
                ppd->pkeys[!default_pkey_idx] = 0x8001;
        }

        INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
        INIT_WORK(&ppd->link_up_work, handle_link_up);
        INIT_WORK(&ppd->link_down_work, handle_link_down);
        INIT_WORK(&ppd->freeze_work, handle_freeze);
        INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
        INIT_WORK(&ppd->sma_message_work, handle_sma_message);
        INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
        INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
        INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

        mutex_init(&ppd->hls_lock);
        spin_lock_init(&ppd->sdma_alllock);
        spin_lock_init(&ppd->qsfp_info.qsfp_lock);

        ppd->qsfp_info.ppd = ppd;
        ppd->sm_trap_qp = 0x0;

        spin_lock_init(&ppd->cca_timer_lock);

        for (i = 0; i < OPA_MAX_SLS; i++) {
                hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
                             HRTIMER_MODE_REL);
                ppd->cca_timer[i].ppd = ppd;
                ppd->cca_timer[i].sl = i;
                ppd->cca_timer[i].ccti = 0;
                ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
        }

        ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

        spin_lock_init(&ppd->cc_state_lock);
        spin_lock_init(&ppd->cc_log_lock);
        size = sizeof(struct cc_state);
        RCU_INIT_POINTER(ppd->cc_state, kzalloc(size, GFP_KERNEL));
        if (!rcu_dereference(ppd->cc_state))
                goto bail;
        return;

bail:
        hfi1_early_err(&pdev->dev,
                       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
        return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
        int i;

        /*
         * Ensure chip does no sends or receives, tail updates, or
         * pioavail updates while we re-initialize.  This is mostly
         * for the driver data structures, not chip registers.
         */
        for (i = 0; i < dd->num_rcv_contexts; i++)
                hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
                             HFI1_RCVCTRL_INTRAVAIL_DIS |
                             HFI1_RCVCTRL_TAILUPD_DIS, i);
        pio_send_control(dd, PSC_GLOBAL_DISABLE);
        for (i = 0; i < dd->num_send_contexts; i++)
                sc_disable(dd->send_contexts[i].sc);

        return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
        u32 rcvmask;
        u32 i;

        /* enable PIO send */
        pio_send_control(dd, PSC_GLOBAL_ENABLE);

        /*
         * Enable kernel ctxts' receive and receive interrupt.
         * Other ctxts done as user opens and initializes them.
         */
        for (i = 0; i < dd->first_user_ctxt; ++i) {
                rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
                rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
                        HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
                if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
                        rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
                if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
                        rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
                if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
                        rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
                hfi1_rcvctrl(dd, rcvmask, i);
                sc_enable(dd->rcd[i]->sc);
        }
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
        int pidx;
        struct hfi1_pportdata *ppd;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (!ppd->hfi1_wq) {
                        ppd->hfi1_wq =
                                alloc_workqueue(
                                        "hfi%d_%d",
                                        WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
                                        dd->num_sdma,
                                        dd->unit, pidx);
                        if (!ppd->hfi1_wq)
                                goto wq_error;
                }
        }
        return 0;
wq_error:
        pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (ppd->hfi1_wq) {
                        destroy_workqueue(ppd->hfi1_wq);
                        ppd->hfi1_wq = NULL;
                }
        }
        return -ENOMEM;
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
        int ret = 0, pidx, lastfail = 0;
        unsigned i;
        size_t len;
        struct hfi1_ctxtdata *rcd;
        struct hfi1_pportdata *ppd;

        /* Set up recv low level handlers */
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
                kdeth_process_expected;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
                kdeth_process_eager;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
                process_receive_error;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
                process_receive_bypass;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
                process_receive_invalid;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
                process_receive_invalid;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
                process_receive_invalid;
        dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

        /* Set up send low level handlers */
        dd->process_pio_send = hfi1_verbs_send_pio;
        dd->process_dma_send = hfi1_verbs_send_dma;
        dd->pio_inline_send = pio_copy;

        if (is_ax(dd))
                atomic_set(&dd->drop_packet, DROP_PACKET_ON);
        else
                atomic_set(&dd->drop_packet, DROP_PACKET_OFF);

        /* make sure the link is not "up" */
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                ppd->linkup = 0;
        }

        if (reinit)
                ret = init_after_reset(dd);
        else
                ret = loadtime_init(dd);
        if (ret)
                goto done;

        /* allocate dummy tail memory for all receive contexts */
        dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
                &dd->pcidev->dev, sizeof(u64),
                &dd->rcvhdrtail_dummy_physaddr,
                GFP_KERNEL);

        if (!dd->rcvhdrtail_dummy_kvaddr) {
                dd_dev_err(dd, "cannot allocate dummy tail memory\n");
                ret = -ENOMEM;
                goto done;
        }

        /* dd->rcd can be NULL if early initialization failed */
        for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
                /*
                 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
                 * re-init, the simplest way to handle this is to free
                 * existing, and re-allocate.
                 * Need to re-create rest of ctxt 0 ctxtdata as well.
                 */
                rcd = dd->rcd[i];
                if (!rcd)
                        continue;

                rcd->do_interrupt = &handle_receive_interrupt;

                lastfail = hfi1_create_rcvhdrq(dd, rcd);
                if (!lastfail)
                        lastfail = hfi1_setup_eagerbufs(rcd);
                if (lastfail)
                        dd_dev_err(dd,
                                   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
        }

        /* Allocate enough memory for user event notification. */
        len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
                         sizeof(*dd->events));
        dd->events = vmalloc_user(len);
        if (!dd->events)
                dd_dev_err(dd, "Failed to allocate user events page\n");
        /*
         * Allocate a page for device and port status.
         * Page will be shared amongst all user processes.
         */
        dd->status = vmalloc_user(PAGE_SIZE);
        if (!dd->status)
                dd_dev_err(dd, "Failed to allocate dev status page\n");
        else
                dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
                                             sizeof(dd->status->freezemsg));
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (dd->status)
                        /* Currently, we only have one port */
                        ppd->statusp = &dd->status->port;
        }

        /* enable chip even if we have an error, so we can debug cause */
        enable_chip(dd);

done:
        /*
         * Set status even if port serdes is not initialized
         * so that diags will work.
         */
        if (dd->status)
                dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
                        HFI1_STATUS_INITTED;
        if (!ret) {
                /* enable all interrupts from the chip */
                set_intr_state(dd, 1);

                /* chip is OK for user apps; mark it as initialized */
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;

                        /*
                         * start the serdes - must be after interrupts are
                         * enabled so we are notified when the link goes up
                         */
                        lastfail = bringup_serdes(ppd);
                        if (lastfail)
                                dd_dev_info(dd,
                                            "Failed to bring up port %u\n",
                                            ppd->port);

                        /*
                         * Set status even if port serdes is not initialized
                         * so that diags will work.
                         */
                        if (ppd->statusp)
                                *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
                                        HFI1_STATUS_INITTED;
                        if (!ppd->link_speed_enabled)
                                continue;
                }
        }

        /* if ret is non-zero, we probably should do some cleanup here... */
        return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
        return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
        struct hfi1_devdata *dd;
        unsigned long flags;

        spin_lock_irqsave(&hfi1_devs_lock, flags);
        dd = __hfi1_lookup(unit);
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);

        return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd;
        int pidx;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (ppd->led_override_timer.data) {
                        del_timer_sync(&ppd->led_override_timer);
                        atomic_set(&ppd->led_override_timer_active, 0);
                }
        }
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd;
        unsigned pidx;
        int i;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                if (ppd->statusp)
                        *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
                                           HFI1_STATUS_IB_READY);
        }
        dd->flags &= ~HFI1_INITTED;

        /* mask interrupts, but not errors */
        set_intr_state(dd, 0);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                for (i = 0; i < dd->num_rcv_contexts; i++)
                        hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
                                     HFI1_RCVCTRL_CTXT_DIS |
                                     HFI1_RCVCTRL_INTRAVAIL_DIS |
                                     HFI1_RCVCTRL_PKEY_DIS |
                                     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
                /*
                 * Gracefully stop all sends allowing any in progress to
                 * trickle out first.
                 */
                for (i = 0; i < dd->num_send_contexts; i++)
                        sc_flush(dd->send_contexts[i].sc);
        }

        /*
         * Enough for anything that's going to trickle out to have actually
         * done so.
         */
        udelay(20);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                /* disable all contexts */
                for (i = 0; i < dd->num_send_contexts; i++)
                        sc_disable(dd->send_contexts[i].sc);
                /* disable the send device */
                pio_send_control(dd, PSC_GLOBAL_DISABLE);

                shutdown_led_override(ppd);

                /*
                 * Clear SerdesEnable.
                 * We can't count on interrupts since we are stopping.
                 */
                hfi1_quiet_serdes(ppd);

                if (ppd->hfi1_wq) {
                        destroy_workqueue(ppd->hfi1_wq);
                        ppd->hfi1_wq = NULL;
                }
        }
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after hfi1_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
        unsigned e;

        if (!rcd)
                return;

        if (rcd->rcvhdrq) {
                dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
                                  rcd->rcvhdrq, rcd->rcvhdrq_phys);
                rcd->rcvhdrq = NULL;
                if (rcd->rcvhdrtail_kvaddr) {
                        dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
                                          (void *)rcd->rcvhdrtail_kvaddr,
                                          rcd->rcvhdrqtailaddr_phys);
                        rcd->rcvhdrtail_kvaddr = NULL;
                }
        }

        /* all the RcvArray entries should have been cleared by now */
        kfree(rcd->egrbufs.rcvtids);

        for (e = 0; e < rcd->egrbufs.alloced; e++) {
                if (rcd->egrbufs.buffers[e].phys)
                        dma_free_coherent(&dd->pcidev->dev,
                                          rcd->egrbufs.buffers[e].len,
                                          rcd->egrbufs.buffers[e].addr,
                                          rcd->egrbufs.buffers[e].phys);
        }
        kfree(rcd->egrbufs.buffers);

        vfree(rcd->user_event_mask);
        vfree(rcd->subctxt_uregbase);
        vfree(rcd->subctxt_rcvegrbuf);
        vfree(rcd->subctxt_rcvhdr_base);
        kfree(rcd->opstats);
        kfree(rcd);
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * free the structure.  Must be holding hfi1_devs_lock.
 */
static void release_asic_data(struct hfi1_devdata *dd)
{
        int other;

        if (!dd->asic_data)
                return;
        dd->asic_data->dds[dd->hfi1_id] = NULL;
        other = dd->hfi1_id ? 0 : 1;
        if (!dd->asic_data->dds[other]) {
                /* we are the last holder, free it */
                kfree(dd->asic_data);
        }
        dd->asic_data = NULL;
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
        struct hfi1_devdata *dd =
                container_of(kobj, struct hfi1_devdata, kobj);
        unsigned long flags;

        spin_lock_irqsave(&hfi1_devs_lock, flags);
        idr_remove(&hfi1_unit_table, dd->unit);
        list_del(&dd->list);
        release_asic_data(dd);
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        free_platform_config(dd);
        rcu_barrier(); /* wait for rcu callbacks to complete */
        free_percpu(dd->int_counter);
        free_percpu(dd->rcv_limit);
        hfi1_dev_affinity_free(dd);
        free_percpu(dd->send_schedule);
        rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static struct kobj_type hfi1_devdata_type = {
        .release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
        kobject_put(&dd->kobj);
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
        unsigned long flags;
        struct hfi1_devdata *dd;
        int ret, nports;

        /* extra is * number of ports */
        nports = extra / sizeof(struct hfi1_pportdata);

        dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
                                                     nports);
        if (!dd)
                return ERR_PTR(-ENOMEM);
        dd->num_pports = nports;
        dd->pport = (struct hfi1_pportdata *)(dd + 1);

        INIT_LIST_HEAD(&dd->list);
        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&hfi1_devs_lock, flags);

        ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
        if (ret >= 0) {
                dd->unit = ret;
                list_add(&dd->list, &hfi1_dev_list);
        }

        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        idr_preload_end();

        if (ret < 0) {
                hfi1_early_err(&pdev->dev,
                               "Could not allocate unit ID: error %d\n", -ret);
                goto bail;
        }
        /*
         * Initialize all locks for the device. This needs to be as early as
         * possible so locks are usable.
         */
        spin_lock_init(&dd->sc_lock);
        spin_lock_init(&dd->sendctrl_lock);
        spin_lock_init(&dd->rcvctrl_lock);
        spin_lock_init(&dd->uctxt_lock);
        spin_lock_init(&dd->hfi1_diag_trans_lock);
        spin_lock_init(&dd->sc_init_lock);
        spin_lock_init(&dd->dc8051_lock);
        spin_lock_init(&dd->dc8051_memlock);
        seqlock_init(&dd->sc2vl_lock);
        spin_lock_init(&dd->sde_map_lock);
        spin_lock_init(&dd->pio_map_lock);
        init_waitqueue_head(&dd->event_queue);

        dd->int_counter = alloc_percpu(u64);
        if (!dd->int_counter) {
                ret = -ENOMEM;
                hfi1_early_err(&pdev->dev,
                               "Could not allocate per-cpu int_counter\n");
                goto bail;
        }

        dd->rcv_limit = alloc_percpu(u64);
        if (!dd->rcv_limit) {
                ret = -ENOMEM;
                hfi1_early_err(&pdev->dev,
                               "Could not allocate per-cpu rcv_limit\n");
                goto bail;
        }

        dd->send_schedule = alloc_percpu(u64);
        if (!dd->send_schedule) {
                ret = -ENOMEM;
                hfi1_early_err(&pdev->dev,
                               "Could not allocate per-cpu send_schedule\n");
                goto bail;
        }

        if (!hfi1_cpulist_count) {
                u32 count = num_online_cpus();

                hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
                                       GFP_KERNEL);
                if (hfi1_cpulist)
                        hfi1_cpulist_count = count;
                else
                        hfi1_early_err(
                        &pdev->dev,
                        "Could not alloc cpulist info, cpu affinity might be wrong\n");
        }
        kobject_init(&dd->kobj, &hfi1_devdata_type);
        return dd;

bail:
        if (!list_empty(&dd->list))
                list_del_init(&dd->list);
        rvt_dealloc_device(&dd->verbs_dev.rdi);
        return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
        if (dd->flags & HFI1_INITTED) {
                u32 pidx;

                dd->flags &= ~HFI1_INITTED;
                if (dd->pport)
                        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                                struct hfi1_pportdata *ppd;

                                ppd = dd->pport + pidx;
                                if (dd->flags & HFI1_PRESENT)
                                        set_link_state(ppd, HLS_DN_DISABLE);

                                if (ppd->statusp)
                                        *ppd->statusp &= ~HFI1_STATUS_IB_READY;
                        }
        }

        /*
         * Mark as having had an error for driver, and also
         * for /sys and status word mapped to user programs.
         * This marks unit as not usable, until reset.
         */
        if (dd->status)
                dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

static const struct pci_device_id hfi1_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
        .name = DRIVER_NAME,
        .probe = init_one,
        .remove = remove_one,
        .id_table = hfi1_pci_tbl,
        .err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
        int i;

        for (i = 0; i < krcvqsset; i++)
                n_krcvqs += krcvqs[i];
}
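
/*
 * Example (illustrative values only): loading with krcvqs=2,2,2 sets
 * krcvqsset to 3, so compute_krcvqs() above leaves n_krcvqs = 6
 * non-control kernel receive queues.
 */
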
/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
        int ret;

        ret = dev_init();
        if (ret)
                goto bail;

        /* validate max MTU before any devices start */
        if (!valid_opa_max_mtu(hfi1_max_mtu)) {
                pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
                       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
                hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
        }
        /* valid CUs run from 1-128 in powers of 2 */
        if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
                hfi1_cu = 1;
        /* valid credit return threshold is 0-100, variable is unsigned */
        if (user_credit_return_threshold > 100)
                user_credit_return_threshold = 100;

        compute_krcvqs();
        /*
         * sanitize receive interrupt count, time must wait until after
         * the hardware type is known
         */
        if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
                rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
        /* reject invalid combinations */
        if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
                pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
                rcv_intr_count = 1;
        }
        if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
                /*
                 * Avoid indefinite packet delivery by requiring a timeout
                 * if count is > 1.
                 */
                pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
                rcv_intr_timeout = 1;
        }
        if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
                /*
                 * The dynamic algorithm expects a non-zero timeout
                 * and a count > 1.
                 */
                pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
                rcv_intr_dynamic = 0;
        }

        /* sanitize link CRC options */
        link_crc_mask &= SUPPORTED_CRCS;

        /*
         * These must be called before the driver is registered with
         * the PCI subsystem.
         */
        idr_init(&hfi1_unit_table);

        hfi1_dbg_init();
        ret = hfi1_wss_init();
        if (ret < 0)
                goto bail_wss;
        ret = pci_register_driver(&hfi1_pci_driver);
        if (ret < 0) {
                pr_err("Unable to register driver: error %d\n", -ret);
                goto bail_dev;
        }
        goto bail; /* all OK */

bail_dev:
        hfi1_wss_exit();
bail_wss:
        hfi1_dbg_exit();
        idr_destroy(&hfi1_unit_table);
        dev_cleanup();
bail:
        return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
        pci_unregister_driver(&hfi1_pci_driver);
        hfi1_wss_exit();
        hfi1_dbg_exit();
        hfi1_cpulist_count = 0;
        kfree(hfi1_cpulist);

        idr_destroy(&hfi1_unit_table);
        dispose_firmware(); /* asymmetric with obtain_firmware() */
        dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
        int ctxt;
        int pidx;
        struct hfi1_ctxtdata **tmp;
        unsigned long flags;

        /* users can't do anything more with chip */
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                struct hfi1_pportdata *ppd = &dd->pport[pidx];
                struct cc_state *cc_state;
                int i;

                if (ppd->statusp)
                        *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

                for (i = 0; i < OPA_MAX_SLS; i++)
                        hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

                spin_lock(&ppd->cc_state_lock);
                cc_state = get_cc_state(ppd);
                RCU_INIT_POINTER(ppd->cc_state, NULL);
                spin_unlock(&ppd->cc_state_lock);

                if (cc_state)
                        call_rcu(&cc_state->rcu, cc_state_reclaim);
        }

        free_credit_return(dd);

        /*
         * Free any resources still in use (usually just kernel contexts)
         * at unload; we do for ctxtcnt, because that's what we allocate.
         * We acquire lock to be really paranoid that rcd isn't being
         * accessed from some interrupt-related code (that should not happen,
         * but best to be sure).
         */
        spin_lock_irqsave(&dd->uctxt_lock, flags);
        tmp = dd->rcd;
        dd->rcd = NULL;
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        if (dd->rcvhdrtail_dummy_kvaddr) {
                dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
                                  (void *)dd->rcvhdrtail_dummy_kvaddr,
                                  dd->rcvhdrtail_dummy_physaddr);
                dd->rcvhdrtail_dummy_kvaddr = NULL;
        }

        for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
                struct hfi1_ctxtdata *rcd = tmp[ctxt];

                tmp[ctxt] = NULL; /* debugging paranoia */
                if (rcd) {
                        hfi1_clear_tids(rcd);
                        hfi1_free_ctxtdata(dd, rcd);
                }
        }
        kfree(tmp);

        /* must follow rcv context free - need to remove rcv's hooks */
        for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
                sc_free(dd->send_contexts[ctxt].sc);
        dd->num_send_contexts = 0;
        kfree(dd->send_contexts);
        dd->send_contexts = NULL;
        kfree(dd->hw_to_sw);
        dd->hw_to_sw = NULL;
        kfree(dd->boardname);
        vfree(dd->events);
        vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
        hfi1_start_cleanup(dd);

        hfi1_pcie_ddcleanup(dd);
        hfi1_pcie_cleanup(dd->pcidev);

        cleanup_device_data(dd);

        hfi1_free_devdata(dd);
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int ret = 0, j, pidx, initfail;
        struct hfi1_devdata *dd = NULL;
        struct hfi1_pportdata *ppd;

        /* First, lock the non-writable module parameters */
        HFI1_CAP_LOCK();

        /* Validate some global module parameters */
        if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
                hfi1_early_err(&pdev->dev, "Header queue count too small\n");
                ret = -EINVAL;
                goto bail;
        }
        if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
                hfi1_early_err(&pdev->dev,
                               "Receive header queue count cannot be greater than %u\n",
                               HFI1_MAX_HDRQ_EGRBUF_CNT);
                ret = -EINVAL;
                goto bail;
        }
        /* use the encoding function as a sanitization check */
        if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
                hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
                               hfi1_hdrq_entsize);
                ret = -EINVAL;
                goto bail;
        }

        /* The receive eager buffer size must be set before the receive
         * contexts are created.
         *
         * Set the eager buffer size.  Validate that it falls in a range
         * allowed by the hardware - all powers of 2 between the min and
         * max.  The maximum valid MTU is within the eager buffer range
         * so we do not need to cap the max_mtu by an eager buffer size
         * setting.
         */
        if (eager_buffer_size) {
                if (!is_power_of_2(eager_buffer_size))
                        eager_buffer_size =
                                roundup_pow_of_two(eager_buffer_size);
                eager_buffer_size =
                        clamp_val(eager_buffer_size,
                                  MIN_EAGER_BUFFER * 8,
                                  MAX_EAGER_BUFFER_TOTAL);
                hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
                                eager_buffer_size);
        } else {
                hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
                ret = -EINVAL;
                goto bail;
        }

        /* restrict value of hfi1_rcvarr_split */
        hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

        ret = hfi1_pcie_init(pdev, ent);
        if (ret)
                goto bail;

        /*
         * Do device-specific initialization, function table setup, dd
         * allocation, etc.
         */
        switch (ent->device) {
        case PCI_DEVICE_ID_INTEL0:
        case PCI_DEVICE_ID_INTEL1:
                dd = hfi1_init_dd(pdev, ent);
                break;
        default:
                hfi1_early_err(&pdev->dev,
                               "Failing on unknown Intel deviceid 0x%x\n",
                               ent->device);
                ret = -ENODEV;
        }

        if (IS_ERR(dd))
                ret = PTR_ERR(dd);
        if (ret)
                goto clean_bail; /* error already printed */

        ret = create_workqueues(dd);
        if (ret)
                goto clean_bail;

        /* do the generic initialization */
        initfail = hfi1_init(dd, 0);

        ret = hfi1_register_ib_device(dd);

        /*
         * Now ready for use.  This should be cleared whenever we
         * detect a reset, or initiate one.  If earlier failure,
         * we still create devices, so diags, etc. can be used
         * to determine cause of problem.
         */
        if (!initfail && !ret) {
                dd->flags |= HFI1_INITTED;
                /* create debugfs files after init and ib register */
                hfi1_dbg_ibdev_init(&dd->verbs_dev);
        }

        j = hfi1_device_create(dd);
        if (j)
                dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

        if (initfail || ret) {
                stop_timers(dd);
                flush_workqueue(ib_wq);
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        hfi1_quiet_serdes(dd->pport + pidx);
                        ppd = dd->pport + pidx;
                        if (ppd->hfi1_wq) {
                                destroy_workqueue(ppd->hfi1_wq);
                                ppd->hfi1_wq = NULL;
                        }
                }
                if (!j)
                        hfi1_device_remove(dd);
                if (!ret)
                        hfi1_unregister_ib_device(dd);
                postinit_cleanup(dd);
                if (initfail)
                        ret = initfail;
                goto bail; /* everything already cleaned */
        }

        return 0;

clean_bail:
        hfi1_pcie_cleanup(pdev);
bail:
        return ret;
}

static void remove_one(struct pci_dev *pdev)
{
        struct hfi1_devdata *dd = pci_get_drvdata(pdev);

        /* close debugfs files before ib unregister */
        hfi1_dbg_ibdev_exit(&dd->verbs_dev);

        /* unregister from IB core */
        hfi1_unregister_ib_device(dd);

        /*
         * Disable the IB link, disable interrupts on the device,
         * clear dma engines, etc.
         */
        shutdown_device(dd);

        stop_timers(dd);

        /* wait until all of our (qsfp) queue_work() calls complete */
        flush_workqueue(ib_wq);

        hfi1_device_remove(dd);

        postinit_cleanup(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
        unsigned amt;
        u64 reg;

        if (!rcd->rcvhdrq) {
                dma_addr_t phys_hdrqtail;
                gfp_t gfp_flags;

                /*
                 * rcvhdrqentsize is in DWs, so we have to convert to bytes
                 * (* sizeof(u32)).
                 */
                amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
                                 sizeof(u32));
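                /*
                 * For example, with the default module parameters
                 * (rcvhdrcnt = 2048, hdrq_entsize = 32 DWs) this works out to
                 * 2048 * 32 * 4 = 256KB of DMA-coherent memory per context.
                 * The arithmetic is illustrative; actual values depend on the
                 * loaded parameters.
                 */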
                gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
                        GFP_USER : GFP_KERNEL;
                rcd->rcvhdrq = dma_zalloc_coherent(
                        &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
                        gfp_flags | __GFP_COMP);

                if (!rcd->rcvhdrq) {
                        dd_dev_err(dd,
                                   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
                                   amt, rcd->ctxt);
                        goto bail;
                }

                if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
                        rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
                                &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
                                gfp_flags);
                        if (!rcd->rcvhdrtail_kvaddr)
                                goto bail_free;
                        rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
                }

                rcd->rcvhdrq_size = amt;
        }
        /*
         * These values are per-context:
         *      RcvHdrCnt
         *      RcvHdrEntSize
         *      RcvHdrSize
         */
        reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
                        & RCV_HDR_CNT_CNT_MASK)
                << RCV_HDR_CNT_CNT_SHIFT;
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
        reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
                        & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
                << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
        reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
                << RCV_HDR_SIZE_HDR_SIZE_SHIFT;
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);

        /*
         * Program dummy tail address for every receive context
         * before enabling any receive context
         */
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
                        dd->rcvhdrtail_dummy_physaddr);

        return 0;

bail_free:
        dd_dev_err(dd,
                   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
                   rcd->ctxt);
        vfree(rcd->user_event_mask);
        rcd->user_event_mask = NULL;
        dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
                          rcd->rcvhdrq_phys);
        rcd->rcvhdrq = NULL;
bail:
        return -ENOMEM;
}

/**
 * allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
        struct hfi1_devdata *dd = rcd->dd;
        u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
        gfp_t gfp_flags;
        u16 order;
        int ret = 0;
        u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

        /*
         * GFP_USER, but without GFP_FS, so buffer cache can be
         * coalesced (we hope); otherwise, even at order 4,
         * heavy filesystem activity makes these fail, and we can
         * use compound pages.
         */
        gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

        /*
         * The minimum size of the eager buffers is a group of MTU-sized
         * buffers.
         * The global eager_buffer_size parameter is checked against the
         * theoretical lower limit of the value. Here, we check against the
         * MTU.
         */
        if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
                rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
        /*
         * If using one-pkt-per-egr-buffer, lower the eager buffer
         * size to the max MTU (page-aligned).
         */
        if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
                rcd->egrbufs.rcvtid_size = round_mtu;

        /*
         * Eager buffers sizes of 1MB or less require smaller TID sizes
         * to satisfy the "multiple of 8 RcvArray entries" requirement.
         */
        if (rcd->egrbufs.size <= (1 << 20))
                rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
                        rounddown_pow_of_two(rcd->egrbufs.size / 8));
        while (alloced_bytes < rcd->egrbufs.size &&
               rcd->egrbufs.alloced < rcd->egrbufs.count) {
                rcd->egrbufs.buffers[idx].addr =
                        dma_zalloc_coherent(&dd->pcidev->dev,
                                            rcd->egrbufs.rcvtid_size,
                                            &rcd->egrbufs.buffers[idx].phys,
                                            gfp_flags);
                if (rcd->egrbufs.buffers[idx].addr) {
                        rcd->egrbufs.buffers[idx].len =
                                rcd->egrbufs.rcvtid_size;
                        rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
                                rcd->egrbufs.buffers[idx].addr;
                        rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].phys =
                                rcd->egrbufs.buffers[idx].phys;
                        rcd->egrbufs.alloced++;
                        alloced_bytes += rcd->egrbufs.rcvtid_size;
                        idx++;
                } else {
                        u32 new_size, i, j;
                        u64 offset = 0;

                        /*
                         * Fail the eager buffer allocation if:
                         *   - we are already using the lowest acceptable size
                         *   - we are using one-pkt-per-egr-buffer (this implies
                         *     that we are accepting only one size)
                         */
                        if (rcd->egrbufs.rcvtid_size == round_mtu ||
                            !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
                                dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
                                           rcd->ctxt);
                                goto bail_rcvegrbuf_phys;
                        }

                        new_size = rcd->egrbufs.rcvtid_size / 2;

                        /*
                         * If the first attempt to allocate memory failed, don't
                         * fail everything but continue with the next lower
                         * size.
                         */
                        if (idx == 0) {
                                rcd->egrbufs.rcvtid_size = new_size;
                                continue;
                        }

                        /*
                         * Re-partition already allocated buffers to a smaller
                         * size.
                         */
                        rcd->egrbufs.alloced = 0;
                        for (i = 0, j = 0, offset = 0; j < idx; i++) {
                                if (i >= rcd->egrbufs.count)
                                        break;
                                rcd->egrbufs.rcvtids[i].phys =
                                        rcd->egrbufs.buffers[j].phys + offset;
                                rcd->egrbufs.rcvtids[i].addr =
                                        rcd->egrbufs.buffers[j].addr + offset;
                                rcd->egrbufs.alloced++;
                                if ((rcd->egrbufs.buffers[j].phys + offset +
                                     new_size) ==
                                    (rcd->egrbufs.buffers[j].phys +
                                     rcd->egrbufs.buffers[j].len)) {
                                        j++;
                                        offset = 0;
                                } else
                                        offset += new_size;
                        }
                        rcd->egrbufs.rcvtid_size = new_size;
                }
        }
        rcd->egrbufs.numbufs = idx;
        rcd->egrbufs.size = alloced_bytes;

        hfi1_cdbg(PROC,
                  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
                  rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size,
                  rcd->egrbufs.size);

        /*
         * Set the contexts rcv array head update threshold to the closest
         * power of 2 (so we can use a mask instead of modulo) below half
         * the allocated entries.
         */
        rcd->egrbufs.threshold =
                rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
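        /*
         * Example with illustrative numbers: if 1000 eager buffers were
         * allocated, the head update threshold above becomes
         * rounddown_pow_of_two(500) = 256.
         */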
        /*
         * Compute the expected RcvArray entry base. This is done after
         * allocating the eager buffers in order to maximize the
         * expected RcvArray entries for the context.
         */
        max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
        egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
        rcd->expected_count = max_entries - egrtop;
        if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
                rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

        rcd->expected_base = rcd->eager_base + egrtop;
        hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
                  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
                  rcd->eager_base, rcd->expected_base);

        if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
                hfi1_cdbg(PROC,
                          "ctxt%u: current Eager buffer size is invalid %u\n",
                          rcd->ctxt, rcd->egrbufs.rcvtid_size);
                ret = -EINVAL;
                goto bail;
        }

        for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
                hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
                             rcd->egrbufs.rcvtids[idx].phys, order);
        }
        goto bail;

bail_rcvegrbuf_phys:
        for (idx = 0; idx < rcd->egrbufs.alloced &&
             rcd->egrbufs.buffers[idx].addr;
             idx++) {
                dma_free_coherent(&dd->pcidev->dev,
                                  rcd->egrbufs.buffers[idx].len,
                                  rcd->egrbufs.buffers[idx].addr,
                                  rcd->egrbufs.buffers[idx].phys);
                rcd->egrbufs.buffers[idx].addr = NULL;
                rcd->egrbufs.buffers[idx].phys = 0;
                rcd->egrbufs.buffers[idx].len = 0;
        }
bail:
        return ret;
}