/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
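
/*
 * Illustrative expansion (not from the original source): FLAG_ENTRY0
 * reorders its arguments into struct flag_table member order, so
 *	FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 * becomes
 *	{ CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 }
 * i.e. an initializer with .flag, .str and .extra filled in.
 */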

/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define DEFAULT_KRCVQS		2
#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1

/*
 * RSM instance allocation
 *   0 - Verbs
 *   1 - User Fecn Handling
 *   2 - Vnic
 */
#define RSM_INS_VERBS 0
#define RSM_INS_FECN 1
#define RSM_INS_VNIC 2

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/* RSM fields for Verbs */
/* packet type */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
/* QPN[7..1] */
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
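
/*
 * Worked example (illustrative, derived from the defines above): a
 * match/select offset packs a quadword index above QW_SHIFT bits of
 * bit offset, so
 *	LRH_BTH_MATCH_OFFSET = (0ull << 6) | 48 = 48	(QW 0, bit 48)
 *	QPN_SELECT_OFFSET    = (1ull << 6) |  1 = 65	(QW 1, bit 1)
 */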

/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
#define L2_TYPE_BIT_OFFSET 61ull
#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK 3ull
#define L2_16B_VALUE 2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW 1ull
#define L4_TYPE_BIT_OFFSET 0ull
#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK 0xFFull
#define L4_16B_ETH_VALUE 0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
(	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
(	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT)   \
)
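
/*
 * Illustrative use (hypothetical values): build the power-on SendSC2VLt0
 * image mapping SC0..SC7 one-to-one onto VL0..VL7:
 *	u64 v = SC2VL_VAL(0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7);
 * Each (scN, scNval) pair selects the SC field and the VL value shifted
 * into it.
 */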

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
};
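
/*
 * Sketch (not from the original source) of how a flag_table is typically
 * consumed: walk the table and report every set bit of a just-read error
 * status CSR.  The helper name and signature here are hypothetical.
 */
static inline void sketch_decode_err_status(struct hfi1_devdata *dd, u64 reg,
					    const struct flag_table *table,
					    size_t n)
{
	size_t i;

	/* report every table entry whose flag bit is set in the CSR value */
	for (i = 0; i < n; i++)
		if (reg & table[i].flag)
			dd_dev_err(dd, "%s\n", table[i].str);
}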

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/* 30-31 reserved */
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR  \
		(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
		| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
		| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR  \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
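
/*
 * Illustrative use (hypothetical): a freeze-handling path can test a
 * just-read RcvErrStatus value against the mask above to decide whether
 * the receive DMA state is too damaged to wait for a clean freeze:
 *	if (reg & RXE_FREEZE_ABORT_MASK)
 *		... abort the wait instead of continuing ...
 */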

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);

/*
 * Error interrupt table entry.  This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;		/* status CSR offset */
	u32 clear;		/* clear CSR offset */
	u32 mask;		/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
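
/*
 * Illustrative expansion (derived from the macros above):
 *	EE(CCE_ERR, handle_cce_err, "CceErr")
 * becomes
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *	  handle_cce_err, "CceErr" }
 * so a single macro argument names the status/clear/mask CSR triple of a
 * second tier error register.
 */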

/*
 * Table of the "misc" grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR,		handle_cce_err,    "CceErr"),
/* 1*/	EE(RCV_ERR,		handle_rxe_err,    "RxeErr"),
/* 2*/	EE(MISC_ERR,		handle_misc_err,   "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR,	handle_pio_err,    "PioErr"),
/* 5*/	EE(SEND_DMA_ERR,	handle_sdma_err,   "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR,	handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR,		handle_txe_err,    "TxeErr")
	/* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register cannot be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7

/*
 * Table of the DC grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR,		handle_dcc_err,	       "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR,	handle_lcb_err,	       "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR,	handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};

/*
 * Per-counter descriptor used by the counter tables below.
 */
struct cntr_entry {
	/*
	 * counter name
	 */
	char *name;
	/*
	 * csr to read for name (if applicable)
	 */
	u64 csr;
	/*
	 * offset into dd or ppd to store the counter's value
	 */
	int offset;
	/*
	 * flags
	 */
	u8 flags;
	/*
	 * accessor for stat element, context either dd or ppd
	 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)
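
/*
 * Illustrative expansion (derived from the macros above):
 *	OVR_ELM(0)
 * becomes
 *	CNTR_ELEM("RcvHdrOvr0", RCV_HDR_OVFL_CNT + 0 * 0x100,
 *		  0, CNTR_NORMAL, port_access_u64_csr)
 * i.e. one cntr_entry per receive context, with each context's overflow
 * counter 0x100 bytes apart in CSR space.
 */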

/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
 */
static inline void __iomem *hfi1_addr_from_offset(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}

/**
 * read_csr - read CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no device
 */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}

/**
 * write_csr - write CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 * @value: value to write
 */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}

/**
 * get_csr_addr - return the iomem address for offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: The iomem address to use in subsequent
 * writeq/readq operations.
 */
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}
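
/*
 * Illustrative use (hypothetical values): cache a CSR address once and
 * write it directly later, bypassing repeated offset translation:
 *	void __iomem *addr = get_csr_addr(dd, RCV_HDR_OVFL_CNT);
 *	if (addr)
 *		writeq(0, addr);
 */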

static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
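
/*
 * Illustrative use (hypothetical values): the same helper serves reads
 * and writes of a counter CSR, selected by mode:
 *	u64 v = read_write_csr(dd, CCE_COUNTER_ARRAY32, CNTR_MODE_R, 0);
 *	read_write_csr(dd, CCE_COUNTER_ARRAY32, CNTR_MODE_W, 0);
 * where the write of 0 zeroes the counter.
 */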

static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}

static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}
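
/*
 * Illustrative note, not part of the driver: per-context hardware
 * counters are laid out as arrays of CSRs.  SDMA counters stride by
 * 0x100 bytes per engine (see dev_access_u32_csr() above); per-VL
 * counters stride by one 64-bit CSR per VL, as restated by this
 * hypothetical helper.
 */
static inline u64 example_per_vl_csr(u64 csr, int vl)
{
	return csr + 8 * vl;	/* VL n is n 8-byte CSRs past VL 0 */
}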
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}
/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
				u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

	return ret;
}
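
/*
 * Illustrative sketch, not part of the driver: software counters are
 * plain u64 fields read or overwritten through read_write_sw().  A
 * write stores the value as-is, so writing 0 clears the counter.  The
 * helper name is hypothetical.
 */
static inline u64 example_sw_clear_link_downed(struct hfi1_pportdata *ppd)
{
	return read_write_sw(ppd->dd, &ppd->link_downed, CNTR_MODE_W, 0);
}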
static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}
static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}
u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
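
/*
 * Illustrative sketch, not part of the driver: per-CPU counters are
 * bumped locklessly on hot paths and only summed when read.  The helper
 * name is hypothetical; dd->int_counter is one such counter.
 */
static inline u64 example_percpu_bump_and_total(u64 __percpu *cntr)
{
	this_cpu_inc(*cntr);		/* hot path: no locks, no atomics */
	return get_all_cpu_total(cntr);	/* slow path: sum over all CPUs */
}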
static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr,
			  int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}

static u64 access_sw_pio_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}
/* Software counters for the error status bits within MISC_ERR_STATUS */

/*
 * Generator for the per-bit error-status accessors in the remainder of
 * this section.  Each generated function has the usual cntr_entry
 * accessor signature, ignores @entry, @vl, @mode and @data, and simply
 * returns one element of the named software counter array in
 * hfi1_devdata; the array index matches the bit position within the
 * corresponding hardware error-status CSR.
 */
#define ERR_STATUS_CNT_ACCESS(fn, array, index)				\
static u64 fn(const struct cntr_entry *entry, void *context,		\
	      int vl, int mode, u64 data)				\
{									\
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;	\
									\
	return dd->array[index];					\
}

#define MISC_ERR_CNT_ACCESS(fn, index) \
	ERR_STATUS_CNT_ACCESS(fn, misc_err_status_cnt, index)

MISC_ERR_CNT_ACCESS(access_misc_pll_lock_fail_err_cnt, 12)
MISC_ERR_CNT_ACCESS(access_misc_mbist_fail_err_cnt, 11)
MISC_ERR_CNT_ACCESS(access_misc_invalid_eep_cmd_err_cnt, 10)
MISC_ERR_CNT_ACCESS(access_misc_efuse_done_parity_err_cnt, 9)
MISC_ERR_CNT_ACCESS(access_misc_efuse_write_err_cnt, 8)
MISC_ERR_CNT_ACCESS(access_misc_efuse_read_bad_addr_err_cnt, 7)
MISC_ERR_CNT_ACCESS(access_misc_efuse_csr_parity_err_cnt, 6)
MISC_ERR_CNT_ACCESS(access_misc_fw_auth_failed_err_cnt, 5)
MISC_ERR_CNT_ACCESS(access_misc_key_mismatch_err_cnt, 4)
MISC_ERR_CNT_ACCESS(access_misc_sbus_write_failed_err_cnt, 3)
MISC_ERR_CNT_ACCESS(access_misc_csr_write_bad_addr_err_cnt, 2)
MISC_ERR_CNT_ACCESS(access_misc_csr_read_bad_addr_err_cnt, 1)
MISC_ERR_CNT_ACCESS(access_misc_csr_parity_err_cnt, 0)
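
/*
 * Sketch of how these accessors are consumed (assumption: the counter
 * tables appear later in this file): each generated function is wired
 * into a table entry, e.g.
 *
 *	CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0, CNTR_NORMAL,
 *		  access_misc_pll_lock_fail_err_cnt),
 *
 * where the csr and offset fields are unused by software counters.
 */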
/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(const struct cntr_entry *entry,
						   void *context, int vl,
						   int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}

/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
#define CCE_ERR_CNT_ACCESS(fn, index) \
	ERR_STATUS_CNT_ACCESS(fn, cce_err_status_cnt, index)

CCE_ERR_CNT_ACCESS(access_cce_msix_csr_parity_err_cnt, 40)
CCE_ERR_CNT_ACCESS(access_cce_int_map_unc_err_cnt, 39)
CCE_ERR_CNT_ACCESS(access_cce_int_map_cor_err_cnt, 38)
CCE_ERR_CNT_ACCESS(access_cce_msix_table_unc_err_cnt, 37)
CCE_ERR_CNT_ACCESS(access_cce_msix_table_cor_err_cnt, 36)
CCE_ERR_CNT_ACCESS(access_cce_rxdma_conv_fifo_parity_err_cnt, 35)
CCE_ERR_CNT_ACCESS(access_cce_rcpl_async_fifo_parity_err_cnt, 34)
CCE_ERR_CNT_ACCESS(access_cce_seg_write_bad_addr_err_cnt, 33)
CCE_ERR_CNT_ACCESS(access_cce_seg_read_bad_addr_err_cnt, 32)
CCE_ERR_CNT_ACCESS(access_la_triggered_cnt, 31)
CCE_ERR_CNT_ACCESS(access_cce_trgt_cpl_timeout_err_cnt, 30)
CCE_ERR_CNT_ACCESS(access_pcic_receive_parity_err_cnt, 29)
CCE_ERR_CNT_ACCESS(access_pcic_transmit_back_parity_err_cnt, 28)
CCE_ERR_CNT_ACCESS(access_pcic_transmit_front_parity_err_cnt, 27)
CCE_ERR_CNT_ACCESS(access_pcic_cpl_dat_q_unc_err_cnt, 26)
CCE_ERR_CNT_ACCESS(access_pcic_cpl_hd_q_unc_err_cnt, 25)
CCE_ERR_CNT_ACCESS(access_pcic_post_dat_q_unc_err_cnt, 24)
CCE_ERR_CNT_ACCESS(access_pcic_post_hd_q_unc_err_cnt, 23)
CCE_ERR_CNT_ACCESS(access_pcic_retry_sot_mem_unc_err_cnt, 22)
CCE_ERR_CNT_ACCESS(access_pcic_retry_mem_unc_err, 21)
CCE_ERR_CNT_ACCESS(access_pcic_n_post_dat_q_parity_err_cnt, 20)
CCE_ERR_CNT_ACCESS(access_pcic_n_post_h_q_parity_err_cnt, 19)
CCE_ERR_CNT_ACCESS(access_pcic_cpl_dat_q_cor_err_cnt, 18)
CCE_ERR_CNT_ACCESS(access_pcic_cpl_hd_q_cor_err_cnt, 17)
CCE_ERR_CNT_ACCESS(access_pcic_post_dat_q_cor_err_cnt, 16)
CCE_ERR_CNT_ACCESS(access_pcic_post_hd_q_cor_err_cnt, 15)
CCE_ERR_CNT_ACCESS(access_pcic_retry_sot_mem_cor_err_cnt, 14)
CCE_ERR_CNT_ACCESS(access_pcic_retry_mem_cor_err_cnt, 13)
CCE_ERR_CNT_ACCESS(access_cce_cli1_async_fifo_dbg_parity_err_cnt, 12)
CCE_ERR_CNT_ACCESS(access_cce_cli1_async_fifo_rxdma_parity_err_cnt, 11)
CCE_ERR_CNT_ACCESS(access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt, 10)
CCE_ERR_CNT_ACCESS(access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt, 9)
CCE_ERR_CNT_ACCESS(access_cce_cli2_async_fifo_parity_err_cnt, 8)
CCE_ERR_CNT_ACCESS(access_cce_csr_cfg_bus_parity_err_cnt, 7)
CCE_ERR_CNT_ACCESS(access_cce_cli0_async_fifo_parity_err_cnt, 6)
CCE_ERR_CNT_ACCESS(access_cce_rspd_data_parity_err_cnt, 5)
CCE_ERR_CNT_ACCESS(access_cce_trgt_access_err_cnt, 4)
CCE_ERR_CNT_ACCESS(access_cce_trgt_async_fifo_parity_err_cnt, 3)
CCE_ERR_CNT_ACCESS(access_cce_csr_write_bad_addr_err_cnt, 2)
CCE_ERR_CNT_ACCESS(access_cce_csr_read_bad_addr_err_cnt, 1)
CCE_ERR_CNT_ACCESS(access_ccs_csr_parity_err_cnt, 0)
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
#define RXE_ERR_CNT_ACCESS(fn, index) \
	ERR_STATUS_CNT_ACCESS(fn, rcv_err_status_cnt, index)

RXE_ERR_CNT_ACCESS(access_rx_csr_parity_err_cnt, 63)
RXE_ERR_CNT_ACCESS(access_rx_csr_write_bad_addr_err_cnt, 62)
RXE_ERR_CNT_ACCESS(access_rx_csr_read_bad_addr_err_cnt, 61)
RXE_ERR_CNT_ACCESS(access_rx_dma_csr_unc_err_cnt, 60)
RXE_ERR_CNT_ACCESS(access_rx_dma_dq_fsm_encoding_err_cnt, 59)
RXE_ERR_CNT_ACCESS(access_rx_dma_eq_fsm_encoding_err_cnt, 58)
RXE_ERR_CNT_ACCESS(access_rx_dma_csr_parity_err_cnt, 57)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_data_cor_err_cnt, 56)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_data_unc_err_cnt, 55)
RXE_ERR_CNT_ACCESS(access_rx_dma_data_fifo_rd_cor_err_cnt, 54)
RXE_ERR_CNT_ACCESS(access_rx_dma_data_fifo_rd_unc_err_cnt, 53)
RXE_ERR_CNT_ACCESS(access_rx_dma_hdr_fifo_rd_cor_err_cnt, 52)
RXE_ERR_CNT_ACCESS(access_rx_dma_hdr_fifo_rd_unc_err_cnt, 51)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_desc_part2_cor_err_cnt, 50)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_desc_part2_unc_err_cnt, 49)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_desc_part1_cor_err_cnt, 48)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_desc_part1_unc_err_cnt, 47)
RXE_ERR_CNT_ACCESS(access_rx_hq_intr_fsm_err_cnt, 46)
RXE_ERR_CNT_ACCESS(access_rx_hq_intr_csr_parity_err_cnt, 45)
RXE_ERR_CNT_ACCESS(access_rx_lookup_csr_parity_err_cnt, 44)
RXE_ERR_CNT_ACCESS(access_rx_lookup_rcv_array_cor_err_cnt, 43)
RXE_ERR_CNT_ACCESS(access_rx_lookup_rcv_array_unc_err_cnt, 42)
RXE_ERR_CNT_ACCESS(access_rx_lookup_des_part2_parity_err_cnt, 41)
RXE_ERR_CNT_ACCESS(access_rx_lookup_des_part1_unc_cor_err_cnt, 40)
RXE_ERR_CNT_ACCESS(access_rx_lookup_des_part1_unc_err_cnt, 39)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_next_free_buf_cor_err_cnt, 38)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_next_free_buf_unc_err_cnt, 37)
RXE_ERR_CNT_ACCESS(access_rbuf_fl_init_wr_addr_parity_err_cnt, 36)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_fl_initdone_parity_err_cnt, 35)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_fl_write_addr_parity_err_cnt, 34)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_fl_rd_addr_parity_err_cnt, 33)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_empty_err_cnt, 32)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_full_err_cnt, 31)
RXE_ERR_CNT_ACCESS(access_rbuf_bad_lookup_err_cnt, 30)
RXE_ERR_CNT_ACCESS(access_rbuf_ctx_id_parity_err_cnt, 29)
RXE_ERR_CNT_ACCESS(access_rbuf_csr_qeopdw_parity_err_cnt, 28)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt, 27)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt, 26)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt, 25)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_csr_q_vld_bit_parity_err_cnt, 24)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_csr_q_next_buf_parity_err_cnt, 23)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt, 22)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt, 21)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_block_list_read_cor_err_cnt, 20)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_block_list_read_unc_err_cnt, 19)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_lookup_des_cor_err_cnt, 18)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_lookup_des_unc_err_cnt, 17)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt, 16)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_lookup_des_reg_unc_err_cnt, 15)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_free_list_cor_err_cnt, 14)
RXE_ERR_CNT_ACCESS(access_rx_rbuf_free_list_unc_err_cnt, 13)
RXE_ERR_CNT_ACCESS(access_rx_rcv_fsm_encoding_err_cnt, 12)
RXE_ERR_CNT_ACCESS(access_rx_dma_flag_cor_err_cnt, 11)
RXE_ERR_CNT_ACCESS(access_rx_dma_flag_unc_err_cnt, 10)
RXE_ERR_CNT_ACCESS(access_rx_dc_sop_eop_parity_err_cnt, 9)
RXE_ERR_CNT_ACCESS(access_rx_rcv_csr_parity_err_cnt, 8)
RXE_ERR_CNT_ACCESS(access_rx_rcv_qp_map_table_cor_err_cnt, 7)
RXE_ERR_CNT_ACCESS(access_rx_rcv_qp_map_table_unc_err_cnt, 6)
RXE_ERR_CNT_ACCESS(access_rx_rcv_data_cor_err_cnt, 5)
RXE_ERR_CNT_ACCESS(access_rx_rcv_data_unc_err_cnt, 4)
RXE_ERR_CNT_ACCESS(access_rx_rcv_hdr_cor_err_cnt, 3)
RXE_ERR_CNT_ACCESS(access_rx_rcv_hdr_unc_err_cnt, 2)
RXE_ERR_CNT_ACCESS(access_rx_dc_intf_parity_err_cnt, 1)
RXE_ERR_CNT_ACCESS(access_rx_dma_csr_cor_err_cnt, 0)
/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
#define PIO_ERR_CNT_ACCESS(fn, index) \
	ERR_STATUS_CNT_ACCESS(fn, send_pio_err_status_cnt, index)

PIO_ERR_CNT_ACCESS(access_pio_pec_sop_head_parity_err_cnt, 35)
PIO_ERR_CNT_ACCESS(access_pio_pcc_sop_head_parity_err_cnt, 34)
PIO_ERR_CNT_ACCESS(access_pio_last_returned_cnt_parity_err_cnt, 33)
PIO_ERR_CNT_ACCESS(access_pio_current_free_cnt_parity_err_cnt, 32)
PIO_ERR_CNT_ACCESS(access_pio_reserved_31_err_cnt, 31)
PIO_ERR_CNT_ACCESS(access_pio_reserved_30_err_cnt, 30)
PIO_ERR_CNT_ACCESS(access_pio_ppmc_sop_len_err_cnt, 29)
PIO_ERR_CNT_ACCESS(access_pio_ppmc_bqc_mem_parity_err_cnt, 28)
PIO_ERR_CNT_ACCESS(access_pio_vl_fifo_parity_err_cnt, 27)
PIO_ERR_CNT_ACCESS(access_pio_vlf_sop_parity_err_cnt, 26)
PIO_ERR_CNT_ACCESS(access_pio_vlf_v1_len_parity_err_cnt, 25)
PIO_ERR_CNT_ACCESS(access_pio_block_qw_count_parity_err_cnt, 24)
PIO_ERR_CNT_ACCESS(access_pio_write_qw_valid_parity_err_cnt, 23)
PIO_ERR_CNT_ACCESS(access_pio_state_machine_err_cnt, 22)
PIO_ERR_CNT_ACCESS(access_pio_write_data_parity_err_cnt, 21)
PIO_ERR_CNT_ACCESS(access_pio_host_addr_mem_cor_err_cnt, 20)
PIO_ERR_CNT_ACCESS(access_pio_host_addr_mem_unc_err_cnt, 19)
PIO_ERR_CNT_ACCESS(access_pio_pkt_evict_sm_or_arb_sm_err_cnt, 18)
PIO_ERR_CNT_ACCESS(access_pio_init_sm_in_err_cnt, 17)
PIO_ERR_CNT_ACCESS(access_pio_ppmc_pbl_fifo_err_cnt, 16)
PIO_ERR_CNT_ACCESS(access_pio_credit_ret_fifo_parity_err_cnt, 15)
PIO_ERR_CNT_ACCESS(access_pio_v1_len_mem_bank1_cor_err_cnt, 14)
PIO_ERR_CNT_ACCESS(access_pio_v1_len_mem_bank0_cor_err_cnt, 13)
PIO_ERR_CNT_ACCESS(access_pio_v1_len_mem_bank1_unc_err_cnt, 12)
PIO_ERR_CNT_ACCESS(access_pio_v1_len_mem_bank0_unc_err_cnt, 11)
PIO_ERR_CNT_ACCESS(access_pio_sm_pkt_reset_parity_err_cnt, 10)
PIO_ERR_CNT_ACCESS(access_pio_pkt_evict_fifo_parity_err_cnt, 9)
PIO_ERR_CNT_ACCESS(access_pio_sbrdctrl_crrel_fifo_parity_err_cnt, 8)
PIO_ERR_CNT_ACCESS(access_pio_sbrdctl_crrel_parity_err_cnt, 7)
PIO_ERR_CNT_ACCESS(access_pio_pec_fifo_parity_err_cnt, 6)
PIO_ERR_CNT_ACCESS(access_pio_pcc_fifo_parity_err_cnt, 5)
PIO_ERR_CNT_ACCESS(access_pio_sb_mem_fifo1_err_cnt, 4)
PIO_ERR_CNT_ACCESS(access_pio_sb_mem_fifo0_err_cnt, 3)
PIO_ERR_CNT_ACCESS(access_pio_csr_parity_err_cnt, 2)
PIO_ERR_CNT_ACCESS(access_pio_write_addr_parity_err_cnt, 1)
PIO_ERR_CNT_ACCESS(access_pio_write_bad_ctxt_err_cnt, 0)
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
#define SDMA_ERR_CNT_ACCESS(fn, index) \
	ERR_STATUS_CNT_ACCESS(fn, send_dma_err_status_cnt, index)

SDMA_ERR_CNT_ACCESS(access_sdma_pcie_req_tracking_cor_err_cnt, 3)
SDMA_ERR_CNT_ACCESS(access_sdma_pcie_req_tracking_unc_err_cnt, 2)
SDMA_ERR_CNT_ACCESS(access_sdma_csr_parity_err_cnt, 1)
SDMA_ERR_CNT_ACCESS(access_sdma_rpy_tag_err_cnt, 0)
3164 * Software counters corresponding to each of the
3165 * error status bits within SendEgressErrStatus
3167 static u64
access_tx_read_pio_memory_csr_unc_err_cnt(
3168 const struct cntr_entry
*entry
,
3169 void *context
, int vl
, int mode
, u64 data
)
3171 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3173 return dd
->send_egress_err_status_cnt
[63];
3176 static u64
access_tx_read_sdma_memory_csr_err_cnt(
3177 const struct cntr_entry
*entry
,
3178 void *context
, int vl
, int mode
, u64 data
)
3180 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3182 return dd
->send_egress_err_status_cnt
[62];
3185 static u64
access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry
*entry
,
3186 void *context
, int vl
, int mode
,
3189 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3191 return dd
->send_egress_err_status_cnt
[61];
3194 static u64
access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry
*entry
,
3195 void *context
, int vl
,
3198 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3200 return dd
->send_egress_err_status_cnt
[60];
3203 static u64
access_tx_read_sdma_memory_cor_err_cnt(
3204 const struct cntr_entry
*entry
,
3205 void *context
, int vl
, int mode
, u64 data
)
3207 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3209 return dd
->send_egress_err_status_cnt
[59];
3212 static u64
access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry
*entry
,
3213 void *context
, int vl
, int mode
,
3216 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3218 return dd
->send_egress_err_status_cnt
[58];
3221 static u64
access_tx_credit_overrun_err_cnt(const struct cntr_entry
*entry
,
3222 void *context
, int vl
, int mode
,
3225 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3227 return dd
->send_egress_err_status_cnt
[57];
3230 static u64
access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry
*entry
,
3231 void *context
, int vl
, int mode
,
3234 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3236 return dd
->send_egress_err_status_cnt
[56];
3239 static u64
access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry
*entry
,
3240 void *context
, int vl
, int mode
,
3243 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3245 return dd
->send_egress_err_status_cnt
[55];
3248 static u64
access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry
*entry
,
3249 void *context
, int vl
, int mode
,
3252 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3254 return dd
->send_egress_err_status_cnt
[54];
3257 static u64
access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry
*entry
,
3258 void *context
, int vl
, int mode
,
3261 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3263 return dd
->send_egress_err_status_cnt
[53];
3266 static u64
access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry
*entry
,
3267 void *context
, int vl
, int mode
,
3270 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3272 return dd
->send_egress_err_status_cnt
[52];
3275 static u64
access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry
*entry
,
3276 void *context
, int vl
, int mode
,
3279 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3281 return dd
->send_egress_err_status_cnt
[51];
3284 static u64
access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry
*entry
,
3285 void *context
, int vl
, int mode
,
3288 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3290 return dd
->send_egress_err_status_cnt
[50];
3293 static u64
access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry
*entry
,
3294 void *context
, int vl
, int mode
,
3297 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3299 return dd
->send_egress_err_status_cnt
[49];
3302 static u64
access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry
*entry
,
3303 void *context
, int vl
, int mode
,
3306 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3308 return dd
->send_egress_err_status_cnt
[48];
3311 static u64
access_tx_credit_return_vl_err_cnt(const struct cntr_entry
*entry
,
3312 void *context
, int vl
, int mode
,
3315 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3317 return dd
->send_egress_err_status_cnt
[47];
3320 static u64
access_tx_hcrc_insertion_err_cnt(const struct cntr_entry
*entry
,
3321 void *context
, int vl
, int mode
,
3324 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3326 return dd
->send_egress_err_status_cnt
[46];
3329 static u64
access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry
*entry
,
3330 void *context
, int vl
, int mode
,
3333 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3335 return dd
->send_egress_err_status_cnt
[45];
3338 static u64
access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry
*entry
,
3339 void *context
, int vl
,
3342 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3344 return dd
->send_egress_err_status_cnt
[44];
3347 static u64
access_tx_read_sdma_memory_unc_err_cnt(
3348 const struct cntr_entry
*entry
,
3349 void *context
, int vl
, int mode
, u64 data
)
3351 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3353 return dd
->send_egress_err_status_cnt
[43];
3356 static u64
access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry
*entry
,
3357 void *context
, int vl
, int mode
,
3360 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3362 return dd
->send_egress_err_status_cnt
[42];
3365 static u64
access_tx_credit_return_partiy_err_cnt(
3366 const struct cntr_entry
*entry
,
3367 void *context
, int vl
, int mode
, u64 data
)
3369 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3371 return dd
->send_egress_err_status_cnt
[41];
3374 static u64
access_tx_launch_fifo8_unc_or_parity_err_cnt(
3375 const struct cntr_entry
*entry
,
3376 void *context
, int vl
, int mode
, u64 data
)
3378 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3380 return dd
->send_egress_err_status_cnt
[40];
3383 static u64
access_tx_launch_fifo7_unc_or_parity_err_cnt(
3384 const struct cntr_entry
*entry
,
3385 void *context
, int vl
, int mode
, u64 data
)
3387 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3389 return dd
->send_egress_err_status_cnt
[39];
3392 static u64
access_tx_launch_fifo6_unc_or_parity_err_cnt(
3393 const struct cntr_entry
*entry
,
3394 void *context
, int vl
, int mode
, u64 data
)
3396 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3398 return dd
->send_egress_err_status_cnt
[38];
3401 static u64
access_tx_launch_fifo5_unc_or_parity_err_cnt(
3402 const struct cntr_entry
*entry
,
3403 void *context
, int vl
, int mode
, u64 data
)
3405 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3407 return dd
->send_egress_err_status_cnt
[37];
3410 static u64
access_tx_launch_fifo4_unc_or_parity_err_cnt(
3411 const struct cntr_entry
*entry
,
3412 void *context
, int vl
, int mode
, u64 data
)
3414 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3416 return dd
->send_egress_err_status_cnt
[36];
3419 static u64
access_tx_launch_fifo3_unc_or_parity_err_cnt(
3420 const struct cntr_entry
*entry
,
3421 void *context
, int vl
, int mode
, u64 data
)
3423 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3425 return dd
->send_egress_err_status_cnt
[35];
3428 static u64
access_tx_launch_fifo2_unc_or_parity_err_cnt(
3429 const struct cntr_entry
*entry
,
3430 void *context
, int vl
, int mode
, u64 data
)
3432 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3434 return dd
->send_egress_err_status_cnt
[34];
3437 static u64
access_tx_launch_fifo1_unc_or_parity_err_cnt(
3438 const struct cntr_entry
*entry
,
3439 void *context
, int vl
, int mode
, u64 data
)
3441 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3443 return dd
->send_egress_err_status_cnt
[33];
3446 static u64
access_tx_launch_fifo0_unc_or_parity_err_cnt(
3447 const struct cntr_entry
*entry
,
3448 void *context
, int vl
, int mode
, u64 data
)
3450 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3452 return dd
->send_egress_err_status_cnt
[32];
3455 static u64
access_tx_sdma15_disallowed_packet_err_cnt(
3456 const struct cntr_entry
*entry
,
3457 void *context
, int vl
, int mode
, u64 data
)
3459 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3461 return dd
->send_egress_err_status_cnt
[31];
}

static u64 access_tx_sdma14_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[30];
}

static u64 access_tx_sdma13_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[29];
}

static u64 access_tx_sdma12_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[28];
}

static u64 access_tx_sdma11_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[27];
}

static u64 access_tx_sdma10_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[26];
}

static u64 access_tx_sdma9_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[25];
}

static u64 access_tx_sdma8_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[24];
}

static u64 access_tx_sdma7_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[23];
}

static u64 access_tx_sdma6_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}
static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}
static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl,
					       int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				    void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}
static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
				 void *context, int vl, int mode,
				 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	u64 val = 0;
	u64 csr = entry->csr;

	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	return val;
}
4064 #define def_access_sw_cpu(cntr) \
4065 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
4066 void *context, int vl, int mode, u64 data) \
4068 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
4069 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
4070 ppd->ibport_data.rvp.cntr, vl, \
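/*
 * For illustration only, def_access_sw_cpu(rc_acks) below expands to
 * roughly:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *				      void *context, int vl, int mode,
 *				      u64 data)
 *	{
 *		struct hfi1_pportdata *ppd =
 *				(struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd,
 *				      &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */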
def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
				void *context, int vl, int mode, u64 data)    \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
									      \
	if (vl != CNTR_INVALID_VL)					      \
		return 0;						      \
									      \
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
			     mode, data);				      \
}
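/*
 * Unlike the per-CPU counters above, these IB-protocol counters are not
 * kept per virtual lane, so any VL-indexed query simply reads back zero.
 */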
def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
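/*
 * The counter tables below use designated initializers keyed by the C_*
 * enums, so each entry binds a counter name and (where one exists) a CSR
 * to the access function that reads, writes, or synthesizes its value.
 */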
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT,
			CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
			access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
				  CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
				  CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
				  CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
				  CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
				    CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
				    CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
				  CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_rcv_limit),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
			    access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			    access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			    access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
			    SEND_DMA_DESC_FETCHED_CNT, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
				0, CNTR_NORMAL,
				access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
				CNTR_NORMAL,
				access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
				CNTR_NORMAL,
				access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
				0, CNTR_NORMAL,
				access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoDbgParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoRxdmaParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
				"RxRbufFlInitWrAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQNumOfPktParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQTlPtrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
				0, 0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQHeadBufNumParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
				"RxRbufLookupDesRegUncCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
				0, 0, CNTR_NORMAL,
				access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
				0, CNTR_NORMAL,
				access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
				CNTR_NORMAL,
				access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
				CNTR_NORMAL,
				access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
				CNTR_NORMAL,
				access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
				CNTR_NORMAL,
				access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
				"PioSbrdctrlCrrelFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
				CNTR_NORMAL,
				access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
				CNTR_NORMAL,
				access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
				0, CNTR_NORMAL,
				access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
				0, CNTR_NORMAL,
				access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
				0, CNTR_NORMAL,
				access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
				CNTR_NORMAL,
				access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
				CNTR_NORMAL,
				access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
				"TxSbrdCtlStateMachineParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
				CNTR_NORMAL,
				access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
				CNTR_NORMAL,
				access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
				"EgressFifoUnderrunOrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifi_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
				0, 0, CNTR_NORMAL,
				access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
				CNTR_NORMAL,
				access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
				CNTR_NORMAL,
				access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
				0, 0, CNTR_NORMAL,
				access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
				0, 0, CNTR_NORMAL,
				access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
				CNTR_NORMAL,
				access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_wrong_dw_err_cnt),
};
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
			CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
			CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
			CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
			CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
			CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
				      CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
				      CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
				      CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
				 access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
				CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
				access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
				 access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
				access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			       access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
			       access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
			       access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
/* ======================================================================== */

/* return true if this is chip revision A */
int is_ax(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0;
}

/* return true if this is chip revision B */
int is_bx(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xF0) == 0x10;
}
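/*
 * Worked example (editor's sketch, not in the original source): the
 * revision step is encoded in the high nibble of the minor revision
 * field. With a chip_rev_minor of 0x12, (0x12 & 0xF0) == 0x10, so
 * is_bx(dd) returns true and is_ax(dd) returns false; a value of
 * 0x03 would report the A step instead.
 */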
/*
 * Append string s to buffer buf.  Arguments curp and len are the current
 * position and remaining length, respectively.
 *
 * return 0 on success, 1 on out of room
 */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
{
	char *p = *curp;
	int len = *lenp;
	int result = 0;	/* success */
	char c;

	/* add a comma, unless first in the buffer */
	if (p != buf) {
		if (len == 0) {
			result = 1;	/* out of room */
			goto done;
		}
		*p++ = ',';
		len--;
	}

	/* copy the string */
	while ((c = *s++) != 0) {
		if (len == 0) {
			result = 1;	/* out of room */
			goto done;
		}
		*p++ = c;
		len--;
	}

done:
	/* write return values */
	*curp = p;
	*lenp = len;

	return result;
}
/*
 * Using the given flag table, print a comma separated string into
 * the buffer.  End in '*' if the buffer is too short.
 */
static char *flag_string(char *buf, int buf_len, u64 flags,
			 struct flag_table *table, int table_size)
{
	char extra[32];
	char *p = buf;
	int len = buf_len;
	int no_room = 0;
	int i;

	/* make sure there is at least 2 so we can form "*" */
	if (len < 2)
		return "";

	len--;	/* leave room for a nul */
	for (i = 0; i < table_size; i++) {
		if (flags & table[i].flag) {
			no_room = append_str(buf, &p, &len, table[i].str);
			if (no_room)
				break;
			flags &= ~table[i].flag;
		}
	}

	/* any undocumented bits left? */
	if (!no_room && flags) {
		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
		no_room = append_str(buf, &p, &len, extra);
	}

	/* add * if ran out of room */
	if (no_room) {
		/* may need to back up to add space for a '*' */
		if (len == 0)
			--p;
		*p++ = '*';
	}

	/* add final nul - space already allocated above */
	*p = 0;
	return buf;
}
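/*
 * Worked example (editor's sketch, not in the original source): with
 * a two-entry table { {0x1, "ErrA"}, {0x2, "ErrB"} } and flags == 0x7,
 * flag_string() produces "ErrA,ErrB,bits 0x4" - the known flags by
 * name, then any undocumented remainder in hex.  If the buffer is too
 * short to hold everything, the string ends in '*'.
 */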
/* first 8 CCE error interrupt source names */
static const char * const cce_misc_names[] = {
	"CceErrInt",	/* 0 */
	"RxeErrInt",	/* 1 */
	"MiscErrInt",	/* 2 */
	"Reserved3",	/* 3 */
	"PioErrInt",	/* 4 */
	"SDmaErrInt",	/* 5 */
	"EgressErrInt",	/* 6 */
	"TxeErrInt"	/* 7 */
};

/*
 * Return the miscellaneous error interrupt name.
 */
static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(cce_misc_names))
		strncpy(buf, cce_misc_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u",
			 source + IS_GENERAL_ERR_START);

	return buf;
}

/*
 * Return the SDMA engine error interrupt name.
 */
static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
	return buf;
}

/*
 * Return the send context error interrupt name.
 */
static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
	return buf;
}

static const char * const various_names[] = {
	"PbcInt",
	"GpioAssertInt",
	"Qsfp1Int",
	"Qsfp2Int",
	"TCritInt"
};

/*
 * Return the various interrupt name.
 */
static char *is_various_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(various_names))
		strncpy(buf, various_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
	return buf;
}

/*
 * Return the DC interrupt name.
 */
static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
{
	static const char * const dc_int_names[] = {
		"common",
		"lcb",
		"8051",
		"lbm"	/* local block merge */
	};

	if (source < ARRAY_SIZE(dc_int_names))
		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
	else
		snprintf(buf, bsize, "DCInt%u", source);
	return buf;
}

static const char * const sdma_int_names[] = {
	"SDmaInt",
	"SdmaIdleInt",
	"SdmaProgressInt",
};

/*
 * Return the SDMA engine interrupt name.
 */
static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
{
	/* what interrupt */
	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

	if (likely(what < 3))
		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
	else
		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
	return buf;
}

/*
 * Return the receive available interrupt name.
 */
static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvAvailInt%u", source);
	return buf;
}

/*
 * Return the receive urgent interrupt name.
 */
static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvUrgentInt%u", source);
	return buf;
}

/*
 * Return the send credit interrupt name.
 */
static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCreditInt%u", source);
	return buf;
}

/*
 * Return the reserved interrupt name.
 */
static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
	return buf;
}
static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   cce_err_status_flags,
			   ARRAY_SIZE(cce_err_status_flags));
}

static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   rxe_err_status_flags,
			   ARRAY_SIZE(rxe_err_status_flags));
}

static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, misc_err_status_flags,
			   ARRAY_SIZE(misc_err_status_flags));
}

static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   pio_err_status_flags,
			   ARRAY_SIZE(pio_err_status_flags));
}

static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sdma_err_status_flags,
			   ARRAY_SIZE(sdma_err_status_flags));
}

static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_status_flags,
			   ARRAY_SIZE(egress_err_status_flags));
}

static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_info_flags,
			   ARRAY_SIZE(egress_err_info_flags));
}

static char *send_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   send_err_status_flags,
			   ARRAY_SIZE(send_err_status_flags));
}
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	/*
	 * For most these errors, there is nothing that can be done except
	 * report or record it.
	 */
	dd_dev_info(dd, "CCE Error: %s\n",
		    cce_err_status_string(buf, sizeof(buf), reg));

	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
		/* this error requires a manual drop into SPC freeze mode */
		start_freeze_handling(dd->pport, FREEZE_SELF);
	}

	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i)) {
			incr_cntr64(&dd->cce_err_status_cnt[i]);
			/* maintain a counter over all cce_err_status errors */
			incr_cntr64(&dd->sw_cce_err_status_aggregate);
		}
	}
}
/*
 * Check counters for receive errors that do not have an interrupt
 * associated with them.
 */
#define RCVERR_CHECK_TIME 10
static void update_rcverr_timer(unsigned long opaque)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
	struct hfi1_pportdata *ppd = dd->pport;
	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);

	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
		set_link_down_reason(
			ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
			OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
		queue_work(ppd->link_wq, &ppd->link_bounce_work);
	}
	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;

	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

static int init_rcverr(struct hfi1_devdata *dd)
{
	setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
	/* Assume the hardware counter has been reset */
	dd->rcv_ovfl_cnt = 0;
	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

static void free_rcverr(struct hfi1_devdata *dd)
{
	if (dd->rcverr_timer.data)
		del_timer_sync(&dd->rcverr_timer);
	dd->rcverr_timer.data = 0;
}
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Receive Error: %s\n",
		    rxe_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_RXE_FREEZE_ERR) {
		int flags = 0;

		/*
		 * Freeze mode recovery is disabled for the errors
		 * in RXE_FREEZE_ABORT_MASK
		 */
		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
			flags = FREEZE_ABORT;

		start_freeze_handling(dd->pport, flags);
	}

	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->rcv_err_status_cnt[i]);
	}
}

static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Misc Error: %s",
		    misc_err_status_string(buf, sizeof(buf), reg));
	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->misc_err_status_cnt[i]);
	}
}

static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "PIO Error: %s\n",
		    pio_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_PIO_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
	}
}

static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "SDMA Error: %s\n",
		    sdma_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_SDMA_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
	}
}
static inline void __count_port_discards(struct hfi1_pportdata *ppd)
{
	incr_cntr64(&ppd->port_xmit_discards);
}

static void count_port_inactive(struct hfi1_devdata *dd)
{
	__count_port_discards(dd->pport);
}

/*
 * We have had a "disallowed packet" error during egress. Determine the
 * integrity check which failed, and update relevant error counter, etc.
 *
 * Note that the SEND_EGRESS_ERR_INFO register has only a single
 * bit of state per integrity check, and so we can miss the reason for an
 * egress error if more than one packet fails the same integrity check
 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
 */
static void handle_send_egress_err_info(struct hfi1_devdata *dd,
					int vl)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
	char buf[96];

	/* clear down all observed info as quickly as possible after read */
	write_csr(dd, SEND_EGRESS_ERR_INFO, info);

	dd_dev_info(dd,
		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
		    info, egress_err_info_string(buf, sizeof(buf), info), src);

	/* Eventually add other counters for each bit */
	if (info & PORT_DISCARD_EGRESS_ERRS) {
		int weight, i;

		/*
		 * Count all applicable bits as individual errors and
		 * attribute them to the packet that triggered this handler.
		 * This may not be completely accurate due to limitations
		 * on the available hardware error information.  There is
		 * a single information register and any number of error
		 * packets may have occurred and contributed to it before
		 * this routine is called.  This means that:
		 * a) If multiple packets with the same error occur before
		 *    this routine is called, earlier packets are missed.
		 *    There is only a single bit for each error type.
		 * b) Errors may not be attributed to the correct VL.
		 *    The driver is attributing all bits in the info register
		 *    to the packet that triggered this call, but bits
		 *    could be an accumulation of different packets with
		 *    different VLs.
		 * c) A single error packet may have multiple counts attached
		 *    to it.  There is no way for the driver to know if
		 *    multiple bits set in the info register are due to a
		 *    single packet or multiple packets.  The driver assumes
		 *    multiple packets.
		 */
		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
		for (i = 0; i < weight; i++) {
			__count_port_discards(ppd);
			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
			else if (vl == 15)
				incr_cntr64(&ppd->port_xmit_discards_vl
					    [15]);
		}
	}
}
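/*
 * Worked example (editor's sketch, not in the original source): if
 * three PORT_DISCARD_EGRESS_ERRS bits are set in the info register,
 * hweight64() yields 3, so port_xmit_discards is incremented three
 * times, and when the VL is known the matching per-VL discard counter
 * is incremented as well - the "assume multiple packets" policy from
 * the comment above.
 */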
/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register. Does it represent a 'port inactive' error?
 */
static inline int port_inactive_err(u64 posn)
{
	return (posn >= SEES(TX_LINKDOWN) &&
		posn <= SEES(TX_INCORRECT_LINK_STATE));
}

/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register. Does it represent a 'disallowed packet' error?
 */
static inline int disallowed_pkt_err(int posn)
{
	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
}

/*
 * Input value is a bit position of one of the SDMA engine disallowed
 * packet errors.  Return which engine.  Use of this must be guarded by
 * disallowed_pkt_err().
 */
static inline int disallowed_pkt_engine(int posn)
{
	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
}

/*
 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
 * be done.
 */
static int engine_to_vl(struct hfi1_devdata *dd, int engine)
{
	struct sdma_vl_map *m;
	int vl;

	/* range check */
	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
		return -1;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	vl = m->engine_to_vl[engine];
	rcu_read_unlock();

	return vl;
}

/*
 * Translate the send context (software index) into a VL.  Return -1 if the
 * translation cannot be done.
 */
static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
{
	struct send_context_info *sci;
	struct send_context *sc;
	int i;

	sci = &dd->send_contexts[sw_index];

	/* there is no information for user (PSM) and ack contexts */
	if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
		return -1;

	sc = sci->sc;
	if (!sc)
		return -1;
	if (dd->vld[15].sc == sc)
		return 15;
	for (i = 0; i < num_vls; i++)
		if (dd->vld[i].sc == sc)
			return i;

	return -1;
}
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 reg_copy = reg, handled = 0;
	char buf[96];
	int i = 0;

	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);
	else if (is_ax(dd) &&
		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
		start_freeze_handling(dd->pport, 0);

	while (reg_copy) {
		int posn = fls64(reg_copy);
		/* fls64() returns a 1-based offset, we want it zero based */
		int shift = posn - 1;
		u64 mask = 1ULL << shift;

		if (port_inactive_err(shift)) {
			count_port_inactive(dd);
			handled |= mask;
		} else if (disallowed_pkt_err(shift)) {
			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));

			handle_send_egress_err_info(dd, vl);
			handled |= mask;
		}
		reg_copy &= ~mask;
	}

	reg &= ~handled;

	if (reg)
		dd_dev_info(dd, "Egress Error: %s\n",
			    egress_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
	}
}

static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Send Error: %s\n",
		    send_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_err_status_cnt[i]);
	}
}
/*
 * The maximum number of times the error clear down will loop before
 * blocking a repeating error.  This value is arbitrary.
 */
#define MAX_CLEAR_COUNT 20

/*
 * Clear and handle an error register.  All error interrupts are funneled
 * through here to have a central location to correctly handle single-
 * or multi-shot errors.
 *
 * For non per-context registers, call this routine with a context value
 * of 0 so the per-context offset is zero.
 *
 * If the handler loops too many times, assume that something is wrong
 * and can't be fixed, so mask the error bits.
 */
static void interrupt_clear_down(struct hfi1_devdata *dd,
				 u32 context,
				 const struct err_reg_info *eri)
{
	u64 reg;
	u32 count;

	/* read in a loop until no more errors are seen */
	count = 0;
	while (1) {
		reg = read_kctxt_csr(dd, context, eri->status);
		if (reg == 0)
			break;
		write_kctxt_csr(dd, context, eri->clear, reg);
		if (likely(eri->handler))
			eri->handler(dd, context, reg);
		count++;
		if (count > MAX_CLEAR_COUNT) {
			u64 mask;

			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
				   eri->desc, reg);
			/*
			 * Read-modify-write so any other masked bits
			 * stay masked.
			 */
			mask = read_kctxt_csr(dd, context, eri->mask);
			mask &= ~reg;
			write_kctxt_csr(dd, context, eri->mask, mask);
			break;
		}
	}
}

/*
 * CCE block "misc" interrupt.  Source is < 16.
 */
static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &misc_errs[source];

	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
	} else {
		dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
			   source);
	}
}

static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sc_err_status_flags,
			   ARRAY_SIZE(sc_err_status_flags));
}
/*
 * Send context error interrupt.  Source (hw_context) is < 160.
 *
 * All send context errors cause the send context to halt.  The normal
 * clear-down mechanism cannot be used because we cannot clear the
 * error bits until several other long-running items are done first.
 * This is OK because with the context halted, nothing else is going
 * to happen on it anyway.
 */
static void is_sendctxt_err_int(struct hfi1_devdata *dd,
				unsigned int hw_context)
{
	struct send_context_info *sci;
	struct send_context *sc;
	char flags[96];
	u64 status;
	u32 sw_index;
	int i = 0;

	sw_index = dd->hw_to_sw[hw_context];
	if (sw_index >= dd->num_send_contexts) {
		dd_dev_err(dd,
			   "out of range sw index %u for send context %u\n",
			   sw_index, hw_context);
		return;
	}
	sci = &dd->send_contexts[sw_index];
	sc = sci->sc;
	if (!sc) {
		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
			   sw_index, hw_context);
		return;
	}

	/* tell the software that a halt has begun */
	sc_stop(sc, SCF_HALTED);

	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);

	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
		    send_context_err_status_string(flags, sizeof(flags),
						   status));

	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));

	/*
	 * Automatically restart halted kernel contexts out of interrupt
	 * context.  User contexts must ask the driver to restart the context.
	 */
	if (sc->type != SC_USER)
		queue_work(dd->pport->hfi1_wq, &sc->halt_work);

	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over all
	 * 160 contexts.
	 */
	for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
	}
}

static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int source, u64 status)
{
	struct sdma_engine *sde;
	int i = 0;

	sde = &dd->per_sdma[source];
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
		   sde->this_idx, source, (unsigned long long)status);
#endif
	sdma_engine_error(sde, status);

	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over
	 * all 16 DMA engines.
	 */
	for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
	}
}
/*
 * CCE block SDMA error interrupt.  Source is < 16.
 */
static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
{
#ifdef CONFIG_SDMA_VERBOSITY
	struct sdma_engine *sde = &dd->per_sdma[source];

	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
		   source);
	sdma_dumpstate(sde);
#endif
	interrupt_clear_down(dd, source, &sdma_eng_err);
}

/*
 * CCE block "various" interrupt.  Source is < 8.
 */
static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &various_err[source];

	/*
	 * TCritInt cannot go through interrupt_clear_down()
	 * because it is not a second tier interrupt. The handler
	 * should be called directly.
	 */
	if (source == TCRIT_INT_SOURCE)
		handle_temp_err(dd);
	else if (eri->handler)
		interrupt_clear_down(dd, 0, eri);
	else
		dd_dev_info(dd,
			    "%s: Unimplemented/reserved interrupt %d\n",
			    __func__, source);
}
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
{
	/* src_ctx is always zero */
	struct hfi1_pportdata *ppd = dd->pport;
	unsigned long flags;
	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);

	if (reg & QSFP_HFI0_MODPRST_N) {
		if (!qsfp_mod_present(ppd)) {
			dd_dev_info(dd, "%s: QSFP module removed\n",
				    __func__);

			ppd->driver_link_ready = 0;
			/*
			 * Cable removed, reset all our information about the
			 * cache and cable capabilities
			 */
			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			/*
			 * We don't set cache_refresh_required here as we expect
			 * an interrupt when a cable is inserted
			 */
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.reset_needed = 0;
			ppd->qsfp_info.limiting_active = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
			/* Invert the ModPresent pin now to detect plug-in */
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			if ((ppd->offline_disabled_reason >
			  HFI1_ODR_MASK(
			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
			  (ppd->offline_disabled_reason ==
			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
				ppd->offline_disabled_reason =
				HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);

			if (ppd->host_link_state == HLS_DN_POLL) {
				/*
				 * The link is still in POLL. This means
				 * that the normal link down processing
				 * will not happen. We have to do it here
				 * before turning the DC off.
				 */
				queue_work(ppd->link_wq, &ppd->link_down_work);
			}
		} else {
			dd_dev_info(dd, "%s: QSFP module inserted\n",
				    __func__);

			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.cache_refresh_required = 1;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);

			/*
			 * Stop inversion of ModPresent pin to detect
			 * removal of the cable
			 */
			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			ppd->offline_disabled_reason =
				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
		}
	}

	if (reg & QSFP_HFI0_INT_N) {
		dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
			    __func__);
		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
		ppd->qsfp_info.check_interrupt_flags = 1;
		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
	}

	/* Schedule the QSFP work only if there is a cable attached. */
	if (qsfp_mod_present(ppd))
		queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
}
static int request_host_lcb_access(struct hfi1_devdata *dd)
{
	int ret;

	ret = do_8051_command(dd, HCMD_MISC,
			      (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			   __func__, ret);
	}
	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}

static int request_8051_lcb_access(struct hfi1_devdata *dd)
{
	int ret;

	ret = do_8051_command(dd, HCMD_MISC,
			      (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			   __func__, ret);
	}
	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}

/*
 * Set the LCB selector - allow host access.  The DCC selector always
 * points to the host.
 */
static inline void set_host_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
		  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
}

/*
 * Clear the LCB selector - allow 8051 access.  The DCC selector always
 * points to the host.
 */
static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
}
/*
 * Acquire LCB access from the 8051.  If the host already has access,
 * just increment a counter.  Otherwise, inform the 8051 that the
 * host is taking access.
 *
 * Returns:
 *	0 on success
 *	-EBUSY if the 8051 has control and cannot be disturbed
 *	-errno if unable to acquire access from the 8051
 */
int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	struct hfi1_pportdata *ppd = dd->pport;
	int ret = 0;

	/*
	 * Use the host link state lock so the operation of this routine
	 * { link state check, selector change, count increment } can occur
	 * as a unit against a link state change.  Otherwise there is a
	 * race between the state change and the count increment.
	 */
	if (sleep_ok) {
		mutex_lock(&ppd->hls_lock);
	} else {
		while (!mutex_trylock(&ppd->hls_lock))
			udelay(1);
	}

	/* this access is valid only when the link is up */
	if (ppd->host_link_state & HLS_DOWN) {
		dd_dev_info(dd, "%s: link state %s not up\n",
			    __func__, link_state_name(ppd->host_link_state));
		ret = -EBUSY;
		goto done;
	}

	if (dd->lcb_access_count == 0) {
		ret = request_host_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				   "%s: unable to acquire LCB access, err %d\n",
				   __func__, ret);
			goto done;
		}
		set_host_lcb_access(dd);
	}
	dd->lcb_access_count++;
done:
	mutex_unlock(&ppd->hls_lock);
	return ret;
}

/*
 * Release LCB access by decrementing the use count.  If the count is moving
 * from 1 to 0, inform 8051 that it has control back.
 *
 * Returns:
 *	0 on success
 *	-errno if unable to release access to the 8051
 */
int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	int ret = 0;

	/*
	 * Use the host link state lock because the acquire needed it.
	 * Here, we only need to keep { selector change, count decrement }
	 * as a unit.
	 */
	if (sleep_ok) {
		mutex_lock(&dd->pport->hls_lock);
	} else {
		while (!mutex_trylock(&dd->pport->hls_lock))
			udelay(1);
	}

	if (dd->lcb_access_count == 0) {
		dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
			   __func__);
		goto done;
	}

	if (dd->lcb_access_count == 1) {
		set_8051_lcb_access(dd);
		ret = request_8051_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				   "%s: unable to release LCB access, err %d\n",
				   __func__, ret);
			/* restore host access if the grant didn't work */
			set_host_lcb_access(dd);
			goto done;
		}
	}
	dd->lcb_access_count--;
done:
	mutex_unlock(&dd->pport->hls_lock);
	return ret;
}

/*
 * Initialize LCB access variables and state.  Called during driver load,
 * after most of the initialization is finished.
 *
 * The DC default is LCB access on for the host.  The driver defaults to
 * leaving access to the 8051.  Assign access now - this constrains the call
 * to this routine to be after all LCB set-up is done.  In particular, after
 * hf1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
 */
static void init_lcb_access(struct hfi1_devdata *dd)
{
	dd->lcb_access_count = 0;
}
/*
 * Write a response back to a 8051 request.
 */
static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
{
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
		  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
		  (u64)return_code <<
		  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
		  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
}

/*
 * Handle host requests from the 8051.
 */
static void handle_8051_request(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	u16 data = 0;
	u8 type;

	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
	if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
		return;	/* no request */

	/* zero out COMPLETED so the response is seen */
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);

	/* extract request details */
	type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
		& DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
	data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
		& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;

	switch (type) {
	case HREQ_LOAD_CONFIG:
	case HREQ_SAVE_CONFIG:
	case HREQ_READ_CONFIG:
	case HREQ_SET_TX_EQ_ABS:
	case HREQ_SET_TX_EQ_REL:
		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
			    type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	case HREQ_CONFIG_DONE:
		hreq_response(dd, HREQ_SUCCESS, 0);
		break;

	case HREQ_INTERFACE_TEST:
		hreq_response(dd, HREQ_SUCCESS, data);
		break;
	default:
		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	}
}
/*
 * Set up allocation unit value.
 */
void set_up_vau(struct hfi1_devdata *dd, u8 vau)
{
	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);

	/* do not modify other values in the register */
	reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
	reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}

/*
 * Set up initial VL15 credits of the remote.  Assumes the rest of
 * the CM credit registers are zero from a previous global or credit reset.
 * Shared limit for VL15 will always be 0.
 */
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
{
	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);

	/* set initial values for total and shared credit limit */
	reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
		 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);

	/*
	 * Set total limit to be equal to VL15 credits.
	 * Leave shared limit at 0.
	 */
	reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);

	write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
		  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
}

/*
 * Zero all credit details from the previous connection and
 * reset the CM manager's internal counters.
 */
void reset_link_credits(struct hfi1_devdata *dd)
{
	int i;

	/* remove all previous VL credit limits */
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
	/* reset the CM block */
	pio_send_control(dd, PSC_CM_RESET);
	/* reset cached value */
	dd->vl15buf_cached = 0;
}
/* convert a vCU to a CU */
static u32 vcu_to_cu(u8 vcu)
{
	return 1 << vcu;
}

/* convert a CU to a vCU */
static u8 cu_to_vcu(u32 cu)
{
	return ilog2(cu);
}

/* convert a vAU to an AU */
static u32 vau_to_au(u8 vau)
{
	return 8 * (1 << vau);
}
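/*
 * Worked example (editor's sketch, not in the original source): the
 * virtual values are log2 encodings, so vcu_to_cu(3) == 8 credit
 * units, cu_to_vcu(8) == 3, and vau_to_au(2) == 32 bytes since an
 * allocation unit is 8 * 2^vAU bytes.
 */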
static void set_linkup_defaults(struct hfi1_pportdata *ppd)
{
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;
}
/*
 * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
 */
static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
{
	u64 reg;

	/* clear lcb run: LCB_CFG_RUN.EN = 0 */
	write_csr(dd, DC_LCB_CFG_RUN, 0);
	/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
		  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
	/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
	reg = read_csr(dd, DCC_CFG_RESET);
	write_csr(dd, DCC_CFG_RESET, reg |
		  (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
		  (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
	(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
	if (!abort) {
		udelay(1);    /* must hold for the longer of 16cclks or 20ns */
		write_csr(dd, DCC_CFG_RESET, reg);
		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	}
}
/*
 * This routine should be called after the link has been transitioned to
 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
 * reset).
 *
 * The expectation is that the caller of this routine would have taken
 * care of properly transitioning the link into the correct state.
 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
 *       before calling this function.
 */
static void _dc_shutdown(struct hfi1_devdata *dd)
{
	lockdep_assert_held(&dd->dc8051_lock);

	if (dd->dc_shutdown)
		return;

	dd->dc_shutdown = 1;
	/* Shutdown the LCB */
	lcb_shutdown(dd, 1);
	/*
	 * Going to OFFLINE would have caused the 8051 to put the
	 * SerDes into reset already. Just need to shut down the 8051,
	 * itself.
	 */
	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
}

static void dc_shutdown(struct hfi1_devdata *dd)
{
	mutex_lock(&dd->dc8051_lock);
	_dc_shutdown(dd);
	mutex_unlock(&dd->dc8051_lock);
}

/*
 * Calling this after the DC has been brought out of reset should not
 * do any damage.
 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
 *       before calling this function.
 */
static void _dc_start(struct hfi1_devdata *dd)
{
	lockdep_assert_held(&dd->dc8051_lock);

	if (!dd->dc_shutdown)
		return;

	/* Take the 8051 out of reset */
	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
	/* Wait until 8051 is ready */
	if (wait_fm_ready(dd, TIMEOUT_8051_START))
		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
			   __func__);

	/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
	write_csr(dd, DCC_CFG_RESET, 0x10);
	/* lcb_shutdown() with abort=1 does not restore these */
	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	dd->dc_shutdown = 0;
}

static void dc_start(struct hfi1_devdata *dd)
{
	mutex_lock(&dd->dc8051_lock);
	_dc_start(dd);
	mutex_unlock(&dd->dc8051_lock);
}
/*
 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
 */
static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
{
	u64 rx_radr, tx_radr;
	u32 version;

	if (dd->icode != ICODE_FPGA_EMULATION)
		return;

	/*
	 * These LCB defaults on emulator _s are good, nothing to do here:
	 *	LCB_CFG_TX_FIFOS_RADR
	 *	LCB_CFG_RX_FIFOS_RADR
	 *	LCB_CFG_LN_DCLK
	 *	LCB_CFG_IGNORE_LOST_RCLK
	 */
	if (is_emulator_s(dd))
		return;
	/* else this is _p */

	version = emulator_rev(dd);
	if (!is_ax(dd))
		version = 0x2d;	/* all B0 use 0x2d or higher settings */

	if (version <= 0x12) {
		/* release 0x12 and below */

		/*
		 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
		 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
		 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
		 */
		rx_radr =
		      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		/*
		 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
		 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
		 */
		tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version <= 0x18) {
		/* release 0x13 up to 0x18 */
		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x19) {
		/* release 0x19 */
		/* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
		rx_radr =
		      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x1a) {
		/* release 0x1a */
		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
	} else {
		/* release 0x1b and higher */
		/* LCB_CFG_RX_FIFOS_RADR = 0x877 */
		rx_radr =
		      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	}

	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
	/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
}
/*
 * Handle a SMA idle message
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_sma_message(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  sma_message_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 msg;
	int ret;

	/*
	 * msg is bytes 1-4 of the 40-bit idle message - the command code
	 * is stripped off
	 */
	ret = read_idle_sma(dd, &msg);
	if (ret)
		return;
	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
	/*
	 * React to the SMA message.  Byte[1] (0 for us) is the command.
	 */
	switch (msg & 0xff) {
	case SMA_IDLE_ARM:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
		 *
		 * Only expected in INIT or ARMED, discard otherwise.
		 */
		if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
			ppd->neighbor_normal = 1;
		break;
	case SMA_IDLE_ACTIVE:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
		 *
		 * Can activate the node.  Discard otherwise.
		 */
		if (ppd->host_link_state == HLS_UP_ARMED &&
		    ppd->is_active_optimize_enabled) {
			ppd->neighbor_normal = 1;
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (ret)
				dd_dev_err(
					dd,
					"%s: received Active SMA idle message, couldn't set link to Active\n",
					__func__);
		}
		break;
	default:
		dd_dev_err(dd,
			   "%s: received unexpected SMA idle message 0x%llx\n",
			   __func__, msg);
		break;
	}
}
static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
{
	u64 rcvctrl;
	unsigned long flags;

	spin_lock_irqsave(&dd->rcvctrl_lock, flags);
	rcvctrl = read_csr(dd, RCV_CTRL);
	rcvctrl |= add;
	rcvctrl &= ~clear;
	write_csr(dd, RCV_CTRL, rcvctrl);
	spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
}

static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
{
	adjust_rcvctrl(dd, add, 0);
}

static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
{
	adjust_rcvctrl(dd, 0, clear);
}
/*
 * Called from all interrupt handlers to start handling an SPC freeze.
 */
void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct send_context *sc;
	int i;

	if (flags & FREEZE_SELF)
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);

	/* enter frozen mode */
	dd->flags |= HFI1_FROZEN;

	/* notify all SDMA engines that they are going into a freeze */
	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));

	/* do halt pre-handling on all enabled send contexts */
	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (sc && (sc->flags & SCF_ENABLED))
			sc_stop(sc, SCF_FROZEN | SCF_HALTED);
	}

	/* Send context are frozen. Notify user space */
	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);

	if (flags & FREEZE_ABORT) {
		dd_dev_err(dd,
			   "Aborted freeze recovery. Please REBOOT system\n");
		return;
	}
	/* queue non-interrupt handler */
	queue_work(ppd->hfi1_wq, &ppd->freeze_work);
}
/*
 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
 * depending on the "freeze" parameter.
 *
 * No need to return an error if it times out, our only option
 * is to proceed anyway.
 */
static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
	while (1) {
		reg = read_csr(dd, CCE_STATUS);
		if (freeze) {
			/* waiting until all indicators are set */
			if ((reg & ALL_FROZE) == ALL_FROZE)
				return;	/* all done */
		} else {
			/* waiting until all indicators are clear */
			if ((reg & ALL_FROZE) == 0)
				return; /* all done */
		}

		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
				   freeze ? "" : "un", reg & ALL_FROZE,
				   freeze ? ALL_FROZE : 0ull);
			return;
		}
		usleep_range(80, 120);
	}
}
/*
 * Do all freeze handling for the RXE block.
 */
static void rxe_freeze(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;

	/* disable port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/* disable all receive contexts */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
}

/*
 * Unfreeze handling for the RXE block - kernel contexts only.
 * This will also enable the port.  User contexts will do unfreeze
 * handling on a per-context basis as they call into the driver.
 *
 */
static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
{
	u32 rcvmask;
	int i;
	struct hfi1_ctxtdata *rcd;

	/* enable all kernel contexts */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);

		/* Ensure all non-user contexts(including vnic) are enabled */
		if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER)) {
			hfi1_rcd_put(rcd);
			continue;
		}
		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
		/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		hfi1_rcd_put(rcd);
	}

	/* enable port */
	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}
/*
 * Non-interrupt SPC freeze handling.
 *
 * This is a work-queue function outside of the triggering interrupt.
 */
void handle_freeze(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  freeze_work);
	struct hfi1_devdata *dd = ppd->dd;

	/* wait for freeze indicators on all affected blocks */
	wait_for_freeze_status(dd, 1);

	/* SPC is now frozen */

	/* do send PIO freeze steps */
	pio_freeze(dd);

	/* do send DMA freeze steps */
	sdma_freeze(dd);

	/* do send egress freeze steps - nothing to do */

	/* do receive freeze steps */
	rxe_freeze(dd);

	/*
	 * Unfreeze the hardware - clear the freeze, wait for each
	 * block's frozen bit to clear, then clear the frozen flag.
	 */
	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
	wait_for_freeze_status(dd, 0);

	if (is_ax(dd)) {
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
		wait_for_freeze_status(dd, 1);
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
		wait_for_freeze_status(dd, 0);
	}

	/* do send PIO unfreeze steps for kernel contexts */
	pio_kernel_unfreeze(dd);

	/* do send DMA unfreeze steps */
	sdma_unfreeze(dd);

	/* do send egress unfreeze steps - nothing to do */

	/* do receive unfreeze steps for kernel contexts */
	rxe_kernel_unfreeze(dd);

	/*
	 * The unfreeze procedure touches global device registers when
	 * it disables and re-enables RXE. Mark the device unfrozen
	 * after all that is done so other parts of the driver waiting
	 * for the device to unfreeze don't do things out of order.
	 *
	 * The above implies that the meaning of HFI1_FROZEN flag is
	 * "Device has gone into freeze mode and freeze mode handling
	 * is still in progress."
	 *
	 * The flag will be removed when freeze mode processing has
	 * completed.
	 */
	dd->flags &= ~HFI1_FROZEN;
	wake_up(&dd->event_queue);

	/* no longer frozen */
}
/*
 * Handle a link up interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_up(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_up_work);
	struct hfi1_devdata *dd = ppd->dd;

	set_link_state(ppd, HLS_UP_INIT);

	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
	read_ltp_rtt(dd);
	/*
	 * OPA specifies that certain counters are cleared on a transition
	 * to link up, so do that.
	 */
	clear_linkup_counters(dd);
	/*
	 * And (re)set link up default values.
	 */
	set_linkup_defaults(ppd);

	/*
	 * Set VL15 credits. Use cached value from verify cap interrupt.
	 * In case of quick linkup or simulator, vl15 value will be set by
	 * handle_linkup_change. VerifyCap interrupt handler will not be
	 * called in those scenarios.
	 */
	if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
		set_up_vl15(dd, dd->vl15buf_cached);

	/* enforce link speed enabled */
	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
		/* oops - current speed is not enabled, bounce */
		dd_dev_err(dd,
			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
			   ppd->link_speed_active, ppd->link_speed_enabled);
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
				     OPA_LINKDOWN_REASON_SPEED_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	}
}
/*
 * Several pieces of LNI information were cached for SMA in ppd.
 * Reset these on link down
 */
static void reset_neighbor_info(struct hfi1_pportdata *ppd)
{
	ppd->neighbor_guid = 0;
	ppd->neighbor_port_number = 0;
	ppd->neighbor_type = 0;
	ppd->neighbor_fm_security = 0;
}
static const char * const link_down_reason_strs[] = {
	[OPA_LINKDOWN_REASON_NONE] = "None",
	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
	[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
	[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
	[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
	[OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
	[OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
	[OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
	[OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
	[OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
	[OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
	[OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
	[OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
	[OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
	[OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
	[OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
	[OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
	[OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
	[OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
	[OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
	[OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
	[OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
	[OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
	[OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
	[OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
	[OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
	[OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
	[OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
	[OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
	[OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
	[OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
	[OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
	[OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
		"Excessive buffer overrun",
	[OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
	[OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
	[OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
	[OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
	[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
	[OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
	[OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
	[OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
		"Local media not installed",
	[OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
	[OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
	[OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
		"End to end not installed",
	[OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
	[OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
	[OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
	[OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
	[OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
	[OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
};

/* return the neighbor link down reason string */
static const char *link_down_reason_str(u8 reason)
{
	const char *str = NULL;

	if (reason < ARRAY_SIZE(link_down_reason_strs))
		str = link_down_reason_strs[reason];
	if (!str)
		str = "(invalid)";

	return str;
}
/*
 * Handle a link down interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_down(struct work_struct *work)
{
	u8 lcl_reason, neigh_reason = 0;
	u8 link_down_reason;
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_down_work);
	int was_up;
	static const char ldr_str[] = "Link down reason: ";

	if ((ppd->host_link_state &
	     (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
	     ppd->port_type == PORT_TYPE_FIXED)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);

	/* Go offline first, then deal with reading/writing through 8051 */
	was_up = !!(ppd->host_link_state & HLS_UP);
	set_link_state(ppd, HLS_DN_OFFLINE);
	xchg(&ppd->is_link_down_queued, 0);

	if (was_up) {
		lcl_reason = 0;
		/* link down reason is only valid if the link was up */
		read_link_down_reason(ppd->dd, &link_down_reason);
		switch (link_down_reason) {
		case LDR_LINK_TRANSFER_ACTIVE_LOW:
			/* the link went down, no idle message reason */
			dd_dev_info(ppd->dd, "%sUnexpected link down\n",
				    ldr_str);
			break;
		case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
			/*
			 * The neighbor reason is only valid if an idle message
			 * was received for it.
			 */
			read_planned_down_reason_code(ppd->dd, &neigh_reason);
			dd_dev_info(ppd->dd,
				    "%sNeighbor link down message %d, %s\n",
				    ldr_str, neigh_reason,
				    link_down_reason_str(neigh_reason));
			break;
		case LDR_RECEIVED_HOST_OFFLINE_REQ:
			dd_dev_info(ppd->dd,
				    "%sHost requested link to go offline\n",
				    ldr_str);
			break;
		default:
			dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
				    ldr_str, link_down_reason);
			break;
		}

		/*
		 * If no reason, assume peer-initiated but missed
		 * LinkGoingDown idle flits.
		 */
		if (neigh_reason == 0)
			lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
	} else {
		/* went down while polling or going up */
		lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
	}

	set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);

	/* inform the SMA when the link transitions from up to down */
	if (was_up && ppd->local_link_down_reason.sma == 0 &&
	    ppd->neigh_link_down_reason.sma == 0) {
		ppd->local_link_down_reason.sma =
			ppd->local_link_down_reason.latest;
		ppd->neigh_link_down_reason.sma =
			ppd->neigh_link_down_reason.latest;
	}

	reset_neighbor_info(ppd);

	/* disable the port */
	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/*
	 * If there is no cable attached, turn the DC off. Otherwise,
	 * start the link bring up.
	 */
	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
		dc_shutdown(ppd->dd);
	else
		start_link(ppd);
}
void handle_link_bounce(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_bounce_work);

	/*
	 * Only do something if the link is currently up.
	 */
	if (ppd->host_link_state & HLS_UP) {
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	} else {
		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
			    __func__, link_state_name(ppd->host_link_state));
	}
}
/*
 * Mask conversion: Capability exchange to Port LTP. The capability
 * exchange has an implicit 16b CRC that is mandatory.
 */
static int cap_to_port_ltp(int cap)
{
	int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */

	if (cap & CAP_CRC_14B)
		port_ltp |= PORT_LTP_CRC_MODE_14;
	if (cap & CAP_CRC_48B)
		port_ltp |= PORT_LTP_CRC_MODE_48;
	if (cap & CAP_CRC_12B_16B_PER_LANE)
		port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;

	return port_ltp;
}
/*
 * Convert an OPA Port LTP mask to capability mask
 */
int port_ltp_to_cap(int port_ltp)
{
	int cap_mask = 0;

	if (port_ltp & PORT_LTP_CRC_MODE_14)
		cap_mask |= CAP_CRC_14B;
	if (port_ltp & PORT_LTP_CRC_MODE_48)
		cap_mask |= CAP_CRC_48B;
	if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
		cap_mask |= CAP_CRC_12B_16B_PER_LANE;

	return cap_mask;
}
/*
 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
 */
static int lcb_to_port_ltp(int lcb_crc)
{
	int port_ltp = 0;

	if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
		port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
	else if (lcb_crc == LCB_CRC_48B)
		port_ltp = PORT_LTP_CRC_MODE_48;
	else if (lcb_crc == LCB_CRC_14B)
		port_ltp = PORT_LTP_CRC_MODE_14;
	else
		port_ltp = PORT_LTP_CRC_MODE_16;

	return port_ltp;
}
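/*
 * Worked example for the two conversions above: an enabled capability mask
 * of (CAP_CRC_14B | CAP_CRC_48B) yields an LTP mask with the mandatory 16b
 * bit plus the 14b and 48b mode bits, while a single negotiated LCB mode of
 * LCB_CRC_14B maps back to just PORT_LTP_CRC_MODE_14.
 */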
/*
 * Our neighbor has indicated that we are allowed to act as a fabric
 * manager, so place the full management partition key in the second
 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
 * that we should already have the limited management partition key in
 * array element 1, and also that the port is not yet up when
 * add_full_mgmt_pkey() is invoked.
 */
static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
	if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
		dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
			    __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
	ppd->pkeys[2] = FULL_MGMT_P_KEY;
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
	hfi1_event_pkey_change(ppd->dd, ppd->port);
}
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
	if (ppd->pkeys[2] != 0) {
		ppd->pkeys[2] = 0;
		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
		hfi1_event_pkey_change(ppd->dd, ppd->port);
	}
}
/*
 * Convert the given link width to the OPA link width bitmask.
 */
static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
{
	switch (width) {
	case 0:
		/*
		 * Simulator and quick linkup do not set the width.
		 * Just set it to 4x without complaint.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
			return OPA_LINK_WIDTH_4X;
		return 0; /* no lanes up */
	case 1: return OPA_LINK_WIDTH_1X;
	case 2: return OPA_LINK_WIDTH_2X;
	case 3: return OPA_LINK_WIDTH_3X;
	default:
		dd_dev_info(dd, "%s: invalid width %d, using 4\n",
			    __func__, width);
		/* fall through */
	case 4: return OPA_LINK_WIDTH_4X;
	}
}
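/*
 * Example for link_width_to_bits() above: a lane count of 3 returns
 * OPA_LINK_WIDTH_3X, while a count of 0 on the functional simulator or
 * during quick linkup is reported as 4X, since neither sets the width.
 */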
/*
 * Do a population count on the bottom nibble.
 */
static const u8 bit_counts[16] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
};

static inline u8 nibble_to_count(u8 nibble)
{
	return bit_counts[nibble & 0xf];
}
/*
 * Read the active lane information from the 8051 registers and return
 * their widths.
 *
 * Active lane information is found in these 8051 registers:
 *	enable_lane_tx
 *	enable_lane_rx
 */
static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
			    u16 *rx_width)
{
	u16 tx, rx;
	u8 enable_lane_rx;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	u8 max_rate;

	/* read the active lanes */
	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			 &rx_polarity_inversion, &max_rate);
	read_local_lni(dd, &enable_lane_rx);

	/* convert to counts */
	tx = nibble_to_count(enable_lane_tx);
	rx = nibble_to_count(enable_lane_rx);

	/*
	 * Set link_speed_active here, overriding what was set in
	 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
	 * set the max_rate field in handle_verify_cap until v0.19.
	 */
	if ((dd->icode == ICODE_RTL_SILICON) &&
	    (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
		/* max_rate: 0 = 12.5G, 1 = 25G */
		switch (max_rate) {
		case 0:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		default:
			dd_dev_err(dd,
				   "%s: unexpected max rate %d, using 25Gb\n",
				   __func__, (int)max_rate);
			/* fall through */
		case 1:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	}

	dd_dev_info(dd,
		    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
		    enable_lane_tx, tx, enable_lane_rx, rx);
	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);
}
/*
 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
 * Valid after the end of VerifyCap and during LinkUp. Does not change
 * after link up. I.e. look elsewhere for downgrade information.
 *
 * Bits are:
 *	+ bits [7:4] contain the number of active transmitters
 *	+ bits [3:0] contain the number of active receivers
 * These are numbers 1 through 4 and can be different values if the
 * link is asymmetric.
 *
 * verify_cap_local_fm_link_width[0] retains its original value.
 */
static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
			      u16 *rx_width)
{
	u16 widths, tx, rx;
	u8 misc_bits, local_flags;
	u16 active_tx, active_rx;

	read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
	tx = widths >> 12;
	rx = (widths >> 8) & 0xf;

	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);

	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
}
/*
 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
 * hardware information when the link first comes up.
 *
 * The link width is not available until after VerifyCap.AllFramesReceived
 * (the trigger for handle_verify_cap), so this is outside that routine
 * and should be called when the 8051 signals linkup.
 */
void get_linkup_link_widths(struct hfi1_pportdata *ppd)
{
	u16 tx_width, rx_width;

	/* get end-of-LNI link widths */
	get_linkup_widths(ppd->dd, &tx_width, &rx_width);

	/* use tx_width as the link is supposed to be symmetric on link up */
	ppd->link_width_active = tx_width;
	/* link width downgrade active (LWD.A) starts out matching LW.A */
	ppd->link_width_downgrade_tx_active = ppd->link_width_active;
	ppd->link_width_downgrade_rx_active = ppd->link_width_active;
	/* per OPA spec, on link up LWD.E resets to LWD.S */
	ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
	/* cache the active egress rate (units of 10^6 bits/sec) */
	ppd->current_egress_rate = active_egress_rate(ppd);
}
/*
 * Handle a verify capabilities interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_verify_cap(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_vc_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	u8 power_management;
	u8 continuous;
	u8 vcu;
	u8 vau;
	u8 z;
	u16 vl15buf;
	u16 link_widths;
	u16 crc_mask;
	u16 crc_val;
	u16 device_id;
	u16 active_tx, active_rx;
	u8 partner_supported_crc;
	u8 remote_tx_rate;
	u8 device_rev;

	set_link_state(ppd, HLS_VERIFY_CAP);

	lcb_shutdown(dd, 0);
	adjust_lcb_for_fpga_serdes(dd);

	read_vc_remote_phy(dd, &power_management, &continuous);
	read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
			      &partner_supported_crc);
	read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
	read_remote_device_id(dd, &device_id, &device_rev);
	/*
	 * And the 'MgmtAllowed' information, which is exchanged during
	 * LNI, is also available at this point.
	 */
	read_mgmt_allowed(dd, &ppd->mgmt_allowed);
	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
	dd_dev_info(dd,
		    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
		    (int)power_management, (int)continuous);
	dd_dev_info(dd,
		    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
		    (int)vau, (int)z, (int)vcu, (int)vl15buf,
		    (int)partner_supported_crc);
	dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
		    (u32)remote_tx_rate, (u32)link_widths);
	dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
		    (u32)device_id, (u32)device_rev);
	/*
	 * The peer vAU value just read is the peer receiver value. HFI does
	 * not support a transmit vAU of 0 (AU == 8). We advertised that
	 * with Z=1 in the fabric capabilities sent to the peer. The peer
	 * will see our Z=1, and, if it advertised a vAU of 0, will move its
	 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
	 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
	 * subject to the Z value exception.
	 */
	if (vau == 0)
		vau = 1;
	set_up_vau(dd, vau);

	/*
	 * Set VL15 credits to 0 in global credit register. Cache remote VL15
	 * credits value and wait for link-up interrupt to set it.
	 */
	set_up_vl15(dd, 0);
	dd->vl15buf_cached = vl15buf;

	/* set up the LCB CRC mode */
	crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;

	/* order is important: use the lowest bit in common */
	if (crc_mask & CAP_CRC_14B)
		crc_val = LCB_CRC_14B;
	else if (crc_mask & CAP_CRC_48B)
		crc_val = LCB_CRC_48B;
	else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
		crc_val = LCB_CRC_12B_16B_PER_LANE;
	else
		crc_val = LCB_CRC_16B;

	dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
	write_csr(dd, DC_LCB_CFG_CRC_MODE,
		  (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);

	/* set (14b only) or clear sideband credit */
	reg = read_csr(dd, SEND_CM_CTRL);
	if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
		write_csr(dd, SEND_CM_CTRL,
			  reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
	} else {
		write_csr(dd, SEND_CM_CTRL,
			  reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
	}

	ppd->link_speed_active = 0;	/* invalid value */
	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
		/* remote_tx_rate: 0 = 12.5G, 1 = 25G */
		switch (remote_tx_rate) {
		case 0:
			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		case 1:
			ppd->link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	} else {
		/* actual rate is highest bit of the ANDed rates */
		u8 rate = remote_tx_rate & ppd->local_tx_rate;

		if (rate & 2)
			ppd->link_speed_active = OPA_LINK_SPEED_25G;
		else if (rate & 1)
			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
	}
	if (ppd->link_speed_active == 0) {
		dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
			   __func__, (int)remote_tx_rate);
		ppd->link_speed_active = OPA_LINK_SPEED_25G;
	}

	/*
	 * Cache the values of the supported, enabled, and active
	 * LTP CRC modes to return in 'portinfo' queries. But the bit
	 * flags that are returned in the portinfo query differ from
	 * what's in the link_crc_mask, crc_sizes, and crc_val
	 * variables. Convert these here.
	 */
	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* supported crc modes */
	ppd->port_ltp_crc_mode |=
		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
		/* enabled crc modes */
	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
		/* active crc mode */

	/* set up the remote credit return table */
	assign_remote_cm_au_table(dd, vcu);

	/*
	 * The LCB is reset on entry to handle_verify_cap(), so this must
	 * be applied on every link up.
	 *
	 * Adjust LCB error kill enable to kill the link if
	 * these RBUF errors are seen:
	 *	REPLAY_BUF_MBE_SMASK
	 *	FLIT_INPUT_BUF_MBE_SMASK
	 */
	if (is_ax(dd)) {	/* fixed in B0 */
		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
		reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
			| DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
		write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
	}

	/* pull LCB fifos out of reset - all fifo clocks must be stable */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);

	/* give 8051 access to the LCB CSRs */
	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
	set_8051_lcb_access(dd);

	if (ppd->mgmt_allowed)
		add_full_mgmt_pkey(ppd);

	/* tell the 8051 to go to LinkUp */
	set_link_state(ppd, HLS_GOING_UP);
}
/*
 * Apply the link width downgrade enabled policy against the current active
 * link widths.
 *
 * Called when the enabled policy changes or the active link widths change.
 */
void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
{
	int do_bounce = 0;
	int tries;
	u16 lwde;
	u16 tx, rx;

	/* use the hls lock to avoid a race with actual link up */
	tries = 0;
retry:
	mutex_lock(&ppd->hls_lock);
	/* only apply if the link is up */
	if (ppd->host_link_state & HLS_DOWN) {
		/* still going up..wait and retry */
		if (ppd->host_link_state & HLS_GOING_UP) {
			if (++tries < 1000) {
				mutex_unlock(&ppd->hls_lock);
				usleep_range(100, 120); /* arbitrary */
				goto retry;
			}
			dd_dev_err(ppd->dd,
				   "%s: giving up waiting for link state change\n",
				   __func__);
		}
		goto done;
	}

	lwde = ppd->link_width_downgrade_enabled;

	if (refresh_widths) {
		get_link_widths(ppd->dd, &tx, &rx);
		ppd->link_width_downgrade_tx_active = tx;
		ppd->link_width_downgrade_rx_active = rx;
	}

	if (ppd->link_width_downgrade_tx_active == 0 ||
	    ppd->link_width_downgrade_rx_active == 0) {
		/* the 8051 reported a dead link as a downgrade */
		dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
	} else if (lwde == 0) {
		/* downgrade is disabled */

		/* bounce if not at starting active width */
		if ((ppd->link_width_active !=
		     ppd->link_width_downgrade_tx_active) ||
		    (ppd->link_width_active !=
		     ppd->link_width_downgrade_rx_active)) {
			dd_dev_err(ppd->dd,
				   "Link downgrade is disabled and link has downgraded, downing link\n");
			dd_dev_err(ppd->dd,
				   "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
				   ppd->link_width_active,
				   ppd->link_width_downgrade_tx_active,
				   ppd->link_width_downgrade_rx_active);
			do_bounce = 1;
		}
	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
		   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
		/* Tx or Rx is outside the enabled policy */
		dd_dev_err(ppd->dd,
			   "Link is outside of downgrade allowed, downing link\n");
		dd_dev_err(ppd->dd,
			   "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
			   lwde, ppd->link_width_downgrade_tx_active,
			   ppd->link_width_downgrade_rx_active);
		do_bounce = 1;
	}

done:
	mutex_unlock(&ppd->hls_lock);

	if (do_bounce) {
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
				     OPA_LINKDOWN_REASON_WIDTH_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	}
}
/*
 * Handle a link downgrade interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_downgrade(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_downgrade_work);

	dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
	apply_link_downgrade_policy(ppd, 1);
}
static char *dcc_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dcc_err_flags,
			   ARRAY_SIZE(dcc_err_flags));
}

static char *lcb_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, lcb_err_flags,
			   ARRAY_SIZE(lcb_err_flags));
}

static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_err_flags,
			   ARRAY_SIZE(dc8051_err_flags));
}

static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
			   ARRAY_SIZE(dc8051_info_err_flags));
}

static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
			   ARRAY_SIZE(dc8051_info_host_msg_flags));
}
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 info, err, host_msg;
	int queue_link_down = 0;
	char buf[96];

	/* look at the flags */
	if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
		/* 8051 information set by firmware */
		/* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
		info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
		err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
		host_msg = (info >>
			DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;

		/*
		 * Handle error flags.
		 */
		if (err & FAILED_LNI) {
			/*
			 * LNI error indications are cleared by the 8051
			 * only when starting polling. Only pay attention
			 * to them when in the states that occur during
			 * link negotiation.
			 */
			if (ppd->host_link_state
			    & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
				queue_link_down = 1;
				dd_dev_info(dd, "Link error: %s\n",
					    dc8051_info_err_string(buf,
								   sizeof(buf),
								   err &
								   FAILED_LNI));
			}
			err &= ~(u64)FAILED_LNI;
		}
		/* unknown frames can happen during LNI, just count */
		if (err & UNKNOWN_FRAME) {
			ppd->unknown_frame_count++;
			err &= ~(u64)UNKNOWN_FRAME;
		}
		if (err) {
			/* report remaining errors, but do not do anything */
			dd_dev_err(dd, "8051 info error: %s\n",
				   dc8051_info_err_string(buf, sizeof(buf),
							  err));
		}

		/*
		 * Handle host message flags.
		 */
		if (host_msg & HOST_REQ_DONE) {
			/*
			 * Presently, the driver does a busy wait for
			 * host requests to complete. This is only an
			 * informational message.
			 * NOTE: The 8051 clears the host message
			 * information *on the next 8051 command*.
			 * Therefore, when linkup is achieved,
			 * this flag will still be set.
			 */
			host_msg &= ~(u64)HOST_REQ_DONE;
		}
		if (host_msg & BC_SMA_MSG) {
			queue_work(ppd->link_wq, &ppd->sma_message_work);
			host_msg &= ~(u64)BC_SMA_MSG;
		}
		if (host_msg & LINKUP_ACHIEVED) {
			dd_dev_info(dd, "8051: Link up\n");
			queue_work(ppd->link_wq, &ppd->link_up_work);
			host_msg &= ~(u64)LINKUP_ACHIEVED;
		}
		if (host_msg & EXT_DEVICE_CFG_REQ) {
			handle_8051_request(ppd);
			host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
		}
		if (host_msg & VERIFY_CAP_FRAME) {
			queue_work(ppd->link_wq, &ppd->link_vc_work);
			host_msg &= ~(u64)VERIFY_CAP_FRAME;
		}
		if (host_msg & LINK_GOING_DOWN) {
			const char *extra = "";
			/* no downgrade action needed if going down */
			if (host_msg & LINK_WIDTH_DOWNGRADED) {
				host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
				extra = " (ignoring downgrade)";
			}
			dd_dev_info(dd, "8051: Link down%s\n", extra);
			queue_link_down = 1;
			host_msg &= ~(u64)LINK_GOING_DOWN;
		}
		if (host_msg & LINK_WIDTH_DOWNGRADED) {
			queue_work(ppd->link_wq, &ppd->link_downgrade_work);
			host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
		}
		if (host_msg) {
			/* report remaining messages, but do not do anything */
			dd_dev_info(dd, "8051 info host message: %s\n",
				    dc8051_info_host_msg_string(buf,
								sizeof(buf),
								host_msg));
		}

		reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
	}
	if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
		/*
		 * Lost the 8051 heartbeat. If this happens, we
		 * receive constant interrupts about it. Disable
		 * the interrupt after the first.
		 */
		dd_dev_err(dd, "Lost 8051 heartbeat\n");
		write_csr(dd, DC_DC8051_ERR_EN,
			  read_csr(dd, DC_DC8051_ERR_EN) &
			  ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);

		reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
	}
	if (reg) {
		/* report the error, but do not do anything */
		dd_dev_err(dd, "8051 error: %s\n",
			   dc8051_err_string(buf, sizeof(buf), reg));
	}

	if (queue_link_down) {
		/*
		 * if the link is already going down or disabled, do not
		 * queue another. If there's a link down entry already
		 * queued, don't queue another one.
		 */
		if ((ppd->host_link_state &
		    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
		    ppd->link_enabled == 0) {
			dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
				    __func__, ppd->host_link_state,
				    ppd->link_enabled);
		} else {
			if (xchg(&ppd->is_link_down_queued, 1) == 1)
				dd_dev_info(dd,
					    "%s: link down request already queued\n",
					    __func__);
			else
				queue_work(ppd->link_wq, &ppd->link_down_work);
		}
	}
}
static const char * const fm_config_txt[] = {
	[0] =
		"BadHeadDist: Distance violation between two head flits",
	[1] =
		"BadTailDist: Distance violation between two tail flits",
	[2] =
		"BadCtrlDist: Distance violation between two credit control flits",
	[3] =
		"BadCrdAck: Credits return for unsupported VL",
	[4] =
		"UnsupportedVLMarker: Received VL Marker",
	[5] =
		"BadPreempt: Exceeded the preemption nesting level",
	[6] =
		"BadControlFlit: Received unsupported control flit",
	/* no 7 */
	[8] =
		"UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
};

static const char * const port_rcv_txt[] = {
	[1] =
		"BadPktLen: Illegal PktLen",
	[2] =
		"PktLenTooLong: Packet longer than PktLen",
	[3] =
		"PktLenTooShort: Packet shorter than PktLen",
	[4] =
		"BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
	[5] =
		"BadDLID: Illegal DLID (0, doesn't match HFI)",
	[6] =
		"BadL2: Illegal L2 opcode",
	[7] =
		"BadSC: Unsupported SC",
	[9] =
		"BadRC: Illegal RC",
	[11] =
		"PreemptError: Preempting with same VL",
	[12] =
		"PreemptVL15: Preempting a VL15 packet",
};
#define OPA_LDR_FMCONFIG_OFFSET 16
#define OPA_LDR_PORTRCV_OFFSET 0
static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 info, hdr0, hdr1;
	const char *extra;
	char buf[96];
	struct hfi1_pportdata *ppd = dd->pport;
	u8 lcl_reason = 0;
	int do_bounce = 0;

	if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
		if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
			info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
			dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
		}
		reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
		struct hfi1_pportdata *ppd = dd->pport;
		/* this counter saturates at (2^32) - 1 */
		if (ppd->link_downed < (u32)UINT_MAX)
			ppd->link_downed++;
		reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
		u8 reason_valid = 1;

		info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
		if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
			dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
		}
		switch (info) {
		case 0:
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
			extra = fm_config_txt[info];
			break;
		case 8:
			extra = fm_config_txt[info];
			if (ppd->port_error_action &
			    OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
				do_bounce = 1;
				/*
				 * lcl_reason cannot be derived from info
				 * for this error
				 */
				lcl_reason =
				  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
			}
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}

		if (reason_valid && !do_bounce) {
			do_bounce = ppd->port_error_action &
				(1 << (OPA_LDR_FMCONFIG_OFFSET + info));
			lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
		}

		/* just report this */
		dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
					extra);
		reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
		u8 reason_valid = 1;

		info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
		hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
		hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
		if (!(dd->err_info_rcvport.status_and_code &
		      OPA_EI_STATUS_SMASK)) {
			dd->err_info_rcvport.status_and_code =
				info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_rcvport.status_and_code |=
				OPA_EI_STATUS_SMASK;
			/*
			 * save first 2 flits in the packet that caused
			 * the error
			 */
			dd->err_info_rcvport.packet_flit1 = hdr0;
			dd->err_info_rcvport.packet_flit2 = hdr1;
		}
		switch (info) {
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
		case 9:
		case 11:
		case 12:
			extra = port_rcv_txt[info];
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}

		if (reason_valid && !do_bounce) {
			do_bounce = ppd->port_error_action &
				(1 << (OPA_LDR_PORTRCV_OFFSET + info));
			lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
		}

		/* just report this */
		dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
					"               hdr0 0x%llx, hdr1 0x%llx\n",
					extra, hdr0, hdr1);

		reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
		/* informative only */
		dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
	}
	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
		/* informative only */
		dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
	}

	if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
		reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;

	/* report any remaining errors */
	if (reg)
		dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
					dcc_err_string(buf, sizeof(buf), reg));

	if (lcl_reason == 0)
		lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;

	if (do_bounce) {
		dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
					__func__);
		set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
		queue_work(ppd->link_wq, &ppd->link_bounce_work);
	}
}
static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];

	dd_dev_info(dd, "LCB Error: %s\n",
		    lcb_err_string(buf, sizeof(buf), reg));
}
/*
 * CCE block DC interrupt. Source is < 8.
 */
static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &dc_errs[source];

	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
	} else if (source == 3 /* dc_lbm_int */) {
		/*
		 * This indicates that a parity error has occurred on the
		 * address/control lines presented to the LBM. The error
		 * is a single pulse, there is no associated error flag,
		 * and it is non-maskable. This is because if a parity
		 * error occurs on the request the request is dropped.
		 * This should never occur, but it is nice to know if it
		 * ever does.
		 */
		dd_dev_err(dd, "Parity error in DC LBM block\n");
	} else {
		dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
	}
}
/*
 * TX block send credit interrupt. Source is < 160.
 */
static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
{
	sc_group_release_update(dd, source);
}
/*
 * TX block SDMA interrupt. Source is < 48.
 *
 * SDMA interrupts are grouped by type:
 *
 *	 0 -  N-1 = SDma
 *	 N - 2N-1 = SDmaProgress
 *	2N - 3N-1 = SDmaIdle
 */
static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
{
	/* what interrupt */
	unsigned int what = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
		   slashstrip(__FILE__), __LINE__, __func__);
	sdma_dumpstate(&dd->per_sdma[which]);
#endif

	if (likely(what < 3 && which < dd->num_sdma)) {
		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
	} else {
		/* should not happen */
		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
	}
}
/*
 * RX block receive available interrupt. Source is < 160.
 */
static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
{
	struct hfi1_ctxtdata *rcd;
	char *err_detail;

	if (likely(source < dd->num_rcv_contexts)) {
		rcd = hfi1_rcd_get_by_index(dd, source);
		if (rcd) {
			/* Check for non-user contexts, including vnic */
			if ((source < dd->first_dyn_alloc_ctxt) ||
			    (rcd->sc && (rcd->sc->type == SC_KERNEL)))
				rcd->do_interrupt(rcd, 0);
			else
				handle_user_interrupt(rcd);

			hfi1_rcd_put(rcd);
			return;	/* OK */
		}
		/* received an interrupt, but no rcd */
		err_detail = "dataless";
	} else {
		/* received an interrupt, but are not using that context */
		err_detail = "out of range";
	}
	dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
		   err_detail, source);
}
/*
 * RX block receive urgent interrupt. Source is < 160.
 */
static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
{
	struct hfi1_ctxtdata *rcd;
	char *err_detail;

	if (likely(source < dd->num_rcv_contexts)) {
		rcd = hfi1_rcd_get_by_index(dd, source);
		if (rcd) {
			/* only pay attention to user urgent interrupts */
			if ((source >= dd->first_dyn_alloc_ctxt) &&
			    (!rcd->sc || (rcd->sc->type == SC_USER)))
				handle_user_interrupt(rcd);

			hfi1_rcd_put(rcd);
			return;	/* OK */
		}
		/* received an interrupt, but no rcd */
		err_detail = "dataless";
	} else {
		/* received an interrupt, but are not using that context */
		err_detail = "out of range";
	}
	dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
		   err_detail, source);
}
/*
 * Reserved range interrupt. Should not be called in normal operation.
 */
static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
{
	char name[64];

	dd_dev_err(dd, "unexpected %s interrupt\n",
		   is_reserved_name(name, sizeof(name), source));
}
static const struct is_table is_table[] = {
/*
 * start	end
 *		name func		interrupt func
 */
{ IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
		is_misc_err_name,	is_misc_err_int },
{ IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
		is_sdma_eng_err_name,	is_sdma_eng_err_int },
{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
		is_sendctxt_err_name,	is_sendctxt_err_int },
{ IS_SDMA_START,	 IS_SDMA_END,
		is_sdma_eng_name,	is_sdma_eng_int },
{ IS_VARIOUS_START,	 IS_VARIOUS_END,
		is_various_name,	is_various_int },
{ IS_DC_START,		 IS_DC_END,
		is_dc_name,		is_dc_int },
{ IS_RCVAVAIL_START,	 IS_RCVAVAIL_END,
		is_rcv_avail_name,	is_rcv_avail_int },
{ IS_RCVURGENT_START,	 IS_RCVURGENT_END,
		is_rcv_urgent_name,	is_rcv_urgent_int },
{ IS_SENDCREDIT_START,	 IS_SENDCREDIT_END,
		is_send_credit_name,	is_send_credit_int },
{ IS_RESERVED_START,	 IS_RESERVED_END,
		is_reserved_name,	is_reserved_int },
};
/*
 * Interrupt source interrupt - called when the given source has an interrupt.
 * Source is a bit index into an array of 64-bit integers.
 */
static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
{
	const struct is_table *entry;

	/* avoids a double compare by walking the table in-order */
	for (entry = &is_table[0]; entry->is_name; entry++) {
		if (source < entry->end) {
			trace_hfi1_interrupt(dd, entry, source);
			entry->is_int(dd, source - entry->start);
			return;
		}
	}
	/* fell off the end */
	dd_dev_err(dd, "invalid interrupt source %u\n", source);
}
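/*
 * Example for is_interrupt() above: a source bit inside the SDMA range
 * dispatches to is_sdma_eng_int() with the 0-based offset
 * (source - IS_SDMA_START); every handler sees an index relative to the
 * start of its own range.
 */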
/*
 * General interrupt handler. This is able to correctly handle
 * all interrupts in case INTx is used.
 */
static irqreturn_t general_interrupt(int irq, void *data)
{
	struct hfi1_devdata *dd = data;
	u64 regs[CCE_NUM_INT_CSRS];
	u32 bit;
	int i;
	irqreturn_t handled = IRQ_NONE;

	this_cpu_inc(*dd->int_counter);

	/* phase 1: scan and clear all handled interrupts */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
		if (dd->gi_mask[i] == 0) {
			regs[i] = 0;	/* used later */
			continue;
		}
		regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
				dd->gi_mask[i];
		/* only clear if anything is set */
		if (regs[i])
			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
	}

	/* phase 2: call the appropriate handler */
	for_each_set_bit(bit, (unsigned long *)&regs[0],
			 CCE_NUM_INT_CSRS * 64) {
		is_interrupt(dd, bit);
		handled = IRQ_HANDLED;
	}

	return handled;
}
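/*
 * Note on the two-phase structure above: all pending, unmasked status bits
 * are latched into regs[] and cleared in one pass before any handler runs,
 * so a single invocation services every pending source - the property that
 * makes this handler correct when all interrupts share one INTx line.
 */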
static irqreturn_t sdma_interrupt(int irq, void *data)
{
	struct sdma_engine *sde = data;
	struct hfi1_devdata *dd = sde->dd;
	u64 status;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	sdma_dumpstate(sde);
#endif

	this_cpu_inc(*dd->int_counter);

	/* This read_csr is really bad in the hot path */
	status = read_csr(dd,
			  CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
			  & sde->imask;
	if (likely(status)) {
		/* clear the interrupt(s) */
		write_csr(dd,
			  CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
			  status);

		/* handle the interrupt(s) */
		sdma_engine_interrupt(sde, status);
	} else {
		dd_dev_err_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
				       sde->this_idx);
	}
	return IRQ_HANDLED;
}
/*
 * Clear the receive interrupt. Use a read of the interrupt clear CSR
 * to ensure that the write completed. This does NOT guarantee that
 * queued DMA writes to memory from the chip are pushed.
 */
static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);

	mmiowb();	/* make sure everything before is written */
	write_csr(dd, addr, rcd->imask);
	/* force the above write on the chip and get a value back */
	(void)read_csr(dd, addr);
}
/* force the receive interrupt */
void force_recv_intr(struct hfi1_ctxtdata *rcd)
{
	write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
}
/*
 * Return non-zero if a packet is present.
 *
 * This routine is called when rechecking for packets after the RcvAvail
 * interrupt has been cleared down. First, do a quick check of memory for
 * a packet present. If not found, use an expensive CSR read of the context
 * tail to determine the actual tail. The CSR read is necessary because there
 * is no method to push pending DMAs to memory other than an interrupt and we
 * are trying to determine if we need to force an interrupt.
 */
static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
{
	u32 tail;
	int present;

	if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
		present = (rcd->seq_cnt ==
				rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
	else /* is RDMA rtail */
		present = (rcd->head != get_rcvhdrtail(rcd));

	if (present)
		return 1;

	/* fall back to a CSR read, correct independent of DMA_RTAIL */
	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
	return rcd->head != tail;
}
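/*
 * Example for check_packet_present() above: without DMA_RTAIL, a packet is
 * detected purely from host memory by comparing the expected sequence count
 * against the sequence field of the next receive header, so the expensive
 * CSR tail read only happens when that quick check comes up empty.
 */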
/*
 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
 * This routine will try to handle packets immediately (latency), but if
 * it finds too many, it will invoke the thread handler (bandwidth). The
 * chip receive interrupt is *not* cleared down until this or the thread (if
 * invoked) is finished. The intent is to avoid extra interrupts while we
 * are processing packets anyway.
 */
static irqreturn_t receive_context_interrupt(int irq, void *data)
{
	struct hfi1_ctxtdata *rcd = data;
	struct hfi1_devdata *dd = rcd->dd;
	int disposition;
	int present;

	trace_hfi1_receive_interrupt(dd, rcd);
	this_cpu_inc(*dd->int_counter);
	aspm_ctx_disable(rcd);

	/* receive interrupt remains blocked while processing packets */
	disposition = rcd->do_interrupt(rcd, 0);

	/*
	 * Too many packets were seen while processing packets in this
	 * IRQ handler. Invoke the handler thread. The receive interrupt
	 * remains blocked.
	 */
	if (disposition == RCV_PKT_LIMIT)
		return IRQ_WAKE_THREAD;

	/*
	 * The packet processor detected no more packets. Clear the receive
	 * interrupt and recheck for a packet that may have arrived
	 * after the previous check and interrupt clear. If a packet arrived,
	 * force another interrupt.
	 */
	clear_recv_intr(rcd);
	present = check_packet_present(rcd);
	if (present)
		force_recv_intr(rcd);

	return IRQ_HANDLED;
}
/*
 * Receive packet thread handler. This expects to be invoked with the
 * receive interrupt still blocked.
 */
static irqreturn_t receive_context_thread(int irq, void *data)
{
	struct hfi1_ctxtdata *rcd = data;
	int present;

	/* receive interrupt is still blocked from the IRQ handler */
	(void)rcd->do_interrupt(rcd, 1);

	/*
	 * The packet processor will only return if it detected no more
	 * packets. Hold IRQs here so we can safely clear the interrupt and
	 * recheck for a packet that may have arrived after the previous
	 * check and the interrupt clear. If a packet arrived, force another
	 * interrupt.
	 */
	local_irq_disable();
	clear_recv_intr(rcd);
	present = check_packet_present(rcd);
	if (present)
		force_recv_intr(rcd);
	local_irq_enable();

	return IRQ_HANDLED;
}
/* ========================================================================= */

u32 read_physical_state(struct hfi1_devdata *dd)
{
	u64 reg;

	reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
	return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
				& DC_DC8051_STS_CUR_STATE_PORT_MASK;
}

u32 read_logical_state(struct hfi1_devdata *dd)
{
	u64 reg;

	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
	return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
				& DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
}

static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
{
	u64 reg;

	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
	/* clear current state, set new state */
	reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
	reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
	write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
}
/*
 * Use the 8051 to read a LCB CSR.
 */
static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
{
	u32 regno;
	int ret;

	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		if (acquire_lcb_access(dd, 0) == 0) {
			*data = read_csr(dd, addr);
			release_lcb_access(dd, 0);
			return 0;
		}
		return -EBUSY;
	}

	/* register is an index of LCB registers: (offset - base) / 8 */
	regno = (addr - DC_LCB_CFG_RUN) >> 3;
	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
	if (ret != HCMD_SUCCESS)
		return -EBUSY;
	return 0;
}
/*
 * Provide a cache for some of the LCB registers in case the LCB is
 * unavailable.
 * (The LCB is unavailable in certain link states, for example.)
 */
struct lcb_datum {
	u32 off;
	u64 val;
};

static struct lcb_datum lcb_cache[] = {
	{ DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0 },
	{ DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
	{ DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
};

static void update_lcb_cache(struct hfi1_devdata *dd)
{
	int i;
	int ret;
	u64 val;

	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
		ret = read_lcb_csr(dd, lcb_cache[i].off, &val);

		/* Update if we get good data */
		if (likely(ret != -EBUSY))
			lcb_cache[i].val = val;
	}
}
static int read_lcb_cache(u32 off, u64 *val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
		if (lcb_cache[i].off == off) {
			*val = lcb_cache[i].val;
			return 0;
		}
	}

	pr_warn("%s bad offset 0x%x\n", __func__, off);
	return -1;
}
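/*
 * Note on the cache above: update_lcb_cache() is expected to be called
 * while the LCB is still reachable, so that read_lcb_csr() callers in
 * transient link states can fall back to the last good snapshot of these
 * error counters instead of failing with -EBUSY.
 */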
/*
 * Read an LCB CSR. Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return read_lcb_via_8051(dd, addr, data);
	/* if going up or down, check the cache, otherwise, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
		if (read_lcb_cache(addr, data))
			return -EBUSY;
		return 0;
	}

	/* otherwise, host has access */
	*data = read_csr(dd, addr);
	return 0;
}
/*
 * Use the 8051 to write a LCB CSR.
 */
static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	u32 regno;
	int ret;

	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
	    (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
		if (acquire_lcb_access(dd, 0) == 0) {
			write_csr(dd, addr, data);
			release_lcb_access(dd, 0);
			return 0;
		}
		return -EBUSY;
	}

	/* register is an index of LCB registers: (offset - base) / 8 */
	regno = (addr - DC_LCB_CFG_RUN) >> 3;
	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
	if (ret != HCMD_SUCCESS)
		return -EBUSY;
	return 0;
}
/*
 * Write an LCB CSR. Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return write_lcb_via_8051(dd, addr, data);
	/* if going up or down, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
		return -EBUSY;

	/* otherwise, host has access */
	write_csr(dd, addr, data);
	return 0;
}
/*
 * Returns:
 *	< 0 = Linux error, not able to get access
 *	> 0 = 8051 command RETURN_CODE
 */
static int do_8051_command(
	struct hfi1_devdata *dd,
	u32 type,
	u64 in_data,
	u64 *out_data)
{
	u64 reg, completed;
	int return_code;
	unsigned long timeout;

	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);

	mutex_lock(&dd->dc8051_lock);

	/* We can't send any commands to the 8051 if it's in reset */
	if (dd->dc_shutdown) {
		return_code = -ENODEV;
		goto fail;
	}

	/*
	 * If an 8051 host command timed out previously, then the 8051 is
	 * stuck.
	 *
	 * On first timeout, attempt to reset and restart the entire DC
	 * block (including 8051). (Is this too big of a hammer?)
	 *
	 * If the 8051 times out a second time, the reset did not bring it
	 * back to healthy life. In that case, fail any subsequent commands.
	 */
	if (dd->dc8051_timed_out) {
		if (dd->dc8051_timed_out > 1) {
			dd_dev_err(dd,
				   "Previous 8051 host command timed out, skipping command %u\n",
				   type);
			return_code = -ENXIO;
			goto fail;
		}
		_dc_shutdown(dd);
		_dc_start(dd);
	}

	/*
	 * If there is no timeout, then the 8051 command interface is
	 * waiting for a command.
	 */

	/*
	 * When writing a LCB CSR, out_data contains the full value to
	 * be written, while in_data contains the relative LCB
	 * address in 7:0. Do the work here, rather than the caller,
	 * of distributing the write data to where it needs to go:
	 *
	 * Write data
	 *   39:00 -> in_data[47:8]
	 *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
	 *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
	 */
	if (type == HCMD_WRITE_LCB_CSR) {
		in_data |= ((*out_data) & 0xffffffffffull) << 8;
		/* must preserve COMPLETED - it is tied to hardware */
		reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
		reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
		reg |= ((((*out_data) >> 40) & 0xff) <<
				DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
		      | ((((*out_data) >> 48) & 0xffff) <<
				DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
	}

	/*
	 * Do two writes: the first to stabilize the type and req_data, the
	 * second to activate.
	 */
	reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
			<< DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
		| (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
			<< DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);

	/* wait for completion, alternate: interrupt */
	timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
	while (1) {
		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
		completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
		if (completed)
			break;
		if (time_after(jiffies, timeout)) {
			dd->dc8051_timed_out++;
			dd_dev_err(dd, "8051 host command %u timeout\n", type);
			if (out_data)
				*out_data = 0;
			return_code = -ETIMEDOUT;
			goto fail;
		}
		udelay(2);
	}

	if (out_data) {
		*out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
		if (type == HCMD_READ_LCB_CSR) {
			/* top 16 bits are in a different register */
			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
				& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
				<< (48
				    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
		}
	}
	return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
	dd->dc8051_timed_out = 0;
	/*
	 * Clear command for next user.
	 */
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);

fail:
	mutex_unlock(&dd->dc8051_lock);
	return return_code;
}
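/*
 * Example of the activation sequence in do_8051_command() above: a
 * HCMD_READ_LCB_CSR request is first written to HOST_CMD_0 with REQ_TYPE
 * and REQ_DATA stable, then rewritten with REQ_NEW set, and completion is
 * polled in HOST_CMD_1 until COMPLETED is observed or the timeout fires.
 */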
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
{
	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}
int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
		     u8 lane_id, u32 config_data)
{
	u64 data;
	int ret;

	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "load 8051 config: field id %d, lane %d, err %d\n",
			   (int)field_id, (int)lane_id, ret);
	}
	return ret;
}
/*
 * Read the 8051 firmware "registers". Use the RAM directly. Always
 * set the result, even on error.
 * Return 0 on success, -errno on failure
 */
int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
		     u32 *result)
{
	u64 big_data;
	u32 addr;
	int ret;

	/* address start depends on the lane_id */
	if (lane_id < 4)
		addr = (4 * NUM_GENERAL_FIELDS)
			+ (lane_id * 4 * NUM_LANE_FIELDS);
	else
		addr = 0;
	addr += field_id * 4;

	/* read is in 8-byte chunks, hardware will truncate the address down */
	ret = read_8051_data(dd, addr, 8, &big_data);

	if (ret == 0) {
		/* extract the 4 bytes we want */
		if (addr & 0x4)
			*result = (u32)(big_data >> 32);
		else
			*result = (u32)big_data;
	} else {
		*result = 0;
		dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
			   __func__, lane_id, field_id);
	}

	return ret;
}
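/*
 * Example of the addressing in read_8051_config() above: per-lane fields
 * are laid out after the general fields, so (field_id, lane_id) selects
 * the 4-byte word at
 *	4 * NUM_GENERAL_FIELDS + lane_id * 4 * NUM_LANE_FIELDS + field_id * 4
 * and bit 2 of that address picks the upper or lower half of the 8-byte
 * RAM read.
 */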
static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
			      u8 continuous)
{
	u32 frame;

	frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
		| power_management << POWER_MANAGEMENT_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
				GENERAL_CONFIG, frame);
}
static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
				 u16 vl15buf, u8 crc_sizes)
{
	u32 frame;

	frame = (u32)vau << VAU_SHIFT
		| (u32)z << Z_SHIFT
		| (u32)vcu << VCU_SHIFT
		| (u32)vl15buf << VL15BUF_SHIFT
		| (u32)crc_sizes << CRC_SIZES_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
				GENERAL_CONFIG, frame);
}
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
			 &frame);
	*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
	*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}
static int write_vc_local_link_width(struct hfi1_devdata *dd,
				     u8 misc_bits,
				     u8 flag_bits,
				     u16 link_widths)
{
	u32 frame;

	frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
		| (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
		| (u32)link_widths << LINK_WIDTH_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
				frame);
}
static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
				 u8 device_rev)
{
	u32 frame;

	frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
		| ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
	return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
}
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev)
{
	u32 frame;

	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
	*device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
	*device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
			& REMOTE_DEVICE_REV_MASK;
}
int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
{
	u32 frame;
	u32 mask;

	mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
	read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
	/* Clear, then set field */
	frame &= ~mask;
	frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
	return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
				frame);
}
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
		      u8 *ver_patch)
{
	u32 frame;

	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
	*ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
		STS_FM_VERSION_MAJOR_MASK;
	*ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
		STS_FM_VERSION_MINOR_MASK;

	read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
	*ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
		STS_FM_VERSION_PATCH_MASK;
}
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
	*power_management = (frame >> POWER_MANAGEMENT_SHIFT)
				& POWER_MANAGEMENT_MASK;
	*continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
				& CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
}
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
	*vau = (frame >> VAU_SHIFT) & VAU_MASK;
	*z = (frame >> Z_SHIFT) & Z_MASK;
	*vcu = (frame >> VCU_SHIFT) & VCU_MASK;
	*vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
	*crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
}
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate,
				      u16 *link_widths)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
			 &frame);
	*remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
				& REMOTE_TX_RATE_MASK;
	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
{
	u32 frame;

	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
	*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
}
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
{
	u32 frame;

	read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
	*mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
}
static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
{
	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
}

static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
{
	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
}
void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
{
	u32 frame;
	int ret;

	*link_quality = 0;
	if (dd->pport->host_link_state & HLS_UP) {
		ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
				       &frame);
		if (ret == 0)
			*link_quality = (frame >> LINK_QUALITY_SHIFT)
						& LINK_QUALITY_MASK;
	}
}
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
{
	u32 frame;

	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
	*pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
}
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
{
	u32 frame;

	read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
	*ldr = (frame & 0xff);
}
static int read_tx_settings(struct hfi1_devdata *dd,
			    u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion,
			    u8 *max_rate)
{
	u32 frame;
	int ret;

	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
	*enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
				& ENABLE_LANE_TX_MASK;
	*tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
				& TX_POLARITY_INVERSION_MASK;
	*rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
				& RX_POLARITY_INVERSION_MASK;
	*max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
	return ret;
}
static int write_tx_settings(struct hfi1_devdata *dd,
			     u8 enable_lane_tx,
			     u8 tx_polarity_inversion,
			     u8 rx_polarity_inversion,
			     u8 max_rate)
{
	u32 frame;

	/* no need to mask, all variable sizes match field widths */
	frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
		| tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
		| rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
		| max_rate << MAX_RATE_SHIFT;
	return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
}
/*
 * Read an idle LCB message.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
{
	int ret;

	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "read idle message: type %d, err %d\n",
			   (u32)type, ret);
		return -EINVAL;
	}
	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
	/* return only the payload as we already know the type */
	*data_out >>= IDLE_PAYLOAD_SHIFT;
	return 0;
}
/*
 * Read an idle SMA message. To be done in response to a notification from
 * the 8051.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
{
	return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
				 data);
}
/*
 * Send an idle LCB message.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int send_idle_message(struct hfi1_devdata *dd, u64 data)
{
	int ret;

	dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
	ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
			   data, ret);
		return -EINVAL;
	}
	return 0;
}
/*
 * Send an idle SMA message.
 *
 * Returns 0 on success, -EINVAL on error
 */
int send_idle_sma(struct hfi1_devdata *dd, u64 message)
{
	u64 data;

	data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
		((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
	return send_idle_message(dd, data);
}
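/*
 * Example of the idle message framing used by the SMA helpers above: the
 * message type is placed at IDLE_MSG_TYPE_SHIFT and the payload at
 * IDLE_PAYLOAD_SHIFT, which is why read_idle_message() can return the
 * payload alone by shifting the type away.
 */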
/*
 * Initialize the LCB then do a quick link up. This may or may not be
 * in loopback.
 *
 * return 0 on success, -errno on error
 */
static int do_quick_linkup(struct hfi1_devdata *dd)
{
	int ret;

	lcb_shutdown(dd, 0);

	if (loopback) {
		/* LCB_CFG_LOOPBACK.VAL = 2 */
		/* LCB_CFG_LANE_WIDTH.VAL = 0 */
		write_csr(dd, DC_LCB_CFG_LOOPBACK,
			  IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
	}

	/* start the LCBs */
	/* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);

	/* simulator only loopback steps */
	if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		/* LCB_CFG_RUN.EN = 1 */
		write_csr(dd, DC_LCB_CFG_RUN,
			  1ull << DC_LCB_CFG_RUN_EN_SHIFT);

		ret = wait_link_transfer_active(dd, 10);
		if (ret)
			return ret;

		write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
			  1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
	}

	if (!loopback) {
		/*
		 * When doing quick linkup and not in loopback, both
		 * sides must be done with LCB set-up before either
		 * starts the quick linkup. Put a delay here so that
		 * both sides can be started and have a chance to be
		 * done with LCB set up before resuming.
		 */
		dd_dev_err(dd,
			   "Pausing for peer to be finished with LCB set up\n");
		msleep(5000);
		dd_dev_err(dd, "Continuing with quick linkup\n");
	}

	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
	set_8051_lcb_access(dd);

	/*
	 * State "quick" LinkUp request sets the physical link state to
	 * LinkUp without a verify capability sequence.
	 * This state is in simulator v37 and later.
	 */
	ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "%s: set physical link state to quick LinkUp failed with return %d\n",
			   __func__, ret);

		set_host_lcb_access(dd);
		write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */

		if (ret >= 0)
			ret = -EINVAL;
		return ret;
	}

	return 0; /* success */
}
/*
 * Set the SerDes to internal loopback mode.
 * Returns 0 on success, -errno on error.
 */
static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
{
	int ret;

	ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
	if (ret == HCMD_SUCCESS)
		return 0;
	dd_dev_err(dd,
		   "Set physical link state to SerDes Loopback failed with return %d\n",
		   ret);
	if (ret >= 0)
		ret = -EINVAL;
	return ret;
}
/*
 * Do all special steps to set up loopback.
 */
static int init_loopback(struct hfi1_devdata *dd)
{
	dd_dev_info(dd, "Entering loopback mode\n");

	/* all loopbacks should disable self GUID check */
	write_csr(dd, DC_DC8051_CFG_MODE,
		  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));

	/*
	 * The simulator has only one loopback option - LCB. Switch
	 * to that option, which includes quick link up.
	 *
	 * Accept all valid loopback values.
	 */
	if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
	    (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
	     loopback == LOOPBACK_CABLE)) {
		loopback = LOOPBACK_LCB;
		quick_linkup = 1;
		return 0;
	}

	/* handle serdes loopback */
	if (loopback == LOOPBACK_SERDES) {
		/* internal serdes loopback needs quick linkup on RTL */
		if (dd->icode == ICODE_RTL_SILICON)
			quick_linkup = 1;
		return set_serdes_loopback_mode(dd);
	}

	/* LCB loopback - handled at poll time */
	if (loopback == LOOPBACK_LCB) {
		quick_linkup = 1; /* LCB is always quick linkup */

		/* not supported in emulation due to emulation RTL changes */
		if (dd->icode == ICODE_FPGA_EMULATION) {
			dd_dev_err(dd,
				   "LCB loopback not supported in emulation\n");
			return -EINVAL;
		}
		return 0;
	}

	/* external cable loopback requires no extra steps */
	if (loopback == LOOPBACK_CABLE)
		return 0;

	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
	return -EINVAL;
}
/*
 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
 * used in the Verify Capability link width attribute.
 */
static u16 opa_to_vc_link_widths(u16 opa_widths)
{
	int i;
	u16 result = 0;

	static const struct link_bits {
		u16 from;
		u16 to;
	} opa_link_xlate[] = {
		{ OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
		{ OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
		{ OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
		{ OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
	};

	for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
		if (opa_widths & opa_link_xlate[i].from)
			result |= opa_link_xlate[i].to;
	}
	return result;
}
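/*
 * Example for opa_to_vc_link_widths() above: an FM mask of
 * (OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X) translates to 0b1001, since the
 * Verify Capability encoding uses bit N-1 for an NX width.
 */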
/*
 * Set link attributes before moving to polling.
 */
static int set_local_link_attributes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	int ret;

	/* reset our fabric serdes to clear any lingering problems */
	fabric_serdes_reset(dd);

	/* set the local tx rate - need to read-modify-write */
	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			       &rx_polarity_inversion, &ppd->local_tx_rate);
	if (ret)
		goto set_local_link_attributes_fail;

	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
		/* set the tx rate to the fastest enabled */
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
			ppd->local_tx_rate = 1;
		else
			ppd->local_tx_rate = 0;
	} else {
		/* set the tx rate to all enabled */
		ppd->local_tx_rate = 0;
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
			ppd->local_tx_rate |= 2;
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
			ppd->local_tx_rate |= 1;
	}

	enable_lane_tx = 0xF; /* enable all four lanes */
	ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
				rx_polarity_inversion, ppd->local_tx_rate);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/*
	 * DC supports continuous updates.
	 */
	ret = write_vc_local_phy(dd,
				 0 /* no power management */,
				 1 /* continuous updates */);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* z=1 in the next call: AU of 0 is not supported by the hardware */
	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
				    ppd->port_crc_mode_enabled);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	ret = write_vc_local_link_width(dd, 0, 0,
					opa_to_vc_link_widths(
						ppd->link_width_enabled));
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* let peer know who we are */
	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
	if (ret == HCMD_SUCCESS)
		return 0;

set_local_link_attributes_fail:
	dd_dev_err(dd,
		   "Failed to set local link attributes, return 0x%x\n",
		   ret);
	return ret;
}
/*
 * Call this to start the link.
 * Do not do anything if the link is disabled.
 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
 */
int start_link(struct hfi1_pportdata *ppd)
{
	/*
	 * Tune the SerDes to a ballpark setting for optimal signal and bit
	 * error rate.  Needs to be done before starting the link.
	 */
	tune_serdes(ppd);

	if (!ppd->driver_link_ready) {
		dd_dev_info(ppd->dd,
			    "%s: stopping link start because driver is not ready\n",
			    __func__);
		return 0;
	}

	/*
	 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
	 * pkey table can be configured properly if the HFI unit is connected
	 * to switch port with MgmtAllowed=NO
	 */
	clear_full_mgmt_pkey(ppd);

	return set_link_state(ppd, HLS_DN_POLL);
}

static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;
	unsigned long timeout;

	/*
	 * Some QSFP cables have a quirk that asserts the IntN line as a side
	 * effect of power up on plug-in.  We ignore this false positive
	 * interrupt until the module has finished powering up by waiting for
	 * a minimum timeout of the module inrush initialization time of
	 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
	 * module have stabilized.
	 */
	msleep(500);

	/*
	 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
	 */
	timeout = jiffies + msecs_to_jiffies(2000);
	while (1) {
		mask = read_csr(dd, dd->hfi1_id ?
				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
		if (!(mask & QSFP_HFI0_INT_N))
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
				    __func__);
			break;
		}
		udelay(2);
	}
}

static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;

	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
	if (enable) {
		/*
		 * Clear the status register to avoid an immediate interrupt
		 * when we re-enable the IntN pin
		 */
		write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
			  QSFP_HFI0_INT_N);
		mask |= (u64)QSFP_HFI0_INT_N;
	} else {
		mask &= ~(u64)QSFP_HFI0_INT_N;
	}
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
}

int reset_qsfp(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask, qsfp_mask;

	/* Disable INT_N from triggering QSFP interrupts */
	set_qsfp_int_n(ppd, 0);

	/* Reset the QSFP */
	mask = (u64)QSFP_HFI0_RESET_N;

	qsfp_mask = read_csr(dd,
			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
	qsfp_mask &= ~mask;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	udelay(10);

	qsfp_mask |= mask;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	wait_for_qsfp_init(ppd);

	/*
	 * Allow INT_N to trigger the QSFP interrupt to watch
	 * for alarms and warnings
	 */
	set_qsfp_int_n(ppd, 1);

	/*
	 * After the reset, AOC transmitters are enabled by default.  They need
	 * to be turned off to complete the QSFP setup before they can be
	 * enabled again.
	 */
	return set_qsfp_tx(ppd, 0);
}

static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
					u8 *qsfp_interrupt_status)
{
	struct hfi1_devdata *dd = ppd->dd;

	if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
	    (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
		dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
			   __func__);

	if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
	    (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
		dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
			   __func__);

	/*
	 * The remaining alarms/warnings don't matter if the link is down.
	 */
	if (ppd->host_link_state & HLS_DOWN)
		return 0;

	if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
	    (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
		dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
			   __func__);

	if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
	    (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
		dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
			   __func__);

	/* Byte 2 is vendor specific */

	if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
			   __func__);

	if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
			   __func__);

	if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
			   __func__);

	if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
			   __func__);

	if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
	    (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
			   __func__);

	if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
	    (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
			   __func__);

	if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
	    (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
			   __func__);

	if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
	    (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
			   __func__);

	if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
			   __func__);

	if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
			   __func__);

	if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
			   __func__);

	if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
			   __func__);

	/* Bytes 9-10 and 11-12 are reserved */
	/* Bytes 13-15 are vendor specific */

	return 0;
}

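/*
 * Illustration only, not driver code: the flag bytes tested above follow
 * the SFF-8636 free-side monitor layout, one alarm/warning nibble pair per
 * byte.  Assuming the qsfp.h masks are 0x80/0x40/0x20/0x10 (high alarm,
 * low alarm, high warning, low warning), one byte can be decoded like so.
 */
#if 0
static void decode_flag_byte(u8 byte, const char *what)
{
	if (byte & 0x80)
		pr_err("%s high alarm\n", what);
	if (byte & 0x40)
		pr_err("%s low alarm\n", what);
	if (byte & 0x20)
		pr_err("%s high warning\n", what);
	if (byte & 0x10)
		pr_err("%s low warning\n", what);
}
#endif
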
/* This routine will only be scheduled if the QSFP module present is asserted */
void qsfp_event(struct work_struct *work)
{
	struct qsfp_data *qd;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;

	qd = container_of(work, struct qsfp_data, qsfp_work);
	ppd = qd->ppd;
	dd = ppd->dd;

	/* Sanity check */
	if (!qsfp_mod_present(ppd))
		return;

	if (ppd->host_link_state == HLS_DN_DISABLE) {
		dd_dev_info(ppd->dd,
			    "%s: stopping link start because link is disabled\n",
			    __func__);
		return;
	}

	/*
	 * Turn DC back on after cable has been re-inserted.  Up until
	 * now, the DC has been in reset to save power.
	 */
	dc_start(dd);

	if (qd->cache_refresh_required) {
		set_qsfp_int_n(ppd, 0);

		wait_for_qsfp_init(ppd);

		/*
		 * Allow INT_N to trigger the QSFP interrupt to watch
		 * for alarms and warnings
		 */
		set_qsfp_int_n(ppd, 1);

		start_link(ppd);
	}

	if (qd->check_interrupt_flags) {
		u8 qsfp_interrupt_status[16] = {0,};

		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
				  &qsfp_interrupt_status[0], 16) != 16) {
			dd_dev_info(dd,
				    "%s: Failed to read status of QSFP module\n",
				    __func__);
		} else {
			unsigned long flags;

			handle_qsfp_error_conditions(
					ppd, qsfp_interrupt_status);
			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.check_interrupt_flags = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
		}
	}
}

static void init_qsfp_int(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 qsfp_mask, cce_int_mask;
	const int qsfp1_int_smask = QSFP1_INT % 64;
	const int qsfp2_int_smask = QSFP2_INT % 64;

	/*
	 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
	 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
	 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
	 * the index of the appropriate CSR in the CCEIntMask CSR array
	 */
	cce_int_mask = read_csr(dd, CCE_INT_MASK +
				(8 * (QSFP1_INT / 64)));
	if (dd->hfi1_id) {
		cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
		write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
			  cce_int_mask);
	} else {
		cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
		write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
			  cce_int_mask);
	}

	qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
	/* Clear current status to avoid spurious interrupts */
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
		  qsfp_mask);
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
		  qsfp_mask);

	set_qsfp_int_n(ppd, 0);

	/* Handle active low nature of INT_N and MODPRST_N pins */
	if (qsfp_mod_present(ppd))
		qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
		  qsfp_mask);
}

/*
 * Do a one-time initialize of the LCB block.
 */
static void init_lcb(struct hfi1_devdata *dd)
{
	/* simulator does not correctly handle LCB cclk loopback, skip */
	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
		return;

	/* the DC has been reset earlier in the driver load */

	/* set LCB for cclk loopback on the port */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
}

/*
 * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
 * on error.
 */
static int test_qsfp_read(struct hfi1_pportdata *ppd)
{
	int ret;
	u8 status = 0;

	/*
	 * Report success if not a QSFP or, if it is a QSFP, but the cable is
	 * not present
	 */
	if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
		return 0;

	/* read byte 2, the status byte */
	ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
	if (ret < 0)
		return ret;
	if (ret != 1)
		return -EIO;

	return 0; /* success */
}

/*
 * Values for QSFP retry.
 *
 * Give up after 10s (20 x 500ms).  The overall timeout was empirically
 * arrived at from experience on a large cluster.
 */
#define MAX_QSFP_RETRIES 20
#define QSFP_RETRY_WAIT 500 /* msec */

/*
 * Try a QSFP read.  If it fails, schedule a retry for later.
 * Called on first link activation after driver load.
 */
static void try_start_link(struct hfi1_pportdata *ppd)
{
	if (test_qsfp_read(ppd)) {
		/* read failed */
		if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
			return;
		}
		dd_dev_info(ppd->dd,
			    "QSFP not responding, waiting and retrying %d\n",
			    (int)ppd->qsfp_retry_count);
		ppd->qsfp_retry_count++;
		queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
				   msecs_to_jiffies(QSFP_RETRY_WAIT));
		return;
	}
	ppd->qsfp_retry_count = 0;

	start_link(ppd);
}

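/*
 * Illustration only, not driver code: the bounded-retry shape of
 * try_start_link() above, sketched generically.  do_attempt() is a
 * hypothetical stand-in for the failing operation.
 */
#if 0
static void bounded_retry(struct workqueue_struct *wq,
			  struct delayed_work *work,
			  unsigned int *retry_count)
{
	if (do_attempt()) {			/* hypothetical attempt */
		if (*retry_count >= MAX_QSFP_RETRIES)
			return;			/* give up for good */
		(*retry_count)++;
		queue_delayed_work(wq, work,
				   msecs_to_jiffies(QSFP_RETRY_WAIT));
		return;
	}
	*retry_count = 0;			/* success resets the count */
}
#endif
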
/*
 * Workqueue function to start the link after a delay.
 */
void handle_start_link(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  start_link_work.work);
	try_start_link(ppd);
}

int bringup_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 guid;
	int ret;

	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);

	guid = ppd->guids[HFI1_PORT_GUID_INDEX];
	if (!guid) {
		if (dd->base_guid)
			guid = dd->base_guid + ppd->port - 1;
		ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
	}

	/* Set linkinit_reason on power up per OPA spec */
	ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;

	/* one-time init of the LCB */
	init_lcb(dd);

	if (loopback) {
		ret = init_loopback(dd);
		if (ret < 0)
			return ret;
	}

	get_port_type(ppd);
	if (ppd->port_type == PORT_TYPE_QSFP) {
		set_qsfp_int_n(ppd, 0);
		wait_for_qsfp_init(ppd);
		set_qsfp_int_n(ppd, 1);
	}

	try_start_link(ppd);
	return 0;
}

void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * Shut down the link and keep it down.  First clear the flag that
	 * says the driver wants to allow the link to be up
	 * (driver_link_ready).  Then make sure the link is not automatically
	 * restarted (link_enabled).  Cancel any pending restart.  And
	 * finally go offline.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;

	ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
	flush_delayed_work(&ppd->start_link_work);
	cancel_delayed_work_sync(&ppd->start_link_work);

	ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
			     OPA_LINKDOWN_REASON_SMA_DISABLED);
	set_link_state(ppd, HLS_DN_OFFLINE);

	/* disable the port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}

static inline int init_cpu_counters(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->ibport_data.rvp.rc_acks = NULL;
		ppd->ibport_data.rvp.rc_qacks = NULL;
		ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
		ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
		ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
		if (!ppd->ibport_data.rvp.rc_acks ||
		    !ppd->ibport_data.rvp.rc_delayed_comp ||
		    !ppd->ibport_data.rvp.rc_qacks)
			return -ENOMEM;
	}

	return 0;
}

/*
 * index is the index into the receive array
 */
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
		  u32 type, unsigned long pa, u16 order)
{
	u64 reg;

	if (!(dd->flags & HFI1_PRESENT))
		goto done;

	if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
		pa = 0;
		order = 0;
	} else if (type > PT_INVALID) {
		dd_dev_err(dd,
			   "unexpected receive array type %u for index %u, not handled\n",
			   type, index);
		goto done;
	}
	trace_hfi1_put_tid(dd, index, type, pa, order);

#define RT_ADDR_SHIFT 12	/* 4KB kernel address boundary */
	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
					<< RCV_ARRAY_RT_ADDR_SHIFT;
	trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
	writeq(reg, dd->rcvarray_wc + (index * 8));

	if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
		/*
		 * Eager entries are written and flushed
		 *
		 * Expected entries are flushed every 4 writes
		 */
		flush_wc();
done:
	return;
}

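/*
 * Worked example (assumed address): a 4KB-aligned buffer at physical
 * address 0x12345000 gives pa >> RT_ADDR_SHIFT = 0x12345, i.e. the
 * receive array stores addresses in 4KB units; "order" then scales the
 * buffer size as a power of two on top of that.
 */
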
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 i;

	/* this could be optimized */
	for (i = rcd->eager_base; i < rcd->eager_base +
		     rcd->egrbufs.alloced; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);

	for (i = rcd->expected_base;
			i < rcd->expected_base + rcd->expected_count; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
}

static const char * const ib_cfg_name_strings[] = {
	"HFI1_IB_CFG_LIDLMC",
	"HFI1_IB_CFG_LWID_DG_ENB",
	"HFI1_IB_CFG_LWID_ENB",
	"HFI1_IB_CFG_LWID",
	"HFI1_IB_CFG_SPD_ENB",
	"HFI1_IB_CFG_SPD",
	"HFI1_IB_CFG_RXPOL_ENB",
	"HFI1_IB_CFG_LREV_ENB",
	"HFI1_IB_CFG_LINKLATENCY",
	"HFI1_IB_CFG_HRTBT",
	"HFI1_IB_CFG_OP_VLS",
	"HFI1_IB_CFG_VL_HIGH_CAP",
	"HFI1_IB_CFG_VL_LOW_CAP",
	"HFI1_IB_CFG_OVERRUN_THRESH",
	"HFI1_IB_CFG_PHYERR_THRESH",
	"HFI1_IB_CFG_LINKDEFAULT",
	"HFI1_IB_CFG_PKEYS",
	"HFI1_IB_CFG_MTU",
	"HFI1_IB_CFG_LSTATE",
	"HFI1_IB_CFG_VL_HIGH_LIMIT",
	"HFI1_IB_CFG_PMA_TICKS",
	"HFI1_IB_CFG_PORT"
};

static const char *ib_cfg_name(int which)
{
	if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
		return "invalid";
	return ib_cfg_name_strings[which];
}

int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
{
	struct hfi1_devdata *dd = ppd->dd;
	int val = 0;

	switch (which) {
	case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
		val = ppd->link_width_enabled;
		break;
	case HFI1_IB_CFG_LWID: /* currently active Link-width */
		val = ppd->link_width_active;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
		val = ppd->link_speed_enabled;
		break;
	case HFI1_IB_CFG_SPD: /* current Link speed */
		val = ppd->link_speed_active;
		break;

	case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
	case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
	case HFI1_IB_CFG_LINKLATENCY:
		goto unimplemented;

	case HFI1_IB_CFG_OP_VLS:
		val = ppd->vls_operational;
		break;
	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
		val = VL_ARB_LOW_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		val = ppd->overrun_threshold;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		val = ppd->phy_error_threshold;
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		val = dd->link_default;
		break;

	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
	case HFI1_IB_CFG_PMA_TICKS:
	default:
unimplemented:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(dd,
				    "%s: which %s: not implemented\n",
				    __func__,
				    ib_cfg_name(which));
		break;
	}

	return val;
}

/*
 * The largest MAD packet size.
 */
#define MAX_MAD_PACKET 2048

/*
 * Return the maximum header bytes that can go on the _wire_
 * for this device.  This count includes the ICRC which is
 * not part of the packet held in memory but it is appended
 * by the HW.
 * This is dependent on the device's receive header entry size.
 * HFI allows this to be set per-receive context, but the
 * driver presently enforces a global value.
 */
u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
{
	/*
	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
	 * the Receive Header Entry Size minus the PBC (or RHF) size
	 * plus one DW for the ICRC appended by HW.
	 *
	 * dd->rcd[0].rcvhdrqentsize is in DW.
	 * We use rcd[0] as all contexts will have the same value.  Also,
	 * the first kernel context would have been allocated by now so
	 * we are guaranteed a valid value.
	 */
	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
}

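/*
 * Worked example (assumed value): with a receive header queue entry size
 * of 32 DWs, the maximum wire header is (32 - 2 + 1) << 2 = 124 bytes.
 */
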
/*
 * set_send_length - set the send length
 * @ppd - per port data
 *
 * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
 * registers compare against LRH.PktLen, so use the max bytes included
 * in the LRH.
 *
 * This routine changes all VL values except VL15, which it maintains at
 * the same value.
 */
static void set_send_length(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
	u32 maxvlmtu = dd->vld[15].mtu;
	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
	int i, j;
	u32 thres;

	for (i = 0; i < ppd->vls_supported; i++) {
		if (dd->vld[i].mtu > maxvlmtu)
			maxvlmtu = dd->vld[i].mtu;
		if (i <= 3)
			len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
				 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
				((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
		else
			len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
				 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
				((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
	}
	write_csr(dd, SEND_LEN_CHECK0, len1);
	write_csr(dd, SEND_LEN_CHECK1, len2);
	/* adjust kernel credit return thresholds based on new MTUs */
	/* all kernel receive contexts have the same hdrqentsize */
	for (i = 0; i < ppd->vls_supported; i++) {
		thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
			    sc_mtu_to_threshold(dd->vld[i].sc,
						dd->vld[i].mtu,
						dd->rcd[0]->rcvhdrqentsize));
		for (j = 0; j < INIT_SC_PER_VL; j++)
			sc_set_cr_threshold(
					pio_select_send_context_vl(dd, j, i),
					thres);
	}
	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
		    sc_mtu_to_threshold(dd->vld[15].sc,
					dd->vld[15].mtu,
					dd->rcd[0]->rcvhdrqentsize));
	sc_set_cr_threshold(dd->vld[15].sc, thres);

	/* Adjust maximum MTU for the port in DC */
	dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
		(ilog2(maxvlmtu >> 8) + 1);
	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
	len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
	len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
		DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
}

static void set_lidlmc(struct hfi1_pportdata *ppd)
{
	int i;
	u64 sreg = 0;
	struct hfi1_devdata *dd = ppd->dd;
	u32 mask = ~((1U << ppd->lmc) - 1);
	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
	u32 lid;

	/*
	 * Program 0 in CSR if port lid is extended.  This prevents
	 * 9B packets being sent out for large lids.
	 */
	lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
	c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
		| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
	c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
			<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
	      ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
			<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);

	/*
	 * Iterate over all the send contexts and set their SLID check
	 */
	sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
			SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
	       (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
			SEND_CTXT_CHECK_SLID_VALUE_SHIFT);

	for (i = 0; i < dd->chip_send_contexts; i++) {
		hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
	}

	/* Now we have to do the same thing for the sdma engines */
	sdma_update_lmc(dd, mask, lid);
}

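/*
 * Worked example (assumed values): with lid 0x10 and lmc 2, mask is
 * ~((1 << 2) - 1) = 0xfffffffc, so SLIDs 0x10..0x13 all satisfy
 * (slid & mask) == (lid & mask) and pass the send context check.
 */
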
static const char *state_completed_string(u32 completed)
{
	static const char * const state_completed[] = {
		"EstablishComm",
		"OptimizeEQ",
		"VerifyCap"
	};

	if (completed < ARRAY_SIZE(state_completed))
		return state_completed[completed];

	return "unknown";
}

static const char all_lanes_dead_timeout_expired[] =
	"All lanes were inactive – was the interconnect media removed?";
static const char tx_out_of_policy[] =
	"Passing lanes on local port do not meet the local link width policy";
static const char no_state_complete[] =
	"State timeout occurred before link partner completed the state";
static const char * const state_complete_reasons[] = {
	[0x00] = "Reason unknown",
	[0x01] = "Link was halted by driver, refer to LinkDownReason",
	[0x02] = "Link partner reported failure",
	[0x10] = "Unable to achieve frame sync on any lane",
	[0x11] =
	  "Unable to find a common bit rate with the link partner",
	[0x12] =
	  "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
	[0x13] =
	  "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
	[0x14] = no_state_complete,
	[0x15] =
	  "State timeout occurred before link partner identified equalization presets",
	[0x16] =
	  "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
	[0x17] = tx_out_of_policy,
	[0x20] = all_lanes_dead_timeout_expired,
	[0x21] =
	  "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
	[0x22] = no_state_complete,
	[0x23] =
	  "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
	[0x24] = tx_out_of_policy,
	[0x30] = all_lanes_dead_timeout_expired,
	[0x31] =
	  "State timeout occurred waiting for host to process received frames",
	[0x32] = no_state_complete,
	[0x33] =
	  "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
	[0x34] = tx_out_of_policy,
};

static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
						     u32 code)
{
	const char *str = NULL;

	if (code < ARRAY_SIZE(state_complete_reasons))
		str = state_complete_reasons[code];

	if (str)
		return str;
	return "Reserved";
}

/* describe the given last state complete frame */
static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
				  const char *prefix)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 success;
	u32 state;
	u32 reason;
	u32 lanes;

	/*
	 * Decode frame:
	 *		[ 0: 0] - success
	 *		[ 3: 1] - state
	 *		[ 7: 4] - next state timeout
	 *		[15: 8] - reason code
	 *		[31:16] - lanes
	 */
	success = frame & 0x1;
	state = (frame >> 1) & 0x7;
	reason = (frame >> 8) & 0xff;
	lanes = (frame >> 16) & 0xffff;

	dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
		   prefix, frame);
	dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
		   state_completed_string(state), state);
	dd_dev_err(dd, "    state successfully completed: %s\n",
		   success ? "yes" : "no");
	dd_dev_err(dd, "    fail reason 0x%x: %s\n",
		   reason, state_complete_reason_code_string(ppd, reason));
	dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
}

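/*
 * Worked example (assumed value): frame 0x000f1401 decodes as
 * success = 1, state = 0 (EstablishComm), reason = 0x14
 * (no_state_complete), passing lane mask 0x000f.
 */
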
/*
 * Read the last state complete frames and explain them.  This routine
 * expects to be called if the link went down during link negotiation
 * and initialization (LNI).  That is, anywhere between polling and link up.
 */
static void check_lni_states(struct hfi1_pportdata *ppd)
{
	u32 last_local_state;
	u32 last_remote_state;

	read_last_local_state(ppd->dd, &last_local_state);
	read_last_remote_state(ppd->dd, &last_remote_state);

	/*
	 * Don't report anything if there is nothing to report.  A value of
	 * 0 means the link was taken down while polling and there was no
	 * training in-process.
	 */
	if (last_local_state == 0 && last_remote_state == 0)
		return;

	decode_state_complete(ppd, last_local_state, "transmitted");
	decode_state_complete(ppd, last_remote_state, "received");
}

/* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
{
	u64 reg;
	unsigned long timeout;

	/* watch LCB_STS_LINK_TRANSFER_ACTIVE */
	timeout = jiffies + msecs_to_jiffies(wait_ms);
	while (1) {
		reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
		if (reg)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "timeout waiting for LINK_TRANSFER_ACTIVE\n");
			return -ETIMEDOUT;
		}
		udelay(2);
	}
	return 0;
}

/* called when the logical link state is not down as it should be */
static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * Bring link up in LCB loopback
	 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);

	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);

	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
	(void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);

	write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
	write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);

	wait_link_transfer_active(dd, 100);

	/*
	 * Bring the link down again.
	 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
	write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);

	/* adjust ppd->statusp, if needed */
	update_statusp(ppd, IB_PORT_DOWN);

	dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
}

/*
 * Helper for set_link_state().  Do not call except from that routine.
 * Expects ppd->hls_mutex to be held.
 *
 * @rem_reason value to be sent to the neighbor
 *
 * LinkDownReasons only set if transition succeeds.
 */
static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 previous_state;
	int offline_state_ret;
	int ret;

	update_lcb_cache(dd);

	previous_state = ppd->host_link_state;
	ppd->host_link_state = HLS_GOING_OFFLINE;

	/* start offline transition */
	ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "Failed to transition to Offline link state, return %d\n",
			   ret);
		return -EINVAL;
	}
	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
		ppd->offline_disabled_reason =
		HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);

	offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
	if (offline_state_ret < 0)
		return offline_state_ret;

	/* Disabling AOC transmitters */
	if (ppd->port_type == PORT_TYPE_QSFP &&
	    ppd->qsfp_info.limiting_active &&
	    qsfp_mod_present(ppd)) {
		int ret;

		ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
		if (ret == 0) {
			set_qsfp_tx(ppd, 0);
			release_chip_resource(dd, qsfp_resource(dd));
		} else {
			/* not fatal, but should warn */
			dd_dev_err(dd,
				   "Unable to acquire lock to turn off QSFP TX\n");
		}
	}

	/*
	 * Wait for the offline.Quiet transition if it hasn't happened yet.  It
	 * can take a while for the link to go down.
	 */
	if (offline_state_ret != PLS_OFFLINE_QUIET) {
		ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
		if (ret < 0)
			return ret;
	}

	/*
	 * Now in charge of LCB - must be after the physical state is
	 * offline.quiet and before host_link_state is changed.
	 */
	set_host_lcb_access(dd);
	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */

	/* make sure the logical state is also down */
	ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
	if (ret)
		force_logical_link_state_down(ppd);

	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */

	/*
	 * The LNI has a mandatory wait time after the physical state
	 * moves to Offline.Quiet.  The wait time may be different
	 * depending on how the link went down.  The 8051 firmware
	 * will observe the needed wait time and only move to ready
	 * when that is completed.  The largest of the quiet timeouts
	 * is 6s, so wait that long and then at least 0.5s more for
	 * other transitions, and another 0.5s for a buffer.
	 */
	ret = wait_fm_ready(dd, 7000);
	if (ret) {
		dd_dev_err(dd,
			   "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
		/* state is really offline, so make it so */
		ppd->host_link_state = HLS_DN_OFFLINE;
		return ret;
	}

	/*
	 * The state is now offline and the 8051 is ready to accept host
	 * requests.
	 *	- change our state
	 *	- notify others if we were previously in a linkup state
	 */
	ppd->host_link_state = HLS_DN_OFFLINE;
	if (previous_state & HLS_UP) {
		/* went down while link was up */
		handle_linkup_change(dd, 0);
	} else if (previous_state
			& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
		/* went down while attempting link up */
		check_lni_states(ppd);

		/* The QSFP doesn't need to be reset on LNI failure */
		ppd->qsfp_info.reset_needed = 0;
	}

	/* the active link width (downgrade) is 0 on link down */
	ppd->link_width_active = 0;
	ppd->link_width_downgrade_tx_active = 0;
	ppd->link_width_downgrade_rx_active = 0;
	ppd->current_egress_rate = 0;
	return 0;
}

/* return the link state name */
static const char *link_state_name(u32 state)
{
	const char *name;
	int n = ilog2(state);
	static const char * const names[] = {
		[__HLS_UP_INIT_BP]	 = "INIT",
		[__HLS_UP_ARMED_BP]	 = "ARMED",
		[__HLS_UP_ACTIVE_BP]	 = "ACTIVE",
		[__HLS_DN_DOWNDEF_BP]	 = "DOWNDEF",
		[__HLS_DN_POLL_BP]	 = "POLL",
		[__HLS_DN_DISABLE_BP]	 = "DISABLE",
		[__HLS_DN_OFFLINE_BP]	 = "OFFLINE",
		[__HLS_VERIFY_CAP_BP]	 = "VERIFY_CAP",
		[__HLS_GOING_UP_BP]	 = "GOING_UP",
		[__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
		[__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
	};

	name = n < ARRAY_SIZE(names) ? names[n] : NULL;
	return name ? name : "unknown";
}

/* return the link state reason name */
static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
{
	if (state == HLS_UP_INIT) {
		switch (ppd->linkinit_reason) {
		case OPA_LINKINIT_REASON_LINKUP:
			return "(LINKUP)";
		case OPA_LINKINIT_REASON_FLAPPING:
			return "(FLAPPING)";
		case OPA_LINKINIT_OUTSIDE_POLICY:
			return "(OUTSIDE_POLICY)";
		case OPA_LINKINIT_QUARANTINED:
			return "(QUARANTINED)";
		case OPA_LINKINIT_INSUFIC_CAPABILITY:
			return "(INSUFIC_CAPABILITY)";
		default:
			break;
		}
	}
	return "";
}

/*
 * driver_pstate - convert the driver's notion of a port's
 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
 * Return -1 (converted to a u32) to indicate error.
 */
u32 driver_pstate(struct hfi1_pportdata *ppd)
{
	switch (ppd->host_link_state) {
	case HLS_UP_INIT:
	case HLS_UP_ARMED:
	case HLS_UP_ACTIVE:
		return IB_PORTPHYSSTATE_LINKUP;
	case HLS_DN_POLL:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_DN_DISABLE:
		return IB_PORTPHYSSTATE_DISABLED;
	case HLS_DN_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_VERIFY_CAP:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_GOING_UP:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_GOING_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_LINK_COOLDOWN:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_DN_DOWNDEF:
	default:
		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
			   ppd->host_link_state);
		return -1;
	}
}

/*
 * driver_lstate - convert the driver's notion of a port's
 * state (an HLS_*) into a logical state (a IB_PORT_*).  Return -1
 * (converted to a u32) to indicate error.
 */
u32 driver_lstate(struct hfi1_pportdata *ppd)
{
	if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
		return IB_PORT_DOWN;

	switch (ppd->host_link_state & HLS_UP) {
	case HLS_UP_INIT:
		return IB_PORT_INIT;
	case HLS_UP_ARMED:
		return IB_PORT_ARMED;
	case HLS_UP_ACTIVE:
		return IB_PORT_ACTIVE;
	default:
		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
			   ppd->host_link_state);
		return -1;
	}
}

void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
			  u8 neigh_reason, u8 rem_reason)
{
	if (ppd->local_link_down_reason.latest == 0 &&
	    ppd->neigh_link_down_reason.latest == 0) {
		ppd->local_link_down_reason.latest = lcl_reason;
		ppd->neigh_link_down_reason.latest = neigh_reason;
		ppd->remote_link_down_reason = rem_reason;
	}
}

/*
 * Verify if BCT for data VLs is non-zero.
 */
static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
{
	return !!ppd->actual_vls_operational;
}

/*
 * Change the physical and/or logical link state.
 *
 * Do not call this routine while inside an interrupt.  It contains
 * calls to routines that can take multiple seconds to finish.
 *
 * Returns 0 on success, -errno on failure.
 */
int set_link_state(struct hfi1_pportdata *ppd, u32 state)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct ib_event event = {.device = NULL};
	int ret1, ret = 0;
	int orig_new_state, poll_bounce;

	mutex_lock(&ppd->hls_lock);

	orig_new_state = state;
	if (state == HLS_DN_DOWNDEF)
		state = dd->link_default;

	/* interpret poll -> poll as a link bounce */
	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
		      state == HLS_DN_POLL;

	dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
		    link_state_name(ppd->host_link_state),
		    link_state_name(orig_new_state),
		    poll_bounce ? "(bounce) " : "",
		    link_state_reason_name(ppd, state));

	/*
	 * If we're going to a (HLS_*) link state that implies the logical
	 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
	 * reset is_sm_config_started to 0.
	 */
	if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
		ppd->is_sm_config_started = 0;

	/*
	 * Do nothing if the states match.  Let a poll to poll link bounce
	 * go through.
	 */
	if (ppd->host_link_state == state && !poll_bounce)
		goto done;

	switch (state) {
	case HLS_UP_INIT:
		if (ppd->host_link_state == HLS_DN_POLL &&
		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
			/*
			 * Quick link up jumps from polling to here.
			 *
			 * Whether in normal or loopback mode, the
			 * simulator jumps from polling to link up.
			 * Accept that here.
			 */
			/* OK */
		} else if (ppd->host_link_state != HLS_GOING_UP) {
			goto unexpected;
		}

		/*
		 * Wait for Link_Up physical state.
		 * Physical and Logical states should already be
		 * transitioned to LinkUp and LinkInit respectively.
		 */
		ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
		if (ret) {
			dd_dev_err(dd,
				   "%s: physical state did not change to LINK-UP\n",
				   __func__);
			break;
		}

		ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
		if (ret) {
			dd_dev_err(dd,
				   "%s: logical state did not change to INIT\n",
				   __func__);
			break;
		}

		/* clear old transient LINKINIT_REASON code */
		if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
			ppd->linkinit_reason =
				OPA_LINKINIT_REASON_LINKUP;

		/* enable the port */
		add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

		handle_linkup_change(dd, 1);
		ppd->host_link_state = HLS_UP_INIT;
		break;
	case HLS_UP_ARMED:
		if (ppd->host_link_state != HLS_UP_INIT)
			goto unexpected;

		if (!data_vls_operational(ppd)) {
			dd_dev_err(dd,
				   "%s: data VLs not operational\n", __func__);
			ret = -EINVAL;
			break;
		}

		set_logical_state(dd, LSTATE_ARMED);
		ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
		if (ret) {
			dd_dev_err(dd,
				   "%s: logical state did not change to ARMED\n",
				   __func__);
			break;
		}
		ppd->host_link_state = HLS_UP_ARMED;
		/*
		 * The simulator does not currently implement SMA messages,
		 * so neighbor_normal is not set.  Set it here when we first
		 * move to Armed.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
			ppd->neighbor_normal = 1;
		break;
	case HLS_UP_ACTIVE:
		if (ppd->host_link_state != HLS_UP_ARMED)
			goto unexpected;

		set_logical_state(dd, LSTATE_ACTIVE);
		ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
		if (ret) {
			dd_dev_err(dd,
				   "%s: logical state did not change to ACTIVE\n",
				   __func__);
		} else {
			/* tell all engines to go running */
			sdma_all_running(dd);
			ppd->host_link_state = HLS_UP_ACTIVE;

			/* Signal the IB layer that the port has gone active */
			event.device = &dd->verbs_dev.rdi.ibdev;
			event.element.port_num = ppd->port;
			event.event = IB_EVENT_PORT_ACTIVE;
		}
		break;
	case HLS_DN_POLL:
		if ((ppd->host_link_state == HLS_DN_DISABLE ||
		     ppd->host_link_state == HLS_DN_OFFLINE) &&
		    dd->dc_shutdown)
			dc_start(dd);
		/* Hand LED control to the DC */
		write_csr(dd, DCC_CFG_LED_CNTRL, 0);

		if (ppd->host_link_state != HLS_DN_OFFLINE) {
			u8 tmp = ppd->link_enabled;

			ret = goto_offline(ppd, ppd->remote_link_down_reason);
			if (ret) {
				ppd->link_enabled = tmp;
				break;
			}
			ppd->remote_link_down_reason = 0;
		}

		if (ppd->driver_link_ready)
			ppd->link_enabled = 1;

		set_all_slowpath(ppd->dd);
		ret = set_local_link_attributes(ppd);
		if (ret)
			break;

		ppd->port_error_action = 0;
		ppd->host_link_state = HLS_DN_POLL;

		if (quick_linkup) {
			/* quick linkup does not go into polling */
			ret = do_quick_linkup(dd);
		} else {
			ret1 = set_physical_link_state(dd, PLS_POLLING);
			if (ret1 != HCMD_SUCCESS) {
				dd_dev_err(dd,
					   "Failed to transition to Polling link state, return 0x%x\n",
					   ret1);
				ret = -EINVAL;
			}
		}
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
		/*
		 * If an error occurred above, go back to offline.  The
		 * caller may reschedule another attempt.
		 */
		if (ret)
			goto_offline(ppd, 0);
		else
			log_physical_state(ppd, PLS_POLLING);
		break;
	case HLS_DN_DISABLE:
		/* link is disabled */
		ppd->link_enabled = 0;

		/* allow any state to transition to disabled */

		/* must transition to offline first */
		if (ppd->host_link_state != HLS_DN_OFFLINE) {
			ret = goto_offline(ppd, ppd->remote_link_down_reason);
			if (ret)
				break;
			ppd->remote_link_down_reason = 0;
		}

		if (!dd->dc_shutdown) {
			ret1 = set_physical_link_state(dd, PLS_DISABLED);
			if (ret1 != HCMD_SUCCESS) {
				dd_dev_err(dd,
					   "Failed to transition to Disabled link state, return 0x%x\n",
					   ret1);
				ret = -EINVAL;
				break;
			}
			ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
			if (ret) {
				dd_dev_err(dd,
					   "%s: physical state did not change to DISABLED\n",
					   __func__);
				break;
			}
			dc_shutdown(dd);
		}
		ppd->host_link_state = HLS_DN_DISABLE;
		break;
	case HLS_DN_OFFLINE:
		if (ppd->host_link_state == HLS_DN_DISABLE)
			dc_start(dd);

		/* allow any state to transition to offline */
		ret = goto_offline(ppd, ppd->remote_link_down_reason);
		if (!ret)
			ppd->remote_link_down_reason = 0;
		break;
	case HLS_VERIFY_CAP:
		if (ppd->host_link_state != HLS_DN_POLL)
			goto unexpected;
		ppd->host_link_state = HLS_VERIFY_CAP;
		log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
		break;
	case HLS_GOING_UP:
		if (ppd->host_link_state != HLS_VERIFY_CAP)
			goto unexpected;

		ret1 = set_physical_link_state(dd, PLS_LINKUP);
		if (ret1 != HCMD_SUCCESS) {
			dd_dev_err(dd,
				   "Failed to transition to link up state, return 0x%x\n",
				   ret1);
			ret = -EINVAL;
			break;
		}
		ppd->host_link_state = HLS_GOING_UP;
		break;

	case HLS_GOING_OFFLINE:		/* transient within goto_offline() */
	case HLS_LINK_COOLDOWN:		/* transient within goto_offline() */
	default:
		dd_dev_info(dd, "%s: state 0x%x: not supported\n",
			    __func__, state);
		ret = -EINVAL;
		break;
	}

	goto done;

unexpected:
	dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
		   __func__, link_state_name(ppd->host_link_state),
		   link_state_name(state));
	ret = -EINVAL;

done:
	mutex_unlock(&ppd->hls_lock);

	if (event.device)
		ib_dispatch_event(&event);

	return ret;
}

int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
{
	u64 reg;
	int ret = 0;

	switch (which) {
	case HFI1_IB_CFG_LIDLMC:
		set_lidlmc(ppd);
		break;
	case HFI1_IB_CFG_VL_HIGH_LIMIT:
		/*
		 * The VL Arbitrator high limit is sent in units of 4k
		 * bytes, while HFI stores it in units of 64 bytes.
		 */
		val *= 4096 / 64;
		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
			<< SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
		write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* HFI only supports POLL as the default link down state */
		if (val != HLS_DN_POLL)
			ret = -EINVAL;
		break;
	case HFI1_IB_CFG_OP_VLS:
		if (ppd->vls_operational != val) {
			ppd->vls_operational = val;
			if (!ppd->port)
				ret = -EINVAL;
		}
		break;
	/*
	 * For link width, link width downgrade, and speed enable, always AND
	 * the setting with what is actually supported.  This has two benefits.
	 * First, enabled can't have unsupported values, no matter what the
	 * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
	 * "fill in with your supported value" have all the bits in the
	 * field set, so simply ANDing with supported has the desired result.
	 */
	case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
		ppd->link_width_enabled = val & ppd->link_width_supported;
		break;
	case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
		ppd->link_width_downgrade_enabled =
				val & ppd->link_width_downgrade_supported;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
		ppd->link_speed_enabled = val & ppd->link_speed_supported;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->overrun_threshold = val;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->phy_error_threshold = val;
		break;

	case HFI1_IB_CFG_MTU:
		set_send_length(ppd);
		break;

	case HFI1_IB_CFG_PKEYS:
		if (HFI1_CAP_IS_KSET(PKEY_CHECK))
			set_partition_keys(ppd);
		break;

	default:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(ppd->dd,
				    "%s: which %s, val 0x%x: not implemented\n",
				    __func__, ib_cfg_name(which), val);
		break;
	}

	return ret;
}

/* begin functions related to vl arbitration table caching */
static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
{
	int i;

	BUILD_BUG_ON(VL_ARB_TABLE_SIZE != VL_ARB_LOW_PRIO_TABLE_SIZE);
	BUILD_BUG_ON(VL_ARB_TABLE_SIZE != VL_ARB_HIGH_PRIO_TABLE_SIZE);

	/*
	 * Note that we always return values directly from the
	 * 'vl_arb_cache' (and do no CSR reads) in response to a
	 * 'Get(VLArbTable)'.  This is obviously correct after a
	 * 'Set(VLArbTable)', since the cache will then be up to
	 * date.  But it's also correct prior to any 'Set(VLArbTable)'
	 * since then both the cache, and the relevant h/w registers
	 * will be zeroed.
	 */

	for (i = 0; i < MAX_PRIO_TABLE; i++)
		spin_lock_init(&ppd->vl_arb_cache[i].lock);
}

/*
 * vl_arb_lock_cache
 *
 * All other vl_arb_* functions should be called only after locking
 * the cache.
 */
static inline struct vl_arb_cache *
vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
{
	if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
		return NULL;
	spin_lock(&ppd->vl_arb_cache[idx].lock);
	return &ppd->vl_arb_cache[idx];
}

static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
{
	spin_unlock(&ppd->vl_arb_cache[idx].lock);
}

static void vl_arb_get_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

static void vl_arb_set_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

static int vl_arb_match_cache(struct vl_arb_cache *cache,
			      struct ib_vl_weight_elem *vl)
{
	return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

/* end functions related to vl arbitration table caching */

static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
			  u32 size, struct ib_vl_weight_elem *vl)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	unsigned int i, is_up = 0;
	int drain, ret = 0;

	mutex_lock(&ppd->hls_lock);

	if (ppd->host_link_state & HLS_UP)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * Before adjusting VL arbitration weights, empty per-VL
		 * FIFOs, otherwise a packet whose VL weight is being
		 * set to 0 could get stuck in a FIFO with no chance to
		 * egress.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(
			dd,
			"%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
			__func__);
		goto err;
	}

	for (i = 0; i < size; i++, vl++) {
		/*
		 * NOTE: The low priority shift and mask are used here, but
		 * they are the same for both the low and high registers.
		 */
		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
				<< SEND_LOW_PRIORITY_LIST_VL_SHIFT)
		      | (((u64)vl->weight
				& SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
				<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
		write_csr(dd, target + (i * 8), reg);
	}
	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}

/*
 * Read one credit merge VL register.
 */
static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
			   struct vl_limit *vll)
{
	u64 reg = read_csr(dd, csr);

	vll->dedicated = cpu_to_be16(
		(reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
		& SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
	vll->shared = cpu_to_be16(
		(reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
		& SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
}

/*
 * Read the current credit merge limits.
 */
static int get_buffer_control(struct hfi1_devdata *dd,
			      struct buffer_control *bc, u16 *overall_limit)
{
	u64 reg;
	int i;

	/* not all entries are filled in */
	memset(bc, 0, sizeof(*bc));

	/* OPA and HFI have a 1-1 mapping */
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);

	/* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	bc->overall_shared_limit = cpu_to_be16(
		(reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
		& SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
	if (overall_limit)
		*overall_limit = (reg
			>> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
			& SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
	return sizeof(struct buffer_control);
}

static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
	u64 reg;
	int i;

	/* each register contains 16 SC->VLnt mappings, 4 bits each */
	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
	for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);

		dp->vlnt[2 * i] = byte & 0xf;
		dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
	}

	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
	for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);

		dp->vlnt[16 + (2 * i)] = byte & 0xf;
		dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
	}
	return sizeof(struct sc2vlnt);
}

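/*
 * Worked example (assumed value): if the low register's first byte reads
 * 0x21, it unpacks to vlnt[0] = 1 (low nibble) and vlnt[1] = 2 (high
 * nibble), and so on for the remaining bytes.
 */
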
static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
			      struct ib_vl_weight_elem *vl)
{
	unsigned int i;

	for (i = 0; i < nelems; i++, vl++) {
		vl->vl = 0xf;
		vl->weight = 0;
	}
}

static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
		  DC_SC_VL_VAL(15_0,
			       0, dp->vlnt[0] & 0xf,
			       1, dp->vlnt[1] & 0xf,
			       2, dp->vlnt[2] & 0xf,
			       3, dp->vlnt[3] & 0xf,
			       4, dp->vlnt[4] & 0xf,
			       5, dp->vlnt[5] & 0xf,
			       6, dp->vlnt[6] & 0xf,
			       7, dp->vlnt[7] & 0xf,
			       8, dp->vlnt[8] & 0xf,
			       9, dp->vlnt[9] & 0xf,
			       10, dp->vlnt[10] & 0xf,
			       11, dp->vlnt[11] & 0xf,
			       12, dp->vlnt[12] & 0xf,
			       13, dp->vlnt[13] & 0xf,
			       14, dp->vlnt[14] & 0xf,
			       15, dp->vlnt[15] & 0xf));
	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
		  DC_SC_VL_VAL(31_16,
			       16, dp->vlnt[16] & 0xf,
			       17, dp->vlnt[17] & 0xf,
			       18, dp->vlnt[18] & 0xf,
			       19, dp->vlnt[19] & 0xf,
			       20, dp->vlnt[20] & 0xf,
			       21, dp->vlnt[21] & 0xf,
			       22, dp->vlnt[22] & 0xf,
			       23, dp->vlnt[23] & 0xf,
			       24, dp->vlnt[24] & 0xf,
			       25, dp->vlnt[25] & 0xf,
			       26, dp->vlnt[26] & 0xf,
			       27, dp->vlnt[27] & 0xf,
			       28, dp->vlnt[28] & 0xf,
			       29, dp->vlnt[29] & 0xf,
			       30, dp->vlnt[30] & 0xf,
			       31, dp->vlnt[31] & 0xf));
}

static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
			u16 limit)
{
	/* a limit of 0 is valid; anything else on an unused VL is not */
	if (limit != 0)
		dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
			    what, (int)limit, idx);
}

/* change only the shared limit portion of SendCmGlobalCredit */
static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
{
	u64 reg;

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}

/* change only the total credit limit portion of SendCmGlobalCredit */
static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
{
	u64 reg;

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}

/* set the given per-VL shared limit */
static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
{
	u64 reg;
	u32 addr;

	if (vl < TXE_NUM_DATA_VL)
		addr = SEND_CM_CREDIT_VL + (8 * vl);
	else
		addr = SEND_CM_CREDIT_VL15;

	reg = read_csr(dd, addr);
	reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
	reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
	write_csr(dd, addr, reg);
}

/* set the given per-VL dedicated limit */
static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
{
	u64 reg;
	u32 addr;

	if (vl < TXE_NUM_DATA_VL)
		addr = SEND_CM_CREDIT_VL + (8 * vl);
	else
		addr = SEND_CM_CREDIT_VL15;

	reg = read_csr(dd, addr);
	reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
	reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
	write_csr(dd, addr, reg);
}

#define VL_STATUS_CLEAR_TIMEOUT 5000 /* msecs */

/* spin until the given per-VL status mask bits clear */
static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
				     const char *which)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
	while (1) {
		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;

		if (reg == 0)
			return;	/* success */
		if (time_after(jiffies, timeout))
			break;	/* timed out */
		udelay(5);
	}

	dd_dev_err(dd,
		   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
		   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
	/*
	 * If this occurs, it is likely there was a credit loss on the link.
	 * The only recovery from that is a link bounce.
	 */
	dd_dev_err(dd,
		   "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
}

11256 * The number of credits on the VLs may be changed while everything
11257 * is "live", but the following algorithm must be followed due to
11258 * how the hardware is actually implemented. In particular,
11259 * Return_Credit_Status[] is the only correct status check.
11261 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11262 * set Global_Shared_Credit_Limit = 0
11264 * mask0 = all VLs that are changing either dedicated or shared limits
11265 * set Shared_Limit[mask0] = 0
11266 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11267 * if (changing any dedicated limit)
11268 * mask1 = all VLs that are lowering dedicated limits
11269 * lower Dedicated_Limit[mask1]
11270 * spin until Return_Credit_Status[mask1] == 0
11271 * raise Dedicated_Limits
11272 * raise Shared_Limits
11273 * raise Global_Shared_Credit_Limit
11275 * lower = if the new limit is lower, set the limit to the new value
11276 * raise = if the new limit is higher than the current value (may be changed
11277 * earlier in the algorithm), set the new limit to the new value
int set_buffer_control(struct hfi1_pportdata *ppd,
		       struct buffer_control *new_bc)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 changing_mask, ld_mask, stat_mask;
	int change_count;
	int i, use_all_mask;
	int this_shared_changing;
	int vl_count = 0, ret;
	/*
	 * A0: add the variable any_shared_limit_changing below and in the
	 * algorithm above. If removing A0 support, it can be removed.
	 */
	int any_shared_limit_changing;
	struct buffer_control cur_bc;
	u8 changing[OPA_MAX_VLS];
	u8 lowering_dedicated[OPA_MAX_VLS];
	u16 cur_total;
	u32 new_total = 0;
	const u64 all_mask =
	SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;

#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
#define NUM_USABLE_VLS 16	/* look at VL15 and less */

	/* find the new total credits, do sanity check on unused VLs */
	for (i = 0; i < OPA_MAX_VLS; i++) {
		if (valid_vl(i)) {
			new_total += be16_to_cpu(new_bc->vl[i].dedicated);
			continue;
		}
		nonzero_msg(dd, i, "dedicated",
			    be16_to_cpu(new_bc->vl[i].dedicated));
		nonzero_msg(dd, i, "shared",
			    be16_to_cpu(new_bc->vl[i].shared));
		new_bc->vl[i].dedicated = 0;
		new_bc->vl[i].shared = 0;
	}
	new_total += be16_to_cpu(new_bc->overall_shared_limit);

	/* fetch the current values */
	get_buffer_control(dd, &cur_bc, &cur_total);

	/*
	 * Create the masks we will use.
	 */
	memset(changing, 0, sizeof(changing));
	memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
	/*
	 * NOTE: Assumes that the individual VL bits are adjacent and in
	 * increasing order.
	 */
	stat_mask =
		SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
	changing_mask = 0;
	ld_mask = 0;
	change_count = 0;
	any_shared_limit_changing = 0;
	for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
		if (!valid_vl(i))
			continue;
		this_shared_changing = new_bc->vl[i].shared
						!= cur_bc.vl[i].shared;
		if (this_shared_changing)
			any_shared_limit_changing = 1;
		if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
		    this_shared_changing) {
			changing[i] = 1;
			changing_mask |= stat_mask;
			change_count++;
		}
		if (be16_to_cpu(new_bc->vl[i].dedicated) <
		    be16_to_cpu(cur_bc.vl[i].dedicated)) {
			lowering_dedicated[i] = 1;
			ld_mask |= stat_mask;
		}
	}

	/* bracket the credit change with a total adjustment */
	if (new_total > cur_total)
		set_global_limit(dd, new_total);

	/*
	 * Start the credit change algorithm.
	 */
	use_all_mask = 0;
	if ((be16_to_cpu(new_bc->overall_shared_limit) <
	     be16_to_cpu(cur_bc.overall_shared_limit)) ||
	    (is_ax(dd) && any_shared_limit_changing)) {
		set_global_shared(dd, 0);
		cur_bc.overall_shared_limit = 0;
		use_all_mask = 1;
	}

	for (i = 0; i < NUM_USABLE_VLS; i++) {
		if (!valid_vl(i))
			continue;

		if (changing[i]) {
			set_vl_shared(dd, i, 0);
			cur_bc.vl[i].shared = 0;
		}
	}

	wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
				 "shared");

	if (change_count > 0) {
		for (i = 0; i < NUM_USABLE_VLS; i++) {
			if (!valid_vl(i))
				continue;

			if (lowering_dedicated[i]) {
				set_vl_dedicated(dd, i,
						 be16_to_cpu(new_bc->
							     vl[i].dedicated));
				cur_bc.vl[i].dedicated =
						new_bc->vl[i].dedicated;
			}
		}

		wait_for_vl_status_clear(dd, ld_mask, "dedicated");

		/* now raise all dedicated that are going up */
		for (i = 0; i < NUM_USABLE_VLS; i++) {
			if (!valid_vl(i))
				continue;

			if (be16_to_cpu(new_bc->vl[i].dedicated) >
			    be16_to_cpu(cur_bc.vl[i].dedicated))
				set_vl_dedicated(dd, i,
						 be16_to_cpu(new_bc->
							     vl[i].dedicated));
		}
	}

	/* next raise all shared that are going up */
	for (i = 0; i < NUM_USABLE_VLS; i++) {
		if (!valid_vl(i))
			continue;

		if (be16_to_cpu(new_bc->vl[i].shared) >
		    be16_to_cpu(cur_bc.vl[i].shared))
			set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
	}

	/* finally raise the global shared */
	if (be16_to_cpu(new_bc->overall_shared_limit) >
	    be16_to_cpu(cur_bc.overall_shared_limit))
		set_global_shared(dd,
				  be16_to_cpu(new_bc->overall_shared_limit));

	/* bracket the credit change with a total adjustment */
	if (new_total < cur_total)
		set_global_limit(dd, new_total);

	/*
	 * Determine the actual number of operational VLS using the number of
	 * dedicated and shared credits for each VL.
	 */
	if (change_count > 0) {
		for (i = 0; i < TXE_NUM_DATA_VL; i++)
			if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
			    be16_to_cpu(new_bc->vl[i].shared) > 0)
				vl_count++;
		ppd->actual_vls_operational = vl_count;
		ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
				    ppd->actual_vls_operational :
				    ppd->vls_operational,
				    NULL);
		if (ret == 0)
			ret = pio_map_init(dd, ppd->port - 1, vl_count ?
					   ppd->actual_vls_operational :
					   ppd->vls_operational, NULL);
		if (ret)
			return ret;
	}
	return 0;
}
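/*
 * Usage note: in this file set_buffer_control() is reached through
 * fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, t) below, with t pointing at
 * the big-endian buffer_control table supplied by the fabric manager.
 */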
/*
 * Read the given fabric manager table. Return the size of the
 * table (in bytes) on success, and a negative error code on
 * failure.
 */
int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
{
	int size = 0;
	struct vl_arb_cache *vlc;

	switch (which) {
	case FM_TBL_VL_HIGH_ARB:
		size = 256;
		/*
		 * OPA specifies 128 elements (of 2 bytes each), though
		 * HFI supports only 16 elements in h/w.
		 */
		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
		vl_arb_get_cache(vlc, t);
		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
		break;
	case FM_TBL_VL_LOW_ARB:
		size = 256;
		/*
		 * OPA specifies 128 elements (of 2 bytes each), though
		 * HFI supports only 16 elements in h/w.
		 */
		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
		vl_arb_get_cache(vlc, t);
		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
		break;
	case FM_TBL_BUFFER_CONTROL:
		size = get_buffer_control(ppd->dd, t, NULL);
		break;
	case FM_TBL_SC2VLNT:
		size = get_sc2vlnt(ppd->dd, t);
		break;
	case FM_TBL_VL_PREEMPT_ELEMS:
		size = 256;
		/* OPA specifies 128 elements, of 2 bytes each */
		get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
		break;
	case FM_TBL_VL_PREEMPT_MATRIX:
		size = 256;
		/*
		 * OPA specifies that this is the same size as the VL
		 * arbitration tables (i.e., 256 bytes).
		 */
		break;
	default:
		return -EINVAL;
	}
	return size;
}
/*
 * Write the given fabric manager table.
 */
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
{
	int ret = 0;
	struct vl_arb_cache *vlc;

	switch (which) {
	case FM_TBL_VL_HIGH_ARB:
		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
		if (vl_arb_match_cache(vlc, t)) {
			vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
			break;
		}
		vl_arb_set_cache(vlc, t);
		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
		ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
				     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
		break;
	case FM_TBL_VL_LOW_ARB:
		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
		if (vl_arb_match_cache(vlc, t)) {
			vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
			break;
		}
		vl_arb_set_cache(vlc, t);
		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
		ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
				     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
		break;
	case FM_TBL_BUFFER_CONTROL:
		ret = set_buffer_control(ppd, t);
		break;
	case FM_TBL_SC2VLNT:
		set_sc2vlnt(ppd->dd, t);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
/*
 * Disable all data VLs.
 *
 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
 */
static int disable_data_vls(struct hfi1_devdata *dd)
{
	if (is_ax(dd))
		return 1;

	pio_send_control(dd, PSC_DATA_VL_DISABLE);

	return 0;
}
/*
 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
 * Just re-enables all data VLs (the "fill" part happens
 * automatically - the name was chosen for symmetry with
 * stop_drain_data_vls()).
 *
 * Return 0 if successful, non-zero if the VLs cannot be enabled.
 */
int open_fill_data_vls(struct hfi1_devdata *dd)
{
	if (is_ax(dd))
		return 1;

	pio_send_control(dd, PSC_DATA_VL_ENABLE);

	return 0;
}
/*
 * drain_data_vls() - assumes that disable_data_vls() has been called,
 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
 * engines to drop to 0.
 */
static void drain_data_vls(struct hfi1_devdata *dd)
{
	sc_wait(dd);
	sdma_wait(dd);
	pause_for_credit_return(dd);
}
/*
 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
 *
 * Use open_fill_data_vls() to resume using data VLs. This pair is
 * meant to be used like this:
 *
 * stop_drain_data_vls(dd);
 * // do things with per-VL resources
 * open_fill_data_vls(dd);
 */
int stop_drain_data_vls(struct hfi1_devdata *dd)
{
	int ret;

	ret = disable_data_vls(dd);
	if (ret == 0)
		drain_data_vls(dd);

	return ret;
}
/*
 * Convert a nanosecond time to a cclock count. No matter how slow
 * the cclock, a non-zero ns will always have a non-zero result.
 */
u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
{
	u32 cclocks;

	if (dd->icode == ICODE_FPGA_EMULATION)
		cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
	else /* simulation pretends to be ASIC */
		cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
	if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
		cclocks = 1;
	return cclocks;
}
/*
 * Convert a cclock count to nanoseconds. No matter how slow
 * the cclock, a non-zero cclocks will always have a non-zero result.
 */
u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
{
	u32 ns;

	if (dd->icode == ICODE_FPGA_EMULATION)
		ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
	else /* simulation pretends to be ASIC */
		ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
	if (cclocks && !ns)
		ns = 1;
	return ns;
}
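/*
 * Worked example (hypothetical 1000 ps cclock period, chosen only for
 * easy math): ns_to_cclock(dd, 500) = (500 * 1000) / 1000 = 500 cclocks
 * and cclock_to_ns(dd, 3) = (3 * 1000) / 1000 = 3 ns. The "at least 1"
 * clamps matter when integer division truncates to zero, e.g. a 1 ns
 * request against a multi-nanosecond cclock period.
 */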
/*
 * Dynamically adjust the receive interrupt timeout for a context based on
 * incoming packet rate.
 *
 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
 */
static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 timeout = rcd->rcvavail_timeout;

	/*
	 * This algorithm doubles or halves the timeout depending on whether
	 * the number of packets received in this interrupt were less than or
	 * greater equal the interrupt count.
	 *
	 * The calculations below do not allow a steady state to be achieved.
	 * Only at the endpoints it is possible to have an unchanging
	 * timeout.
	 */
	if (npkts < rcv_intr_count) {
		/*
		 * Not enough packets arrived before the timeout, adjust
		 * timeout downward.
		 */
		if (timeout < 2) /* already at minimum? */
			return;
		timeout >>= 1;
	} else {
		/*
		 * More than enough packets arrived before the timeout, adjust
		 * timeout upward.
		 */
		if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
			return;
		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
	}

	rcd->rcvavail_timeout = timeout;
	/*
	 * timeout cannot be larger than rcv_intr_timeout_csr which has already
	 * been verified to be in range
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
			(u64)timeout <<
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
}
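/*
 * Example of the doubling/halving dynamics (hypothetical numbers): with
 * rcv_intr_count = 16 and a current timeout of 840, an interrupt that
 * saw only 3 packets halves the timeout to 420; a following interrupt
 * that saw 20 packets doubles it back to 840. Only the endpoints, 1 and
 * dd->rcv_intr_timeout_csr, are stable values.
 */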
void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
		    u32 intr_adjust, u32 npkts)
{
	struct hfi1_devdata *dd = rcd->dd;
	u64 reg;
	u32 ctxt = rcd->ctxt;

	/*
	 * Need to write timeout register before updating RcvHdrHead to ensure
	 * that a new value is used when the HW decides to restart counting.
	 */
	if (intr_adjust)
		adjust_rcv_timeout(rcd, npkts);
	if (updegr) {
		reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
			<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
	}
	reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
	      (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
	       << RCV_HDR_HEAD_HEAD_SHIFT);
	write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
}
u32 hdrqempty(struct hfi1_ctxtdata *rcd)
{
	u32 head, tail;

	head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
		& RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;

	if (rcd->rcvhdrtail_kvaddr)
		tail = get_rcvhdrtail(rcd);
	else
		tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);

	return head == tail;
}
/*
 * Context Control and Receive Array encoding for buffer size:
 *	0x0 invalid
 *	0x1   4 KB
 *	0x2   8 KB
 *	0x3  16 KB
 *	0x4  32 KB
 *	0x5  64 KB
 *	0x6 128 KB
 *	0x7 256 KB
 *	0x8 512 KB (Receive Array only)
 *	0x9   1 MB (Receive Array only)
 *	0xa   2 MB (Receive Array only)
 *
 *	0xB-0xF - reserved (Receive Array only)
 *
 * This routine assumes that the value has already been sanity checked.
 */
static u32 encoded_size(u32 size)
{
	switch (size) {
	case   4 * 1024: return 0x1;
	case   8 * 1024: return 0x2;
	case  16 * 1024: return 0x3;
	case  32 * 1024: return 0x4;
	case  64 * 1024: return 0x5;
	case 128 * 1024: return 0x6;
	case 256 * 1024: return 0x7;
	case 512 * 1024: return 0x8;
	case   1 * 1024 * 1024: return 0x9;
	case   2 * 1024 * 1024: return 0xa;
	default: return 0x1; /* if invalid, go with the minimum size */
	}
}
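/*
 * Example: a context using 64 KB eager buffers programs
 * encoded_size(64 * 1024) = 0x5 into RcvCtxtCtrl.EgrBufSize, as done in
 * hfi1_rcvctrl() below.
 */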
void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
		  struct hfi1_ctxtdata *rcd)
{
	u64 rcvctrl, reg;
	int did_enable = 0;
	u16 ctxt;

	if (!rcd)
		return;

	ctxt = rcd->ctxt;

	hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);

	rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
	/* if the context is already enabled, don't do the extra steps */
	if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
		/* reset the tail and hdr addresses, and sequence count */
		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
				rcd->rcvhdrq_dma);
		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					rcd->rcvhdrqtailaddr_dma);
		rcd->seq_cnt = 1;

		/* reset the cached receive header queue head value */
		rcd->head = 0;

		/*
		 * Zero the receive header queue so we don't get false
		 * positives when checking the sequence number. The
		 * sequence numbers could land exactly on the same spot.
		 * E.g. a rcd restart before the receive header wrapped.
		 */
		memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);

		/* starting timeout */
		rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;

		/* enable the context */
		rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;

		/* clean the egr buffer size first */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
		rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
				& RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
					<< RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;

		/* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
		did_enable = 1;

		/* zero RcvEgrIndexHead */
		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);

		/* set eager count and base index */
		reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
			& RCV_EGR_CTRL_EGR_CNT_MASK)
		       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
			(((rcd->eager_base >> RCV_SHIFT)
			  & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
			 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
		write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);

		/*
		 * Set TID (expected) count and base index.
		 * rcd->expected_count is set to individual RcvArray entries,
		 * not pairs, and the CSR takes a pair-count in groups of
		 * four, so divide by 8.
		 */
		reg = (((rcd->expected_count >> RCV_SHIFT)
					& RCV_TID_CTRL_TID_PAIR_CNT_MASK)
				<< RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
		      (((rcd->expected_base >> RCV_SHIFT)
					& RCV_TID_CTRL_TID_BASE_INDEX_MASK)
				<< RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
		write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
		if (ctxt == HFI1_CTRL_CTXT)
			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
	}
	if (op & HFI1_RCVCTRL_CTXT_DIS) {
		write_csr(dd, RCV_VL15, 0);
		/*
		 * When receive context is being disabled turn on tail
		 * update with a dummy tail address and then disable
		 * receive context.
		 */
		if (dd->rcvhdrtail_dummy_dma) {
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					dd->rcvhdrtail_dummy_dma);
			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
		}

		rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
	}
	if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
		rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
	if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
	if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
		rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
		/* See comment on RcvCtxtCtrl.TailUpd above */
		if (!(op & HFI1_RCVCTRL_CTXT_DIS))
			rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	}
	if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
		rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
	if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
		/*
		 * In one-packet-per-eager mode, the size comes from
		 * the RcvArray entry.
		 */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
		rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
	}
	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
	rcd->rcvctrl = rcvctrl;
	hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
	write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);

	/* work around sticky RcvCtxtStatus.BlockedRHQFull */
	if (did_enable &&
	    (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
		reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
		if (reg != 0) {
			dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
				    ctxt, reg);
			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
			reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
			dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
				    ctxt, reg, reg == 0 ? "not" : "still");
		}
	}

	if (did_enable) {
		/*
		 * The interrupt timeout and count must be set after
		 * the context is enabled to take effect.
		 */
		/* set interrupt timeout */
		write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
				(u64)rcd->rcvavail_timeout <<
				RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);

		/* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
		reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
	}

	if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
		/*
		 * If the context has been disabled and the Tail Update has
		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
		 * so it doesn't contain an address that is invalid.
		 */
		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
				dd->rcvhdrtail_dummy_dma);
}
u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
{
	int ret;
	u64 val = 0;

	if (namep) {
		ret = dd->cntrnameslen;
		*namep = dd->cntrnames;
	} else {
		const struct cntr_entry *entry;
		int i, j;

		ret = (dd->ndevcntrs) * sizeof(u64);

		/* Get the start of the block of counters */
		*cntrp = dd->cntrs;

		/*
		 * Now go and fill in each counter in the block.
		 */
		for (i = 0; i < DEV_CNTR_LAST; i++) {
			entry = &dev_cntrs[i];
			hfi1_cdbg(CNTR, "reading %s", entry->name);
			if (entry->flags & CNTR_DISABLED) {
				/* Nothing */
				hfi1_cdbg(CNTR, "\tDisabled\n");
				continue;
			}

			if (entry->flags & CNTR_VL) {
				hfi1_cdbg(CNTR, "\tPer VL\n");
				for (j = 0; j < C_VL_COUNT; j++) {
					val = entry->rw_cntr(entry, dd, j,
							     CNTR_MODE_R, 0);
					hfi1_cdbg(CNTR,
						  "\t\tRead 0x%llx for %d\n",
						  val, j);
					dd->cntrs[entry->offset + j] = val;
				}
			} else if (entry->flags & CNTR_SDMA) {
				hfi1_cdbg(CNTR, "\t Per SDMA Engine\n");
				for (j = 0; j < dd->chip_sdma_engines;
				     j++) {
					val = entry->rw_cntr(entry, dd, j,
							     CNTR_MODE_R, 0);
					hfi1_cdbg(CNTR,
						  "\t\tRead 0x%llx for %d\n",
						  val, j);
					dd->cntrs[entry->offset + j] = val;
				}
			} else {
				val = entry->rw_cntr(entry, dd,
						     CNTR_INVALID_VL,
						     CNTR_MODE_R, 0);
				dd->cntrs[entry->offset] = val;
				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
			}
		}
	}
	return ret;
}
/*
 * Used by sysfs to create files for hfi stats to read
 */
u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
{
	int ret;
	u64 val = 0;

	if (namep) {
		ret = ppd->dd->portcntrnameslen;
		*namep = ppd->dd->portcntrnames;
	} else {
		const struct cntr_entry *entry;
		int i, j;

		ret = ppd->dd->nportcntrs * sizeof(u64);
		*cntrp = ppd->cntrs;

		for (i = 0; i < PORT_CNTR_LAST; i++) {
			entry = &port_cntrs[i];
			hfi1_cdbg(CNTR, "reading %s", entry->name);
			if (entry->flags & CNTR_DISABLED) {
				/* Nothing */
				hfi1_cdbg(CNTR, "\tDisabled\n");
				continue;
			}

			if (entry->flags & CNTR_VL) {
				hfi1_cdbg(CNTR, "\tPer VL");
				for (j = 0; j < C_VL_COUNT; j++) {
					val = entry->rw_cntr(entry, ppd, j,
							     CNTR_MODE_R, 0);
					hfi1_cdbg(CNTR,
						  "\t\tRead 0x%llx for %d",
						  val, j);
					ppd->cntrs[entry->offset + j] = val;
				}
			} else {
				val = entry->rw_cntr(entry, ppd,
						     CNTR_INVALID_VL,
						     CNTR_MODE_R, 0);
				ppd->cntrs[entry->offset] = val;
				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
			}
		}
	}
	return ret;
}
static void free_cntrs(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	if (dd->synth_stats_timer.data)
		del_timer_sync(&dd->synth_stats_timer);
	dd->synth_stats_timer.data = 0;
	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		kfree(ppd->cntrs);
		kfree(ppd->scntrs);
		free_percpu(ppd->ibport_data.rvp.rc_acks);
		free_percpu(ppd->ibport_data.rvp.rc_qacks);
		free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
		ppd->cntrs = NULL;
		ppd->scntrs = NULL;
		ppd->ibport_data.rvp.rc_acks = NULL;
		ppd->ibport_data.rvp.rc_qacks = NULL;
		ppd->ibport_data.rvp.rc_delayed_comp = NULL;
	}
	kfree(dd->portcntrnames);
	dd->portcntrnames = NULL;
	kfree(dd->cntrs);
	dd->cntrs = NULL;
	kfree(dd->scntrs);
	dd->scntrs = NULL;
	kfree(dd->cntrnames);
	dd->cntrnames = NULL;
	if (dd->update_cntr_wq) {
		destroy_workqueue(dd->update_cntr_wq);
		dd->update_cntr_wq = NULL;
	}
}
static u64 read_dev_port_cntr(struct hfi1_devdata *dd,
			      struct cntr_entry *entry,
			      u64 *psval, void *context, int vl)
{
	u64 val;
	u64 sval = *psval;

	if (entry->flags & CNTR_DISABLED) {
		dd_dev_err(dd, "Counter %s not enabled", entry->name);
		return 0;
	}

	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);

	val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);

	/* If it's a synthetic counter there is more work we need to do */
	if (entry->flags & CNTR_SYNTH) {
		if (sval == CNTR_MAX) {
			/* No need to read already saturated */
			return CNTR_MAX;
		}

		if (entry->flags & CNTR_32BIT) {
			/* 32bit counters can wrap multiple times */
			u64 upper = sval >> 32;
			u64 lower = (sval << 32) >> 32;

			if (lower > val) { /* hw wrapped */
				if (upper == CNTR_32BIT_MAX)
					val = CNTR_MAX;
				else
					upper++;
			}

			if (val != CNTR_MAX)
				val = (upper << 32) | val;

		} else {
			/* If we rolled we are saturated */
			if ((val < sval) || (val > CNTR_MAX))
				val = CNTR_MAX;
		}
	}

	*psval = val;

	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);

	return val;
}
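/*
 * Worked example of the 32-bit wrap handling above (hypothetical
 * values): if the saved value is 0x100000010 (upper = 1, lower = 0x10)
 * and the hardware now reads 0x4, then lower > val means the 32-bit
 * register wrapped, so upper becomes 2 and the returned value is
 * 0x200000004. Saturation at CNTR_MAX is deliberately sticky.
 */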
static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
			       struct cntr_entry *entry,
			       u64 *psval, void *context, int vl, u64 data)
{
	u64 val;

	if (entry->flags & CNTR_DISABLED) {
		dd_dev_err(dd, "Counter %s not enabled", entry->name);
		return 0;
	}

	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);

	if (entry->flags & CNTR_SYNTH) {
		*psval = data;
		if (entry->flags & CNTR_32BIT) {
			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
					     (data << 32) >> 32);
			val = data; /* return the full 64bit value */
		} else {
			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
					     data);
		}
	} else {
		val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
	}

	*psval = val;

	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);

	return val;
}
u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &dev_cntrs[index];
	sval = dd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	return read_dev_port_cntr(dd, entry, sval, dd, vl);
}
u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &dev_cntrs[index];
	sval = dd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
}
u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &port_cntrs[index];
	sval = ppd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
	    (index <= C_RCV_HDR_OVF_LAST)) {
		/* We do not want to bother for disabled contexts */
		return 0;
	}

	return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
}
u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &port_cntrs[index];
	sval = ppd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
	    (index <= C_RCV_HDR_OVF_LAST)) {
		/* We do not want to bother for disabled contexts */
		return 0;
	}

	return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
}
static void do_update_synth_timer(struct work_struct *work)
{
	u64 cur_tx;
	u64 cur_rx;
	u64 total_flits;
	u8 update = 0;
	int i, j, vl;
	struct hfi1_pportdata *ppd;
	struct cntr_entry *entry;
	struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
					       update_cntr_work);

	/*
	 * Rather than keep beating on the CSRs pick a minimal set that we can
	 * check to watch for potential roll over. We can do this by looking at
	 * the number of flits sent/recv. If the total flits exceeds 32bits then
	 * we have to iterate all the counters and update.
	 */
	entry = &dev_cntrs[C_DC_RCV_FLITS];
	cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);

	entry = &dev_cntrs[C_DC_XMIT_FLITS];
	cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);

	hfi1_cdbg(CNTR,
		  "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
		  dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);

	if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
		/*
		 * May not be strictly necessary to update but it won't hurt and
		 * simplifies the logic here.
		 */
		update = 1;
		hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
			  dd->unit);
	} else {
		total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
		hfi1_cdbg(CNTR,
			  "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
			  total_flits, (u64)CNTR_32BIT_MAX);
		if (total_flits >= CNTR_32BIT_MAX) {
			hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
				  dd->unit);
			update = 1;
		}
	}

	if (update) {
		hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
		for (i = 0; i < DEV_CNTR_LAST; i++) {
			entry = &dev_cntrs[i];
			if (entry->flags & CNTR_VL) {
				for (vl = 0; vl < C_VL_COUNT; vl++)
					read_dev_cntr(dd, i, vl);
			} else {
				read_dev_cntr(dd, i, CNTR_INVALID_VL);
			}
		}
		ppd = (struct hfi1_pportdata *)(dd + 1);
		for (i = 0; i < dd->num_pports; i++, ppd++) {
			for (j = 0; j < PORT_CNTR_LAST; j++) {
				entry = &port_cntrs[j];
				if (entry->flags & CNTR_VL) {
					for (vl = 0; vl < C_VL_COUNT; vl++)
						read_port_cntr(ppd, j, vl);
				} else {
					read_port_cntr(ppd, j,
						       CNTR_INVALID_VL);
				}
			}
		}

		/*
		 * We want the value in the register. The goal is to keep track
		 * of the number of "ticks" not the counter value. In other
		 * words if the register rolls we want to notice it and go ahead
		 * and force an update.
		 */
		entry = &dev_cntrs[C_DC_XMIT_FLITS];
		dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
					     CNTR_MODE_R, 0);

		entry = &dev_cntrs[C_DC_RCV_FLITS];
		dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
					     CNTR_MODE_R, 0);

		hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
			  dd->unit, dd->last_tx, dd->last_rx);

	} else {
		hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
	}
}
static void update_synth_timer(unsigned long opaque)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;

	queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
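/*
 * Design note: the timer callback only queues work and re-arms itself;
 * the full counter sweep in do_update_synth_timer() runs from the
 * ordered workqueue because reading a CSR for every counter is far too
 * slow to do from the timer's (softirq) context.
 */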
#define C_MAX_NAME 16 /* 15 chars + one for '\0' */
static int init_cntrs(struct hfi1_devdata *dd)
{
	int i, rcv_ctxts, j;
	size_t sz;
	char *p;
	char name[C_MAX_NAME];
	struct hfi1_pportdata *ppd;
	const char *bit_type_32 = ",32";
	const int bit_type_32_sz = strlen(bit_type_32);

	/* set up the stats timer; the add_timer is done at the end */
	setup_timer(&dd->synth_stats_timer, update_synth_timer,
		    (unsigned long)dd);

	/***********************/
	/* per device counters */
	/***********************/

	/* size names and determine how many we have */
	dd->ndevcntrs = 0;
	sz = 0;

	for (i = 0; i < DEV_CNTR_LAST; i++) {
		if (dev_cntrs[i].flags & CNTR_DISABLED) {
			hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
			continue;
		}

		if (dev_cntrs[i].flags & CNTR_VL) {
			dev_cntrs[i].offset = dd->ndevcntrs;
			for (j = 0; j < C_VL_COUNT; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name, vl_from_idx(j));
				sz += strlen(name);
				/* Add ",32" for 32-bit counters */
				if (dev_cntrs[i].flags & CNTR_32BIT)
					sz += bit_type_32_sz;
				sz++;
				dd->ndevcntrs++;
			}
		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
			dev_cntrs[i].offset = dd->ndevcntrs;
			for (j = 0; j < dd->chip_sdma_engines; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name, j);
				sz += strlen(name);
				/* Add ",32" for 32-bit counters */
				if (dev_cntrs[i].flags & CNTR_32BIT)
					sz += bit_type_32_sz;
				sz++;
				dd->ndevcntrs++;
			}
		} else {
			/* +1 for newline. */
			sz += strlen(dev_cntrs[i].name) + 1;
			/* Add ",32" for 32-bit counters */
			if (dev_cntrs[i].flags & CNTR_32BIT)
				sz += bit_type_32_sz;
			dev_cntrs[i].offset = dd->ndevcntrs;
			dd->ndevcntrs++;
		}
	}

	/* allocate space for the counter values */
	dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
	if (!dd->cntrs)
		goto bail;

	dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
	if (!dd->scntrs)
		goto bail;

	/* allocate space for the counter names */
	dd->cntrnameslen = sz;
	dd->cntrnames = kmalloc(sz, GFP_KERNEL);
	if (!dd->cntrnames)
		goto bail;

	/* fill in the names */
	for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
		if (dev_cntrs[i].flags & CNTR_DISABLED) {
			/* Nothing */
		} else if (dev_cntrs[i].flags & CNTR_VL) {
			for (j = 0; j < C_VL_COUNT; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name,
					 vl_from_idx(j));
				memcpy(p, name, strlen(name));
				p += strlen(name);

				/* Counter is 32 bits */
				if (dev_cntrs[i].flags & CNTR_32BIT) {
					memcpy(p, bit_type_32,
					       bit_type_32_sz);
					p += bit_type_32_sz;
				}

				*p++ = '\n';
			}
		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
			for (j = 0; j < dd->chip_sdma_engines; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name, j);
				memcpy(p, name, strlen(name));
				p += strlen(name);

				/* Counter is 32 bits */
				if (dev_cntrs[i].flags & CNTR_32BIT) {
					memcpy(p, bit_type_32,
					       bit_type_32_sz);
					p += bit_type_32_sz;
				}

				*p++ = '\n';
			}
		} else {
			memcpy(p, dev_cntrs[i].name,
			       strlen(dev_cntrs[i].name));
			p += strlen(dev_cntrs[i].name);

			/* Counter is 32 bits */
			if (dev_cntrs[i].flags & CNTR_32BIT) {
				memcpy(p, bit_type_32, bit_type_32_sz);
				p += bit_type_32_sz;
			}

			*p++ = '\n';
		}
	}

	/*********************/
	/* per port counters */
	/*********************/

	/*
	 * Go through the counters for the overflows and disable the ones we
	 * don't need. This varies based on platform so we need to do it
	 * dynamically here.
	 */
	rcv_ctxts = dd->num_rcv_contexts;
	for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
	     i <= C_RCV_HDR_OVF_LAST; i++) {
		port_cntrs[i].flags |= CNTR_DISABLED;
	}

	/* size port counter names and determine how many we have */
	sz = 0;
	dd->nportcntrs = 0;
	for (i = 0; i < PORT_CNTR_LAST; i++) {
		if (port_cntrs[i].flags & CNTR_DISABLED) {
			hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
			continue;
		}

		if (port_cntrs[i].flags & CNTR_VL) {
			port_cntrs[i].offset = dd->nportcntrs;
			for (j = 0; j < C_VL_COUNT; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 port_cntrs[i].name, vl_from_idx(j));
				sz += strlen(name);
				/* Add ",32" for 32-bit counters */
				if (port_cntrs[i].flags & CNTR_32BIT)
					sz += bit_type_32_sz;
				sz++;
				dd->nportcntrs++;
			}
		} else {
			/* +1 for newline */
			sz += strlen(port_cntrs[i].name) + 1;
			/* Add ",32" for 32-bit counters */
			if (port_cntrs[i].flags & CNTR_32BIT)
				sz += bit_type_32_sz;
			port_cntrs[i].offset = dd->nportcntrs;
			dd->nportcntrs++;
		}
	}

	/* allocate space for the counter names */
	dd->portcntrnameslen = sz;
	dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
	if (!dd->portcntrnames)
		goto bail;

	/* fill in port cntr names */
	for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
		if (port_cntrs[i].flags & CNTR_DISABLED)
			continue;

		if (port_cntrs[i].flags & CNTR_VL) {
			for (j = 0; j < C_VL_COUNT; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 port_cntrs[i].name, vl_from_idx(j));
				memcpy(p, name, strlen(name));
				p += strlen(name);

				/* Counter is 32 bits */
				if (port_cntrs[i].flags & CNTR_32BIT) {
					memcpy(p, bit_type_32,
					       bit_type_32_sz);
					p += bit_type_32_sz;
				}

				*p++ = '\n';
			}
		} else {
			memcpy(p, port_cntrs[i].name,
			       strlen(port_cntrs[i].name));
			p += strlen(port_cntrs[i].name);

			/* Counter is 32 bits */
			if (port_cntrs[i].flags & CNTR_32BIT) {
				memcpy(p, bit_type_32, bit_type_32_sz);
				p += bit_type_32_sz;
			}

			*p++ = '\n';
		}
	}

	/* allocate per port storage for counter values */
	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
		if (!ppd->cntrs)
			goto bail;

		ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
		if (!ppd->scntrs)
			goto bail;
	}

	/* CPU counters need to be allocated and zeroed */
	if (init_cpu_counters(dd))
		goto bail;

	dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
						     WQ_MEM_RECLAIM, dd->unit);
	if (!dd->update_cntr_wq)
		goto bail;

	INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);

	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
	return 0;
bail:
	free_cntrs(dd);
	return -ENOMEM;
}
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
{
	switch (chip_lstate) {
	default:
		dd_dev_err(dd,
			   "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
			   chip_lstate);
		/* fall through */
	case LSTATE_DOWN:
		return IB_PORT_DOWN;
	case LSTATE_INIT:
		return IB_PORT_INIT;
	case LSTATE_ARMED:
		return IB_PORT_ARMED;
	case LSTATE_ACTIVE:
		return IB_PORT_ACTIVE;
	}
}
u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
{
	/* look at the HFI meta-states only */
	switch (chip_pstate & 0xf0) {
	default:
		dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
			   chip_pstate);
		/* fall through */
	case PLS_DISABLED:
		return IB_PORTPHYSSTATE_DISABLED;
	case PLS_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case PLS_POLLING:
		return IB_PORTPHYSSTATE_POLLING;
	case PLS_CONFIGPHY:
		return IB_PORTPHYSSTATE_TRAINING;
	case PLS_LINKUP:
		return IB_PORTPHYSSTATE_LINKUP;
	case PLS_PHYTEST:
		return IB_PORTPHYSSTATE_PHY_TEST;
	}
}
/* return the OPA port logical state name */
const char *opa_lstate_name(u32 lstate)
{
	static const char * const port_logical_names[] = {
		"PORT_NOP",
		"PORT_DOWN",
		"PORT_INIT",
		"PORT_ARMED",
		"PORT_ACTIVE",
		"PORT_ACTIVE_DEFER",
	};
	if (lstate < ARRAY_SIZE(port_logical_names))
		return port_logical_names[lstate];
	return "unknown";
}
/* return the OPA port physical state name */
const char *opa_pstate_name(u32 pstate)
{
	static const char * const port_physical_names[] = {
		"PHYS_NOP",
		"reserved1",
		"PHYS_POLL",
		"PHYS_DISABLED",
		"PHYS_TRAINING",
		"PHYS_LINKUP",
		"PHYS_LINK_ERR_RECOVER",
		"PHYS_PHY_TEST",
		"reserved8",
		"PHYS_OFFLINE",
		"PHYS_GANGED",
		"PHYS_TEST",
	};
	if (pstate < ARRAY_SIZE(port_physical_names))
		return port_physical_names[pstate];
	return "unknown";
}
static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
{
	/*
	 * Set port status flags in the page mapped into userspace
	 * memory. Do it here to ensure a reliable state - this is
	 * the only function called by all state handling code.
	 * Always set the flags due to the fact that the cache value
	 * might have been changed explicitly outside of this
	 * function.
	 */
	if (ppd->statusp) {
		switch (state) {
		case IB_PORT_DOWN:
		case IB_PORT_INIT:
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
			break;
		case IB_PORT_ARMED:
			*ppd->statusp |= HFI1_STATUS_IB_CONF;
			break;
		case IB_PORT_ACTIVE:
			*ppd->statusp |= HFI1_STATUS_IB_READY;
			break;
		}
	}
}
/*
 * wait_logical_linkstate - wait for an IB link state change to occur
 * @ppd: port device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for IB link state change to occur.
 * For now, take the easy polling route.
 * Returns 0 if state reached, otherwise -ETIMEDOUT.
 */
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs)
{
	unsigned long timeout;
	u32 new_state;

	timeout = jiffies + msecs_to_jiffies(msecs);
	while (1) {
		new_state = chip_to_opa_lstate(ppd->dd,
					       read_logical_state(ppd->dd));
		if (new_state == state)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(ppd->dd,
				   "timeout waiting for link state 0x%x\n",
				   state);
			return -ETIMEDOUT;
		}
		msleep(20);
	}

	update_statusp(ppd, state);
	dd_dev_info(ppd->dd,
		    "logical state changed to %s (0x%x)\n",
		    opa_lstate_name(state),
		    state);
	return 0;
}
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
{
	u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);

	dd_dev_info(ppd->dd,
		    "physical state changed to %s (0x%x), phy 0x%x\n",
		    opa_pstate_name(ib_pstate), ib_pstate, state);
}
/*
 * Read the physical hardware link state and check if it matches the host
 * driver's anticipated state.
 */
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
{
	u32 read_state = read_physical_state(ppd->dd);

	if (read_state == state) {
		log_state_transition(ppd, state);
	} else {
		dd_dev_err(ppd->dd,
			   "anticipated phy link state 0x%x, read 0x%x\n",
			   state, read_state);
	}
}
/*
 * wait_physical_linkstate - wait for a physical link state change to occur
 * @ppd: port device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for physical link state change to occur.
 * Returns 0 if state reached, otherwise -ETIMEDOUT.
 */
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs)
{
	u32 read_state;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(msecs);
	while (1) {
		read_state = read_physical_state(ppd->dd);
		if (read_state == state)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(ppd->dd,
				   "timeout waiting for phy link state 0x%x\n",
				   state);
			return -ETIMEDOUT;
		}
		usleep_range(1950, 2050); /* sleep 2ms-ish */
	}

	log_state_transition(ppd, state);
	return 0;
}
/*
 * wait_phys_link_offline_substates - wait for any offline substate
 * @ppd: port device
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for any offline physical link
 * state change to occur.
 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
 */
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs)
{
	u32 read_state;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(msecs);
	while (1) {
		read_state = read_physical_state(ppd->dd);
		if ((read_state & 0xF0) == PLS_OFFLINE)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(ppd->dd,
				   "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
				   read_state, msecs);
			return -ETIMEDOUT;
		}
		usleep_range(1950, 2050); /* sleep 2ms-ish */
	}

	log_state_transition(ppd, read_state);
	return read_state;
}
#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
	(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

#define SET_STATIC_RATE_CONTROL_SMASK(r) \
	(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

void hfi1_init_ctxt(struct send_context *sc)
{
	if (sc) {
		struct hfi1_devdata *dd = sc->dd;
		u64 reg;
		u8 set = (sc->type == SC_USER ?
			  HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
			  HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
		reg = read_kctxt_csr(dd, sc->hw_context,
				     SEND_CTXT_CHECK_ENABLE);
		if (set)
			CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
		else
			SET_STATIC_RATE_CONTROL_SMASK(reg);
		write_kctxt_csr(dd, sc->hw_context,
				SEND_CTXT_CHECK_ENABLE, reg);
	}
}
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
{
	int ret = 0;
	u64 reg;

	if (dd->icode != ICODE_RTL_SILICON) {
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(dd, "%s: tempsense not supported by HW\n",
				    __func__);
		return -EINVAL;
	}
	reg = read_csr(dd, ASIC_STS_THERM);
	temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
		      ASIC_STS_THERM_CURR_TEMP_MASK);
	temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
			ASIC_STS_THERM_LO_TEMP_MASK);
	temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
			ASIC_STS_THERM_HI_TEMP_MASK);
	temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
			  ASIC_STS_THERM_CRIT_TEMP_MASK);
	/* triggers is a 3-bit value - 1 bit per trigger. */
	temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);

	return ret;
}
/* ========================================================================= */

/*
 * Enable/disable chip from delivering interrupts.
 */
void set_intr_state(struct hfi1_devdata *dd, u32 enable)
{
	int i;

	/*
	 * In HFI, the mask needs to be 1 to allow interrupts.
	 */
	if (enable) {
		/* enable all interrupts */
		for (i = 0; i < CCE_NUM_INT_CSRS; i++)
			write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
	} else {
		for (i = 0; i < CCE_NUM_INT_CSRS; i++)
			write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
	}
}
/*
 * Clear all interrupt sources on the chip.
 */
static void clear_all_interrupts(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);

	write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
	write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
	write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
	for (i = 0; i < dd->chip_sdma_engines; i++)
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);

	write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
	write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
	write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
}
/* Move to pcie.c? */
static void disable_intx(struct pci_dev *pdev)
{
	pci_intx(pdev, 0);
}
static void clean_up_interrupts(struct hfi1_devdata *dd)
{
	int i;

	/* remove irqs - must happen before disabling/turning off */
	if (dd->num_msix_entries) {
		/* MSI-X */
		struct hfi1_msix_entry *me = dd->msix_entries;

		for (i = 0; i < dd->num_msix_entries; i++, me++) {
			if (!me->arg) /* => no irq, no affinity */
				continue;
			hfi1_put_irq_affinity(dd, me);
			free_irq(me->irq, me->arg);
		}

		/* clean structures */
		kfree(dd->msix_entries);
		dd->msix_entries = NULL;
		dd->num_msix_entries = 0;
	} else {
		/* INTx */
		if (dd->requested_intx_irq) {
			free_irq(dd->pcidev->irq, dd);
			dd->requested_intx_irq = 0;
		}
		disable_intx(dd->pcidev);
	}

	pci_free_irq_vectors(dd->pcidev);
}
/*
 * Remap the interrupt source from the general handler to the given MSI-X
 * interrupt.
 */
static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
{
	u64 reg;
	int m, n;

	/* clear from the handled mask of the general interrupt */
	m = isrc / 64;
	n = isrc % 64;
	if (likely(m < CCE_NUM_INT_CSRS)) {
		dd->gi_mask[m] &= ~((u64)1 << n);
	} else {
		dd_dev_err(dd, "remap interrupt err\n");
		return;
	}

	/* direct the chip source to the given MSI-X interrupt */
	m = isrc / 8;
	n = isrc % 8;
	reg = read_csr(dd, CCE_INT_MAP + (8 * m));
	reg &= ~((u64)0xff << (8 * n));
	reg |= ((u64)msix_intr & 0xff) << (8 * n);
	write_csr(dd, CCE_INT_MAP + (8 * m), reg);
}
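/*
 * Worked example (hypothetical source number): chip interrupt source 72
 * lives in gi_mask[1] bit 8 (72 = 64 + 8), and its routing byte is byte
 * 0 of CCE_INT_MAP CSR 9 (72 = 8 * 9 + 0); writing msix_intr into that
 * byte steers source 72 to the chosen MSI-X vector.
 */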
static void remap_sdma_interrupts(struct hfi1_devdata *dd,
				  int engine, int msix_intr)
{
	/*
	 * SDMA engine interrupt sources grouped by type, rather than
	 * engine. Per-engine interrupts are as follows:
	 *	SDMA
	 *	SDMAProgress
	 *	SDMAIdle
	 */
	remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
		   msix_intr);
	remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
		   msix_intr);
	remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
		   msix_intr);
}
static int request_intx_irq(struct hfi1_devdata *dd)
{
	int ret;

	snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
		 dd->unit);
	ret = request_irq(dd->pcidev->irq, general_interrupt,
			  IRQF_SHARED, dd->intx_name, dd);
	if (ret)
		dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
			   ret);
	else
		dd->requested_intx_irq = 1;
	return ret;
}
static int request_msix_irqs(struct hfi1_devdata *dd)
{
	int first_general, last_general;
	int first_sdma, last_sdma;
	int first_rx, last_rx;
	int i, ret = 0;

	/* calculate the ranges we are going to use */
	first_general = 0;
	last_general = first_general + 1;
	first_sdma = last_general;
	last_sdma = first_sdma + dd->num_sdma;
	first_rx = last_sdma;
	last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;

	/* VNIC MSIx interrupts get mapped when VNIC contexts are created */
	dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;

	/*
	 * Sanity check - the code expects all SDMA chip source
	 * interrupts to be in the same CSR, starting at bit 0. Verify
	 * that this is true by checking the bit location of the start.
	 */
	BUILD_BUG_ON(IS_SDMA_START % 64);

	for (i = 0; i < dd->num_msix_entries; i++) {
		struct hfi1_msix_entry *me = &dd->msix_entries[i];
		const char *err_info;
		irq_handler_t handler;
		irq_handler_t thread = NULL;
		void *arg = NULL;
		int idx;
		struct hfi1_ctxtdata *rcd = NULL;
		struct sdma_engine *sde = NULL;

		/* obtain the arguments to request_irq */
		if (first_general <= i && i < last_general) {
			idx = i - first_general;
			handler = general_interrupt;
			arg = dd;
			snprintf(me->name, sizeof(me->name),
				 DRIVER_NAME "_%d", dd->unit);
			err_info = "general";
			me->type = IRQ_GENERAL;
		} else if (first_sdma <= i && i < last_sdma) {
			idx = i - first_sdma;
			sde = &dd->per_sdma[idx];
			handler = sdma_interrupt;
			arg = sde;
			snprintf(me->name, sizeof(me->name),
				 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
			err_info = "sdma";
			remap_sdma_interrupts(dd, idx, i);
			me->type = IRQ_SDMA;
		} else if (first_rx <= i && i < last_rx) {
			idx = i - first_rx;
			rcd = hfi1_rcd_get_by_index(dd, idx);
			if (rcd) {
				/*
				 * Set the interrupt register and mask for this
				 * context's interrupt.
				 */
				rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
				rcd->imask = ((u64)1) <<
					((IS_RCVAVAIL_START + idx) % 64);
				handler = receive_context_interrupt;
				thread = receive_context_thread;
				arg = rcd;
				snprintf(me->name, sizeof(me->name),
					 DRIVER_NAME "_%d kctxt%d",
					 dd->unit, idx);
				err_info = "receive context";
				remap_intr(dd, IS_RCVAVAIL_START + idx, i);
				me->type = IRQ_RCVCTXT;
				rcd->msix_intr = i;
				hfi1_rcd_put(rcd);
			}
		} else {
			/* not in our expected range - complain, then
			 * ignore it
			 */
			dd_dev_err(dd,
				   "Unexpected extra MSI-X interrupt %d\n", i);
			continue;
		}
		/* no argument, no interrupt */
		if (!arg)
			continue;
		/* make sure the name is terminated */
		me->name[sizeof(me->name) - 1] = 0;
		me->irq = pci_irq_vector(dd->pcidev, i);
		/*
		 * On err return me->irq. Don't need to clear this
		 * because 'arg' has not been set, and cleanup will
		 * do the right thing.
		 */
		if (me->irq < 0)
			return me->irq;

		ret = request_threaded_irq(me->irq, handler, thread, 0,
					   me->name, arg);
		if (ret) {
			dd_dev_err(dd,
				   "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
				   err_info, me->irq, idx, ret);
			return ret;
		}
		/*
		 * assign arg after request_irq call, so it will be
		 * cleaned up
		 */
		me->arg = arg;

		ret = hfi1_get_irq_affinity(dd, me);
		if (ret)
			dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
	}

	return ret;
}
void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
{
	int i;

	if (!dd->num_msix_entries) {
		synchronize_irq(dd->pcidev->irq);
		return;
	}

	for (i = 0; i < dd->vnic.num_ctxt; i++) {
		struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
		struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];

		synchronize_irq(me->irq);
	}
}
void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];

	if (!me->arg) /* => no irq, no affinity */
		return;

	hfi1_put_irq_affinity(dd, me);
	free_irq(me->irq, me->arg);

	me->arg = NULL;
}
void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	struct hfi1_msix_entry *me;
	int idx = rcd->ctxt;
	void *arg = rcd;
	int ret;

	rcd->msix_intr = dd->vnic.msix_idx++;
	me = &dd->msix_entries[rcd->msix_intr];

	/*
	 * Set the interrupt register and mask for this
	 * context's interrupt.
	 */
	rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
	rcd->imask = ((u64)1) <<
		((IS_RCVAVAIL_START + idx) % 64);

	snprintf(me->name, sizeof(me->name),
		 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
	me->name[sizeof(me->name) - 1] = 0;
	me->type = IRQ_RCVCTXT;
	me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
	if (me->irq < 0) {
		dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
			   idx, me->irq);
		return;
	}
	remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);

	ret = request_threaded_irq(me->irq, receive_context_interrupt,
				   receive_context_thread, 0, me->name, arg);
	if (ret) {
		dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
			   me->irq, idx, ret);
		return;
	}
	/*
	 * assign arg after request_irq call, so it will be
	 * cleaned up
	 */
	me->arg = arg;

	ret = hfi1_get_irq_affinity(dd, me);
	if (ret) {
		dd_dev_err(dd,
			   "unable to pin IRQ %d\n", ret);
		free_irq(me->irq, me->arg);
	}
}
/*
 * Set the general handler to accept all interrupts, remap all
 * chip interrupts back to MSI-X 0.
 */
static void reset_interrupts(struct hfi1_devdata *dd)
{
	int i;

	/* all interrupts handled by the general handler */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		dd->gi_mask[i] = ~(u64)0;

	/* all chip interrupts map to MSI-X 0 */
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
}
static int set_up_interrupts(struct hfi1_devdata *dd)
{
	u32 total;
	int ret, request;
	int single_interrupt = 0; /* we expect to have all the interrupts */

	/*
	 * Interrupt count:
	 *	1 general, "slow path" interrupt (includes the SDMA engines
	 *		slow source, SDMACleanupDone)
	 *	N interrupts - one per used SDMA engine
	 *	M interrupt - one per kernel receive context
	 */
	total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;

	/* ask for MSI-X interrupts */
	request = request_msix(dd, total);
	if (request < 0) {
		ret = request;
		goto fail;
	} else if (request == 0) {
		/* using INTx */
		/* dd->num_msix_entries already zero */
		single_interrupt = 1;
		dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
	} else if (request < total) {
		/* using MSI-X, with reduced interrupts */
		dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
			   total, request);
		ret = -EINVAL;
		goto fail;
	} else {
		dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
					   GFP_KERNEL);
		if (!dd->msix_entries) {
			ret = -ENOMEM;
			goto fail;
		}
		/* using MSI-X */
		dd->num_msix_entries = total;
		dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
	}

	/* mask all interrupts */
	set_intr_state(dd, 0);
	/* clear all pending interrupts */
	clear_all_interrupts(dd);

	/* reset general handler mask, chip MSI-X mappings */
	reset_interrupts(dd);

	if (single_interrupt)
		ret = request_intx_irq(dd);
	else
		ret = request_msix_irqs(dd);
	if (ret)
		goto fail;

	return 0;

fail:
	clean_up_interrupts(dd);
	return ret;
}
/*
 * Set up context values in dd. Sets:
 *
 *	num_rcv_contexts - number of contexts being used
 *	n_krcv_queues - number of kernel contexts
 *	first_dyn_alloc_ctxt - first dynamically allocated context
 *				in array of contexts
 *	freectxts - number of free user contexts
 *	num_send_contexts - number of PIO send contexts being used
 */
static int set_up_context_variables(struct hfi1_devdata *dd)
{
	unsigned long num_kernel_contexts;
	int total_contexts;
	int ret;
	unsigned ngroups;
	int qos_rmt_count;
	int user_rmt_reduced;

	/*
	 * Kernel receive contexts:
	 * - Context 0 - control context (VL15/multicast/error)
	 * - Context 1 - first kernel context
	 * - Context 2 - second kernel context
	 * ...
	 */
	if (n_krcvqs)
		/*
		 * n_krcvqs is the sum of module parameter kernel receive
		 * contexts, krcvqs[]. It does not include the control
		 * context, so add that.
		 */
		num_kernel_contexts = n_krcvqs + 1;
	else
		num_kernel_contexts = DEFAULT_KRCVQS + 1;
	/*
	 * Every kernel receive context needs an ACK send context.
	 * one send context is allocated for each VL{0-7} and VL15
	 */
	if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
		dd_dev_err(dd,
			   "Reducing # kernel rcv contexts to: %d, from %lu\n",
			   (int)(dd->chip_send_contexts - num_vls - 1),
			   num_kernel_contexts);
		num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
	}
	/*
	 * User contexts:
	 *	- default to 1 user context per real (non-HT) CPU core if
	 *	  num_user_contexts is negative
	 */
	if (num_user_contexts < 0)
		num_user_contexts =
			cpumask_weight(&node_affinity.real_cpu_mask);

	total_contexts = num_kernel_contexts + num_user_contexts;

	/*
	 * Adjust the counts given a global max.
	 */
	if (total_contexts > dd->chip_rcv_contexts) {
		dd_dev_err(dd,
			   "Reducing # user receive contexts to: %d, from %d\n",
			   (int)(dd->chip_rcv_contexts - num_kernel_contexts),
			   (int)num_user_contexts);
		num_user_contexts = dd->chip_rcv_contexts -
				    num_kernel_contexts;
		/* recalculate */
		total_contexts = num_kernel_contexts + num_user_contexts;
	}

	/* each user context requires an entry in the RMT */
	qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
	if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
		user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
		dd_dev_err(dd,
			   "RMT size is reducing the number of user receive contexts from %d to %d\n",
			   (int)num_user_contexts,
			   user_rmt_reduced);
		/* recalculate */
		num_user_contexts = user_rmt_reduced;
		total_contexts = num_kernel_contexts + num_user_contexts;
	}

	/* Accommodate VNIC contexts */
	if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
		total_contexts += HFI1_NUM_VNIC_CTXT;

	/* the first N are kernel contexts, the rest are user/vnic contexts */
	dd->num_rcv_contexts = total_contexts;
	dd->n_krcv_queues = num_kernel_contexts;
	dd->first_dyn_alloc_ctxt = num_kernel_contexts;
	dd->num_user_contexts = num_user_contexts;
	dd->freectxts = num_user_contexts;
	dd_dev_info(dd,
		    "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
		    (int)dd->chip_rcv_contexts,
		    (int)dd->num_rcv_contexts,
		    (int)dd->n_krcv_queues,
		    (int)dd->num_rcv_contexts - dd->n_krcv_queues);

	/*
	 * Receive array allocation:
	 *   All RcvArray entries are divided into groups of 8. This
	 *   is required by the hardware and will speed up writes to
	 *   consecutive entries by using write-combining of the entire
	 *   cacheline.
	 *
	 *   The number of groups are evenly divided among all contexts.
	 *   any left over groups will be given to the first N user
	 *   contexts.
	 */
	dd->rcv_entries.group_size = RCV_INCREMENT;
	ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
	dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
	dd->rcv_entries.nctxt_extra = ngroups -
		(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
	dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
		    dd->rcv_entries.ngroups,
		    dd->rcv_entries.nctxt_extra);
	if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
	    MAX_EAGER_ENTRIES * 2) {
		dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
			dd->rcv_entries.group_size;
		dd_dev_info(dd,
			    "RcvArray group count too high, change to %u\n",
			    dd->rcv_entries.ngroups);
		dd->rcv_entries.nctxt_extra = 0;
	}
	/*
	 * PIO send contexts
	 */
	ret = init_sc_pools_and_sizes(dd);
	if (ret >= 0) {	/* success */
		dd->num_send_contexts = ret;
		dd_dev_info(
			dd,
			"send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
			dd->chip_send_contexts,
			dd->num_send_contexts,
			dd->sc_sizes[SC_KERNEL].count,
			dd->sc_sizes[SC_ACK].count,
			dd->sc_sizes[SC_USER].count,
			dd->sc_sizes[SC_VL15].count);
		ret = 0;	/* success */
	}

	return ret;
}
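/*
 * Worked example of the RcvArray split above (hypothetical counts):
 * with chip_rcv_array_count = 32768, group_size = 8 and 40 contexts,
 * ngroups = 4096, each context receives 4096 / 40 = 102 groups, and the
 * remaining 4096 - 40 * 102 = 16 groups become nctxt_extra, handed out
 * to the first user contexts.
 */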
/*
 * Set the device/port partition key table. The MAD code
 * will ensure that, at least, the partial management
 * partition key is present in the table.
 */
static void set_partition_keys(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg = 0;
	int i;

	dd_dev_info(dd, "Setting partition keys\n");
	for (i = 0; i < hfi1_get_npkeys(dd); i++) {
		reg |= (ppd->pkeys[i] &
			RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
			((i % 4) *
			 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
		/* Each register holds 4 PKey values. */
		if ((i % 4) == 3) {
			write_csr(dd, RCV_PARTITION_KEY +
				  ((i - 3) * 2), reg);
			reg = 0;
		}
	}

	/* Always enable HW pkeys check when pkeys table is set */
	add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
}
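/*
 * Packing example (hypothetical pkeys, assuming the usual 16-bit
 * per-key stride): pkeys[0..3] = 0x8001, 0x7fff, 0, 0 pack into
 * reg = 0x7fff8001, written at i == 3 to RCV_PARTITION_KEY + 0; the
 * next group of four is written at i == 7 with offset (7 - 3) * 2 = 8,
 * i.e. the following 64-bit register.
 */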
/*
 * These CSRs and memories are uninitialized on reset and must be
 * written before reading to set the ECC/parity bits.
 *
 * NOTE: All user context CSRs that are not mmaped write-only
 * (e.g. the TID flows) must be initialized even if the driver never
 * reads them.
 */
static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
{
	int i, j;

	/* CceIntMap */
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP + (8 * i), 0);

	/* SendCtxtCreditReturnAddr */
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);

	/* PIO Send buffers */
	/* SDMA Send buffers */
	/*
	 * These are not normally read, and (presently) have no method
	 * to be read, so are not pre-initialized
	 */

	/* RcvHdrAddr */
	/* RcvHdrTailAddr */
	/* RcvTidFlowTable */
	for (i = 0; i < dd->chip_rcv_contexts; i++) {
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
			write_uctxt_csr(dd, i,
					RCV_TID_FLOW_TABLE + (8 * j), 0);
	}

	/* RcvArray */
	for (i = 0; i < dd->chip_rcv_array_count; i++)
		hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);

	/* RcvQPMapTable */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
}
/*
 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
 */
static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
			     u64 ctrl_bits)
{
	unsigned long timeout;
	u64 reg;

	/* is the condition present? */
	reg = read_csr(dd, CCE_STATUS);
	if ((reg & status_bits) == 0)
		return;

	/* clear the condition */
	write_csr(dd, CCE_CTRL, ctrl_bits);

	/* wait for the condition to clear */
	timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
	while (1) {
		reg = read_csr(dd, CCE_STATUS);
		if ((reg & status_bits) == 0)
			return;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
				   status_bits, reg & status_bits);
			return;
		}
		udelay(1);
	}
}
/* set CCE CSRs to chip reset defaults */
static void reset_cce_csrs(struct hfi1_devdata *dd)
{
	int i;

	/* CCE_REVISION read-only */
	/* CCE_REVISION2 read-only */
	/* CCE_CTRL - bits clear automatically */
	/* CCE_STATUS read-only, use CceCtrl to clear */
	clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
	clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
	clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
	for (i = 0; i < CCE_NUM_SCRATCH; i++)
		write_csr(dd, CCE_SCRATCH + (8 * i), 0);
	/* CCE_ERR_STATUS read-only */
	write_csr(dd, CCE_ERR_MASK, 0);
	write_csr(dd, CCE_ERR_CLEAR, ~0ull);
	/* CCE_ERR_FORCE leave alone */
	for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
	/* CCE_PCIE_CTRL leave alone */
	for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
		write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
		write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
			  CCE_MSIX_TABLE_UPPER_RESETCSR);
	}
	for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
		/* CCE_MSIX_PBA read-only */
		write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
		write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
	}
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP, 0);
	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
		/* CCE_INT_STATUS read-only */
		write_csr(dd, CCE_INT_MASK + (8 * i), 0);
		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
		/* CCE_INT_FORCE leave alone */
		/* CCE_INT_BLOCKED read-only */
	}
	for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
		write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
}
/* set MISC CSRs to chip reset defaults */
static void reset_misc_csrs(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < 32; i++) {
		write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
	}
	/*
	 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
	 * only be written 128-byte chunks
	 */
	/* init RSA engine to clear lingering errors */
	write_csr(dd, MISC_CFG_RSA_CMD, 1);
	write_csr(dd, MISC_CFG_RSA_MU, 0);
	write_csr(dd, MISC_CFG_FW_CTRL, 0);
	/* MISC_STS_8051_DIGEST read-only */
	/* MISC_STS_SBM_DIGEST read-only */
	/* MISC_STS_PCIE_DIGEST read-only */
	/* MISC_STS_FAB_DIGEST read-only */
	/* MISC_ERR_STATUS read-only */
	write_csr(dd, MISC_ERR_MASK, 0);
	write_csr(dd, MISC_ERR_CLEAR, ~0ull);
	/* MISC_ERR_FORCE leave alone */
}
/* set TXE CSRs to chip reset defaults */
static void reset_txe_csrs(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * TXE Kernel CSRs
	 */
	write_csr(dd, SEND_CTRL, 0);
	__cm_reset(dd, 0);	/* reset CM internal state */
	/* SEND_CONTEXTS read-only */
	/* SEND_DMA_ENGINES read-only */
	/* SEND_PIO_MEM_SIZE read-only */
	/* SEND_DMA_MEM_SIZE read-only */
	write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
	pio_reset_all(dd);	/* SEND_PIO_INIT_CTXT */
	/* SEND_PIO_ERR_STATUS read-only */
	write_csr(dd, SEND_PIO_ERR_MASK, 0);
	write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
	/* SEND_PIO_ERR_FORCE leave alone */
	/* SEND_DMA_ERR_STATUS read-only */
	write_csr(dd, SEND_DMA_ERR_MASK, 0);
	write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
	/* SEND_DMA_ERR_FORCE leave alone */
	/* SEND_EGRESS_ERR_STATUS read-only */
	write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
	/* SEND_EGRESS_ERR_FORCE leave alone */
	write_csr(dd, SEND_BTH_QP, 0);
	write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
	write_csr(dd, SEND_SC2VLT0, 0);
	write_csr(dd, SEND_SC2VLT1, 0);
	write_csr(dd, SEND_SC2VLT2, 0);
	write_csr(dd, SEND_SC2VLT3, 0);
	write_csr(dd, SEND_LEN_CHECK0, 0);
	write_csr(dd, SEND_LEN_CHECK1, 0);
	/* SEND_ERR_STATUS read-only */
	write_csr(dd, SEND_ERR_MASK, 0);
	write_csr(dd, SEND_ERR_CLEAR, ~0ull);
	/* SEND_ERR_FORCE read-only */
	for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
		write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
	for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
		write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
	for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
		write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
	for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
		write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
	for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
		write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
	write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
	/* SEND_CM_CREDIT_USED_STATUS read-only */
	write_csr(dd, SEND_CM_TIMER_CTRL, 0);
	write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
	write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
	write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
	write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
	/* SEND_CM_CREDIT_USED_VL read-only */
	/* SEND_CM_CREDIT_USED_VL15 read-only */
	/* SEND_EGRESS_CTXT_STATUS read-only */
	/* SEND_EGRESS_SEND_DMA_STATUS read-only */
	write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
	/* SEND_EGRESS_ERR_INFO read-only */
	/* SEND_EGRESS_ERR_SOURCE read-only */

	/*
	 * TXE Per-Context CSRs
	 */
	for (i = 0; i < dd->chip_send_contexts; i++) {
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
	}

	/*
	 * TXE Per-SDMA CSRs
	 */
	for (i = 0; i < dd->chip_sdma_engines; i++) {
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
		/* SEND_DMA_STATUS read-only */
		write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
		write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
		/* SEND_DMA_HEAD read-only */
		write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
		/* SEND_DMA_IDLE_CNT read-only */
		write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
		write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
		/* SEND_DMA_DESC_FETCHED_CNT read-only */
		/* SEND_DMA_ENG_ERR_STATUS read-only */
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
		/* SEND_DMA_ENG_ERR_FORCE leave alone */
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
		write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
	}
}
/*
 * Expect on entry:
 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
 */
static void init_rbufs(struct hfi1_devdata *dd)
{
	u64 reg;
	int count;

	/*
	 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
	 * clear.
	 */
	count = 0;
	while (1) {
		reg = read_csr(dd, RCV_STATUS);
		if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
			    | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
			break;
		/*
		 * Give up after 1ms - maximum wait time.
		 *
		 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
		 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
		 * 136 KB / (66% * 250MB/s) = 844us
		 */
		if (count++ > 500) {
			dd_dev_err(dd,
				   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
				   __func__, reg);
			break;
		}
		udelay(2); /* do not busy-wait the CSR */
	}

	/* start the init - expect RcvCtrl to be 0 */
	write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);

	/*
	 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
	 * period after the write before RcvStatus.RxRbufInitDone is valid.
	 * The delay in the first run through the loop below is sufficient and
	 * required before the first read of RcvStatus.RxRbufInitDone.
	 */
	read_csr(dd, RCV_CTRL);

	/* wait for the init to finish */
	count = 0;
	while (1) {
		/* delay is required first time through - see above */
		udelay(2); /* do not busy-wait the CSR */
		reg = read_csr(dd, RCV_STATUS);
		if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
			break;

		/* give up after 100us - slowest possible at 33MHz is 73us */
		if (count++ > 50) {
			dd_dev_err(dd,
				   "%s: RcvStatus.RxRbufInit not set, continuing\n",
				   __func__);
			break;
		}
	}
}
/* set RXE CSRs to chip reset defaults */
static void reset_rxe_csrs(struct hfi1_devdata *dd)
{
	int i, j;

	/*
	 * RXE Kernel CSRs
	 */
	write_csr(dd, RCV_CTRL, 0);
	init_rbufs(dd);
	/* RCV_STATUS read-only */
	/* RCV_CONTEXTS read-only */
	/* RCV_ARRAY_CNT read-only */
	/* RCV_BUF_SIZE read-only */
	write_csr(dd, RCV_BTH_QP, 0);
	write_csr(dd, RCV_MULTICAST, 0);
	write_csr(dd, RCV_BYPASS, 0);
	write_csr(dd, RCV_VL15, 0);
	/* this is a clear-down */
	write_csr(dd, RCV_ERR_INFO,
		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
	/* RCV_ERR_STATUS read-only */
	write_csr(dd, RCV_ERR_MASK, 0);
	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
	/* RCV_ERR_FORCE leave alone */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
	for (i = 0; i < 4; i++)
		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
		clear_rsm_rule(dd, i);
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);

	/*
	 * RXE Kernel and User Per-Context CSRs
	 */
	for (i = 0; i < dd->chip_rcv_contexts; i++) {
		/* kernel */
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
		/* RCV_CTXT_STATUS read-only */
		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);

		/* user */
		/* RCV_HDR_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
		/* RCV_EGR_INDEX_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
		/* RCV_EGR_OFFSET_TAIL read-only */
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
			write_uctxt_csr(dd, i,
					RCV_TID_FLOW_TABLE + (8 * j), 0);
		}
	}
}
/*
 * Set sc2vl tables.
 *
 * They power on to zeros, so to avoid send context errors
 * they need to be set:
 *
 * SC 0-7 -> VL 0-7 (respectively)
 * SC 15  -> VL 15
 * otherwise
 *        -> VL 0
 */
static void init_sc2vl_tables(struct hfi1_devdata *dd)
{
	int i;
	/* init per architecture spec, constrained by hardware capability */

	/* HFI maps sent packets */
	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
		  0,
		  0, 0, 1, 1,
		  2, 2, 3, 3,
		  4, 4, 5, 5,
		  6, 6, 7, 7));
	write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
		  1,
		  8, 0, 9, 0,
		  10, 0, 11, 0,
		  12, 0, 13, 0,
		  14, 0, 15, 15));
	write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
		  2,
		  16, 0, 17, 0,
		  18, 0, 19, 0,
		  20, 0, 21, 0,
		  22, 0, 23, 0));
	write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
		  3,
		  24, 0, 25, 0,
		  26, 0, 27, 0,
		  28, 0, 29, 0,
		  30, 0, 31, 0));

	/* DC maps received packets */
	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
		  15_0,
		  0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
		  8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
		  31_16,
		  16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
		  24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));

	/* initialize the cached sc2vl values consistently with h/w */
	for (i = 0; i < 32; i++) {
		if (i < 8 || i == 15)
			*((u8 *)(dd->sc2vl) + i) = (u8)i;
		else
			*((u8 *)(dd->sc2vl) + i) = 0;
	}
}
/*
 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
 * depend on the chip going through a power-on reset - a driver may be loaded
 * and unloaded many times.
 *
 * Do not write any CSR values to the chip in this routine - there may be
 * a reset following the (possible) FLR in this routine.
 */
static int init_chip(struct hfi1_devdata *dd)
{
	int i;
	int ret = 0;

	/*
	 * Put the HFI CSRs in a known state.
	 * Combine this with a DC reset.
	 *
	 * Stop the device from doing anything while we do a
	 * reset. We know there are no other active users of
	 * the device since we are now in charge. Turn off
	 * all outbound and inbound traffic and make sure
	 * the device does not generate any interrupts.
	 */

	/* disable send contexts and SDMA engines */
	write_csr(dd, SEND_CTRL, 0);
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
	for (i = 0; i < dd->chip_sdma_engines; i++)
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
	/* disable port (turn off RXE inbound traffic) and contexts */
	write_csr(dd, RCV_CTRL, 0);
	for (i = 0; i < dd->chip_rcv_contexts; i++)
		write_csr(dd, RCV_CTXT_CTRL, 0);
	/* mask all interrupt sources */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);

	/*
	 * DC Reset: do a full DC reset before the register clear.
	 * A recommended length of time to hold is one CSR read,
	 * so reread the CceDcCtrl. Then, hold the DC in reset
	 * across the clear.
	 */
	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
	(void)read_csr(dd, CCE_DC_CTRL);

	if (use_flr) {
		/*
		 * A FLR will reset the SPC core and part of the PCIe.
		 * The parts that need to be restored have already been
		 * saved.
		 */
		dd_dev_info(dd, "Resetting CSRs with FLR\n");

		/* do the FLR, the DC reset will remain */
		pcie_flr(dd->pcidev);

		/* restore command and BARs */
		ret = restore_pci_variables(dd);
		if (ret) {
			dd_dev_err(dd, "%s: Could not restore PCI variables\n",
				   __func__);
			return ret;
		}

		if (is_ax(dd)) {
			dd_dev_info(dd, "Resetting CSRs with FLR\n");
			pcie_flr(dd->pcidev);
			ret = restore_pci_variables(dd);
			if (ret) {
				dd_dev_err(dd,
					   "%s: Could not restore PCI variables\n",
					   __func__);
				return ret;
			}
		}
	} else {
		dd_dev_info(dd, "Resetting CSRs with writes\n");
		reset_cce_csrs(dd);
		reset_txe_csrs(dd);
		reset_rxe_csrs(dd);
		reset_misc_csrs(dd);
	}

	/* clear the DC reset */
	write_csr(dd, CCE_DC_CTRL, 0);

	/* Set the LED off */
	setextled(dd, 0);

	/*
	 * Clear the QSFP reset.
	 * An FLR enforces a 0 on all out pins. The driver does not touch
	 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low, holding
	 * anything plugged in constantly in reset, if it pays attention
	 * to RESET_N.
	 * Prime examples of this are optical cables. Set all pins high.
	 * I2CCLK and I2CDAT will change per direction, and INT_N and
	 * MODPRS_N are input only and their value is ignored.
	 */
	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
	init_chip_resources(dd);
	return ret;
}
static void init_early_variables(struct hfi1_devdata *dd)
{
	int i;

	/* assign link credit variables */
	dd->vau = CM_VAU;
	dd->link_credits = CM_GLOBAL_CREDITS;
	if (is_ax(dd))
		dd->link_credits--;
	dd->vcu = cu_to_vcu(hfi1_cu);
	/* enough room for 8 MAD packets plus header - 17K */
	dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
	if (dd->vl15_init > dd->link_credits)
		dd->vl15_init = dd->link_credits;

	write_uninitialized_csrs_and_memories(dd);

	if (HFI1_CAP_IS_KSET(PKEY_CHECK))
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_pportdata *ppd = &dd->pport[i];

			set_partition_keys(ppd);
		}
	init_sc2vl_tables(dd);
}
static void init_kdeth_qp(struct hfi1_devdata *dd)
{
	/* user changed the KDETH_QP */
	if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
		/* out of range or illegal value */
		dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
		kdeth_qp = 0;
	}
	if (kdeth_qp == 0)	/* not set, or failed range check */
		kdeth_qp = DEFAULT_KDETH_QP;

	write_csr(dd, SEND_BTH_QP,
		  (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
		  SEND_BTH_QP_KDETH_QP_SHIFT);

	write_csr(dd, RCV_BTH_QP,
		  (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
		  RCV_BTH_QP_KDETH_QP_SHIFT);
}
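
/*
 * Illustrative note (the DEFAULT_KDETH_QP value below is assumed for
 * this example only): if the prefix is 0x80, any packet whose BTH
 * DestQP bits [23:16] equal 0x80 is treated as a KDETH packet by both
 * the send and receive sides, carving QPNs 0x800000-0x80ffff out of
 * the normal verbs QPN space.
 */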
/**
 * init_qpmap_table
 * @dd - device data
 * @first_ctxt - first context
 * @last_ctxt - last context
 *
 * This routine sets the qpn mapping table that
 * is indexed by qpn[8:1].
 *
 * The routine will round robin the 256 settings
 * from first_ctxt to last_ctxt.
 *
 * The first/last looks ahead to having specialized
 * receive contexts for mgmt and bypass. Normal
 * verbs traffic will be assumed to be on a range
 * of receive contexts.
 */
static void init_qpmap_table(struct hfi1_devdata *dd,
			     u32 first_ctxt,
			     u32 last_ctxt)
{
	u64 reg = 0;
	u64 regno = RCV_QP_MAP_TABLE;
	int i;
	u64 ctxt = first_ctxt;

	for (i = 0; i < 256; i++) {
		reg |= ctxt << (8 * (i % 8));
		ctxt++;
		if (ctxt > last_ctxt)
			ctxt = first_ctxt;
		if (i % 8 == 7) {
			write_csr(dd, regno, reg);
			reg = 0;
			regno += 8;
		}
	}

	add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
		    | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
}
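
/*
 * Worked example of the table above (context range chosen purely for
 * illustration): with first_ctxt = 3 and last_ctxt = 5 the 256 entries
 * read 3, 4, 5, 3, 4, 5, ... so a packet whose qpn[8:1] == 7 selects
 * entry 7, i.e. receive context 4. Eight one-byte entries are packed
 * per 64-bit RcvQpMapTable register, hence the write on every
 * (i % 8 == 7).
 */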
struct rsm_map_table {
	u64 map[NUM_MAP_REGS];
	unsigned int used;
};

struct rsm_rule_data {
	u8 offset;
	u8 pkt_type;
	u32 field1_off;
	u32 field2_off;
	u32 index1_off;
	u32 index1_width;
	u32 index2_off;
	u32 index2_width;
	u32 mask1;
	u32 value1;
	u32 mask2;
	u32 value2;
};
/*
 * Return an initialized RMT map table for users to fill in. OK if it
 * returns NULL, indicating no table.
 */
static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
{
	struct rsm_map_table *rmt;
	u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */

	rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
	if (rmt) {
		memset(rmt->map, rxcontext, sizeof(rmt->map));
		rmt->used = 0;
	}

	return rmt;
}

/*
 * Write the final RMT map table to the chip and free the table. OK if
 * table is NULL.
 */
static void complete_rsm_map_table(struct hfi1_devdata *dd,
				   struct rsm_map_table *rmt)
{
	int i;

	if (rmt) {
		/* write table to chip */
		for (i = 0; i < NUM_MAP_REGS; i++)
			write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);

		/* enable RSM */
		add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
	}
}
/*
 * Add a receive side mapping rule.
 */
static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
			 struct rsm_rule_data *rrd)
{
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
		  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
		  1ull << rule_index |	/* enable bit */
		  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
		  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
		  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
		  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
		  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
		  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
		  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
		  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
		  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
		  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
		  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
}

/*
 * Clear a receive side mapping rule.
 */
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
{
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
}
/* return the number of RSM map table entries that will be used for QOS */
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np)
{
	int i;
	unsigned int m, n;
	u8 max_by_vl = 0;

	/* is QOS active at all? */
	if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
	    num_vls == 1 ||
	    krcvqsset <= 1)
		goto no_qos;

	/* determine bits for qpn */
	for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
		if (krcvqs[i] > max_by_vl)
			max_by_vl = krcvqs[i];
	if (max_by_vl > 32)
		goto no_qos;
	m = ilog2(__roundup_pow_of_two(max_by_vl));

	/* determine bits for vl */
	n = ilog2(__roundup_pow_of_two(num_vls));

	/* reject if too much is used */
	if ((m + n) > 7)
		goto no_qos;

	if (mp)
		*mp = m;
	if (np)
		*np = n;

	return 1 << (m + n);

no_qos:
	if (mp)
		*mp = 0;
	if (np)
		*np = 0;
	return 0;
}
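
/*
 * Sizing example (krcvqs values assumed for illustration): with
 * krcvqs = {2, 2, 2, 2} and num_vls = 4, max_by_vl = 2 gives
 * m = ilog2(2) = 1 and n = ilog2(4) = 2, so QOS consumes
 * 1 << (m + n) = 8 map table entries and m + n = 3 fits within the
 * 7 index bits the hardware provides.
 */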
/**
 * init_qos - init RX qos
 * @dd - device data
 * @rmt - RSM map table
 *
 * This routine initializes Rule 0 and the RSM map table to implement
 * quality of service (qos).
 *
 * If all of the limit tests succeed, qos is applied based on the array
 * interpretation of krcvqs where entry 0 is VL0.
 *
 * The number of vl bits (n) and the number of qpn bits (m) are computed to
 * feed both the RSM map table and the single rule.
 */
static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
{
	struct rsm_rule_data rrd;
	unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
	unsigned int rmt_entries;
	u64 reg;

	if (!rmt)
		goto bail;
	rmt_entries = qos_rmt_entries(dd, &m, &n);
	if (rmt_entries == 0)
		goto bail;
	qpns_per_vl = 1 << m;

	/* enough room in the map table? */
	rmt_entries = 1 << (m + n);
	if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
		goto bail;

	/* add qos entries to the RSM map table */
	for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
		unsigned tctxt;

		for (qpn = 0, tctxt = ctxt;
		     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
			unsigned idx, regoff, regidx;

			/* generate the index the hardware will produce */
			idx = rmt->used + ((qpn << n) ^ i);
			regoff = (idx % 8) * 8;
			regidx = idx / 8;
			/* replace default with context number */
			reg = rmt->map[regidx];
			reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
				 << regoff);
			reg |= (u64)(tctxt++) << regoff;
			rmt->map[regidx] = reg;
			if (tctxt == ctxt + krcvqs[i])
				tctxt = ctxt;
		}
		ctxt += krcvqs[i];
	}

	rrd.offset = rmt->used;
	rrd.pkt_type = 2;
	rrd.field1_off = LRH_BTH_MATCH_OFFSET;
	rrd.field2_off = LRH_SC_MATCH_OFFSET;
	rrd.index1_off = LRH_SC_SELECT_OFFSET;
	rrd.index1_width = n;
	rrd.index2_off = QPN_SELECT_OFFSET;
	rrd.index2_width = m + n;
	rrd.mask1 = LRH_BTH_MASK;
	rrd.value1 = LRH_BTH_VALUE;
	rrd.mask2 = LRH_SC_MASK;
	rrd.value2 = LRH_SC_VALUE;

	/* add rule 0 */
	add_rsm_rule(dd, RSM_INS_VERBS, &rrd);

	/* mark RSM map entries as used */
	rmt->used += rmt_entries;
	/* map everything else to the mcast/err/vl15 context */
	init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
	dd->qos_shift = n + 1;
	return;
bail:
	dd->qos_shift = 1;
	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
}
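
/*
 * Worked example of the index computation above (m and n assumed for
 * illustration): with n = 2 vl bits, a packet on VL i = 2 whose qpn
 * bit is qpn = 1 lands on idx = rmt->used + ((1 << 2) ^ 2) =
 * rmt->used + 6, so consecutive QPNs on the same VL are spread across
 * map entries rather than clustered together.
 */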
static void init_user_fecn_handling(struct hfi1_devdata *dd,
				    struct rsm_map_table *rmt)
{
	struct rsm_rule_data rrd;
	u64 reg;
	int i, idx, regoff, regidx;
	u8 offset;

	/* there needs to be enough room in the map table */
	if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
		dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
		return;
	}

	/*
	 * RSM will extract the destination context as an index into the
	 * map table. The destination contexts are a sequential block
	 * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
	 * Map entries are accessed as offset + extracted value. Adjust
	 * the added offset so this sequence can be placed anywhere in
	 * the table - as long as the entries themselves do not wrap.
	 * There are only enough bits in offset for the table size, so
	 * start with that to allow for a "negative" offset.
	 */
	offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
		      (int)dd->first_dyn_alloc_ctxt);

	for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
	     i < dd->num_rcv_contexts; i++, idx++) {
		/* replace with identity mapping */
		regoff = (idx % 8) * 8;
		regidx = idx / 8;
		reg = rmt->map[regidx];
		reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
		reg |= (u64)i << regoff;
		rmt->map[regidx] = reg;
	}

	/*
	 * For RSM intercept of Expected FECN packets:
	 * o packet type 0 - expected
	 * o match on F (bit 95), using select/match 1, and
	 * o match on SH (bit 133), using select/match 2.
	 *
	 * Use index 1 to extract the 8-bit receive context from DestQP
	 * (start at bit 64). Use that as the RSM map table index.
	 */
	rrd.offset = offset;
	rrd.pkt_type = 0;
	rrd.field1_off = 95;
	rrd.field2_off = 133;
	rrd.index1_off = 64;
	rrd.index1_width = 8;
	rrd.index2_off = 0;
	rrd.index2_width = 0;
	rrd.mask1 = 1;
	rrd.value1 = 1;
	rrd.mask2 = 1;
	rrd.value2 = 1;

	/* add rule 1 */
	add_rsm_rule(dd, RSM_INS_FECN, &rrd);

	rmt->used += dd->num_user_contexts;
}
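
/*
 * Offset arithmetic example (counts assumed for illustration): with
 * NUM_MAP_ENTRIES = 256, rmt->used = 20, and first_dyn_alloc_ctxt = 24,
 * offset = (u8)(256 + 20 - 24) = 252, and an extracted context of 24
 * indexes (252 + 24) mod 256 = 20, the first entry written by the loop
 * above. The mod-256 wrap of the 8-bit offset is what makes the
 * "negative" offset work.
 */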
/* Initialize RSM for VNIC */
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
{
	u8 i, j;
	u8 ctx_id = 0;
	u64 reg;
	u32 regoff;
	struct rsm_rule_data rrd;

	if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
		dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
			   dd->vnic.rmt_start);
		return;
	}

	dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
		dd->vnic.rmt_start,
		dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);

	/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
	regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
	reg = read_csr(dd, regoff);
	for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
		/* Update map register with vnic context */
		j = (dd->vnic.rmt_start + i) % 8;
		reg &= ~(0xffllu << (j * 8));
		reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
		/* Wrap up vnic ctx index */
		ctx_id %= dd->vnic.num_ctxt;
		/* Write back map register */
		if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
			dev_dbg(&(dd)->pcidev->dev,
				"Vnic rsm map reg[%d] =0x%llx\n",
				regoff - RCV_RSM_MAP_TABLE, reg);

			write_csr(dd, regoff, reg);
			regoff += 8;
			if (i < (NUM_VNIC_MAP_ENTRIES - 1))
				reg = read_csr(dd, regoff);
		}
	}

	/* Add rule for vnic */
	rrd.offset = dd->vnic.rmt_start;
	rrd.pkt_type = 4;
	/* Match 16B packets */
	rrd.field1_off = L2_TYPE_MATCH_OFFSET;
	rrd.mask1 = L2_TYPE_MASK;
	rrd.value1 = L2_16B_VALUE;
	/* Match ETH L4 packets */
	rrd.field2_off = L4_TYPE_MATCH_OFFSET;
	rrd.mask2 = L4_16B_TYPE_MASK;
	rrd.value2 = L4_16B_ETH_VALUE;
	/* Calc context from veswid and entropy */
	rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
	rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
	rrd.index2_off = L2_16B_ENTROPY_OFFSET;
	rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
	add_rsm_rule(dd, RSM_INS_VNIC, &rrd);

	/* Enable RSM if not already enabled */
	add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}

void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
{
	clear_rsm_rule(dd, RSM_INS_VNIC);

	/* Disable RSM if used only by vnic */
	if (dd->vnic.rmt_start == 0)
		clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
static void init_rxe(struct hfi1_devdata *dd)
{
	struct rsm_map_table *rmt;
	u64 val;

	/* enable all receive errors */
	write_csr(dd, RCV_ERR_MASK, ~0ull);

	rmt = alloc_rsm_map_table(dd);
	/* set up QOS, including the QPN map table */
	init_qos(dd, rmt);
	init_user_fecn_handling(dd, rmt);
	complete_rsm_map_table(dd, rmt);
	/* record number of used rsm map entries for vnic */
	dd->vnic.rmt_start = rmt->used;
	kfree(rmt);

	/*
	 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
	 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
	 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
	 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
	 * Max_PayLoad_Size set to its minimum of 128.
	 *
	 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
	 * (64 bytes). Max_Payload_Size is possibly modified upward in
	 * tune_pcie_caps() which is called after this routine.
	 */

	/* Have 16 bytes (4DW) of bypass header available in header queue */
	val = read_csr(dd, RCV_BYPASS);
	val |= (4ull << 16);
	write_csr(dd, RCV_BYPASS, val);
}
static void init_other(struct hfi1_devdata *dd)
{
	/* enable all CCE errors */
	write_csr(dd, CCE_ERR_MASK, ~0ull);
	/* enable *some* Misc errors */
	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
	/* enable all DC errors, except LCB */
	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
}
/*
 * Fill out the given AU table using the given CU. A CU is defined in terms
 * of AUs. The table is an encoding: given the index, how many AUs does that
 * index represent?
 *
 * NOTE: Assumes that the register layout is the same for the
 * local and remote tables.
 */
static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
			       u32 csr0to3, u32 csr4to7)
{
	write_csr(dd, csr0to3,
		  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
		  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
		  2ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
		  4ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
	write_csr(dd, csr4to7,
		  8ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
		  16ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
		  32ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
		  64ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
}

static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
			   SEND_CM_LOCAL_AU_TABLE4_TO7);
}

void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
			   SEND_CM_REMOTE_AU_TABLE4_TO7);
}
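
/*
 * Encoding example (cu value assumed for illustration): with cu = 1
 * the eight entries encode 0, 1, 2, 4, 8, 16, 32, and 64 AUs, so a
 * 3-bit index in a credit return selects a near power-of-two AU count;
 * a larger cu scales entries 2 through 7 proportionally.
 */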
static void init_txe(struct hfi1_devdata *dd)
{
	int i;

	/* enable all PIO, SDMA, general, and Egress errors */
	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
	write_csr(dd, SEND_ERR_MASK, ~0ull);
	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);

	/* enable all per-context and per-SDMA engine errors */
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
	for (i = 0; i < dd->chip_sdma_engines; i++)
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);

	/* set the local CU to AU mapping */
	assign_local_cm_au_table(dd, dd->vcu);

	/*
	 * Set reasonable default for Credit Return Timer
	 * Don't set on Simulator - causes it to choke.
	 */
	if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
}
int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
		       u16 jkey)
{
	u8 hw_ctxt;
	u64 reg;

	if (!rcd || !rcd->sc)
		return -EINVAL;

	hw_ctxt = rcd->sc->hw_context;
	reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
		((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
		 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
	/* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
	if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
		reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
	/*
	 * Enable send-side J_KEY integrity check, unless this is A0 h/w
	 */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}

	/* Enable J_KEY check on receive context. */
	reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
		((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
		 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);

	return 0;
}
int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u8 hw_ctxt;
	u64 reg;

	if (!rcd || !rcd->sc)
		return -EINVAL;

	hw_ctxt = rcd->sc->hw_context;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
	/*
	 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
	 * This check would not have been enabled for A0 h/w, see
	 * set_ctxt_jkey().
	 */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}
	/* Turn off the J_KEY on the receive side */
	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);

	return 0;
}
int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
		       u16 pkey)
{
	u8 hw_ctxt;
	u64 reg;

	if (!rcd || !rcd->sc)
		return -EINVAL;

	hw_ctxt = rcd->sc->hw_context;
	reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
		SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);

	return 0;
}
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
{
	u8 hw_ctxt;
	u64 reg;

	if (!ctxt || !ctxt->sc)
		return -EINVAL;

	hw_ctxt = ctxt->sc->hw_context;
	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);

	return 0;
}
/*
 * Start doing the clean up of the chip. Our clean up happens in multiple
 * stages and this is just the first.
 */
void hfi1_start_cleanup(struct hfi1_devdata *dd)
{
	aspm_exit(dd);
	free_cntrs(dd);
	free_rcverr(dd);
	clean_up_interrupts(dd);
	finish_chip_resources(dd);
}

#define HFI_BASE_GUID(dev) \
	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
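
/*
 * Illustrative note: the two HFIs on one ASIC differ only in the GUID
 * bit at GUID_HFI_INDEX_SHIFT, so masking that bit off gives both
 * devices the same base value, which init_asic_data() below uses to
 * recognize its peer.
 */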
/*
 * Information can be shared between the two HFIs on the same ASIC
 * in the same OS. This function finds the peer device and sets
 * up a shared structure.
 */
static int init_asic_data(struct hfi1_devdata *dd)
{
	unsigned long flags;
	struct hfi1_devdata *tmp, *peer = NULL;
	struct hfi1_asic_data *asic_data;
	int ret = 0;

	/* pre-allocate the asic structure in case we are the first device */
	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
	if (!asic_data)
		return -ENOMEM;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	/* Find our peer device */
	list_for_each_entry(tmp, &hfi1_dev_list, list) {
		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
		    dd->unit != tmp->unit) {
			peer = tmp;
			break;
		}
	}

	if (peer) {
		/* use already allocated structure */
		dd->asic_data = peer->asic_data;
		kfree(asic_data);
	} else {
		dd->asic_data = asic_data;
		mutex_init(&dd->asic_data->asic_resource_mutex);
	}
	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	/* first one through - set up i2c devices */
	if (!peer)
		ret = set_up_i2c(dd, dd->asic_data);

	return ret;
}
/*
 * Set dd->boardname. Use a generic name if a name is not returned from
 * EFI variable space.
 *
 * Return 0 on success, -ENOMEM if space could not be allocated.
 */
static int obtain_boardname(struct hfi1_devdata *dd)
{
	/* generic board description */
	const char generic[] =
		"Intel Omni-Path Host Fabric Interface Adapter 100 Series";
	unsigned long size;
	int ret;

	ret = read_hfi1_efi_var(dd, "description", &size,
				(void **)&dd->boardname);
	if (ret) {
		dd_dev_info(dd, "Board description not found\n");
		/* use generic description */
		dd->boardname = kstrdup(generic, GFP_KERNEL);
		if (!dd->boardname)
			return -ENOMEM;
	}
	return 0;
}
/*
 * Check the interrupt registers to make sure that they are mapped correctly.
 * It is intended to help user identify any mismapping by VMM when the driver
 * is running in a VM. This function should only be called before interrupt
 * is set up properly.
 *
 * Return 0 on success, -EINVAL on failure.
 */
static int check_int_registers(struct hfi1_devdata *dd)
{
	u64 reg;
	u64 all_bits = ~(u64)0;
	u64 mask;

	/* Clear CceIntMask[0] to avoid raising any interrupts */
	mask = read_csr(dd, CCE_INT_MASK);
	write_csr(dd, CCE_INT_MASK, 0ull);
	reg = read_csr(dd, CCE_INT_MASK);
	if (reg)
		goto err_exit;

	/* Clear all interrupt status bits */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg)
		goto err_exit;

	/* Set all interrupt status bits */
	write_csr(dd, CCE_INT_FORCE, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg != all_bits)
		goto err_exit;

	/* Restore the interrupt mask */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	write_csr(dd, CCE_INT_MASK, mask);

	return 0;
err_exit:
	write_csr(dd, CCE_INT_MASK, mask);
	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
	return -EINVAL;
}
/**
 * Allocate and initialize the device structure for the hfi.
 * @dev: the pci_dev for hfi1_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, initializes, and returns the devdata struct for this
 * device instance
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	u64 reg;
	int i, ret;
	static const char * const inames[] = { /* implementation names */
		"RTL silicon",
		"RTL VCS simulation",
		"RTL FPGA emulation",
		"Functional simulator"
	};
	struct pci_dev *parent = pdev->bus->self;

	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd))
		goto bail;
	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		int vl;

		/* init common fields */
		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
		/* DC supports 4 link widths */
		ppd->link_width_supported =
			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_supported =
			ppd->link_width_supported;
		/* start out enabling only 4X */
		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_enabled =
			ppd->link_width_downgrade_supported;
		/* link width active is 0 when link is down */
		/* link width downgrade active is 0 when link is down */

		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
			hfi1_early_err(&pdev->dev,
				       "Invalid num_vls %u, using %u VLs\n",
				       num_vls, HFI1_MAX_VLS_SUPPORTED);
			num_vls = HFI1_MAX_VLS_SUPPORTED;
		}
		ppd->vls_supported = num_vls;
		ppd->vls_operational = ppd->vls_supported;
		/* Set the default MTU. */
		for (vl = 0; vl < num_vls; vl++)
			dd->vld[vl].mtu = hfi1_max_mtu;
		dd->vld[15].mtu = MAX_MAD_PACKET;
		/*
		 * Set the initial values to reasonable default, will be set
		 * for real when link is up.
		 */
		ppd->overrun_threshold = 0x4;
		ppd->phy_error_threshold = 0xf;
		ppd->port_crc_mode_enabled = link_crc_mask;
		/* initialize supported LTP CRC mode */
		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* initialize enabled LTP CRC mode */
		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
		/* start in offline */
		ppd->host_link_state = HLS_DN_OFFLINE;
		init_vl_arb_caches(ppd);
	}

	dd->link_default = HLS_DN_POLL;

	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped.
	 */
	ret = hfi1_pcie_ddinit(dd, pdev);
	if (ret < 0)
		goto bail_free;

	/* Save PCI space registers to rewrite after device reset */
	ret = save_pci_variables(dd);
	if (ret < 0)
		goto bail_cleanup;

	/* verify that reads actually work, save revision for reset check */
	dd->revision = read_csr(dd, CCE_REVISION);
	if (dd->revision == ~(u64)0) {
		dd_dev_err(dd, "cannot read chip CSRs\n");
		ret = -EINVAL;
		goto bail_cleanup;
	}
	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	/*
	 * Check interrupt registers mapping if the driver has no access to
	 * the upstream component. In this case, it is likely that the driver
	 * is running in a VM.
	 */
	if (!parent) {
		ret = check_int_registers(dd);
		if (ret)
			goto bail_cleanup;
	}

	/*
	 * obtain the hardware ID - NOT related to unit, which is a
	 * software enumeration
	 */
	reg = read_csr(dd, CCE_REVISION2);
	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
					& CCE_REVISION2_HFI_ID_MASK;
	/* the variable size will remove unwanted bits */
	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
		    dd->icode < ARRAY_SIZE(inames) ?
		    inames[dd->icode] : "unknown", (int)dd->irev);

	/* speeds the hardware can support */
	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
	/* speeds allowed to run at */
	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
	/* give a reasonable active value, will be set on link up */
	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;

	dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
	dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
	dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
	dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
	dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
	/* fix up link widths for emulation _p */
	ppd = dd->pport;
	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
		ppd->link_width_supported =
			ppd->link_width_enabled =
			ppd->link_width_downgrade_supported =
			ppd->link_width_downgrade_enabled =
				OPA_LINK_WIDTH_1X;
	}
	/* ensure num_vls isn't larger than number of sdma engines */
	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
			   num_vls, dd->chip_sdma_engines);
		num_vls = dd->chip_sdma_engines;
		ppd->vls_supported = dd->chip_sdma_engines;
		ppd->vls_operational = ppd->vls_supported;
	}

	/*
	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
	 * Limit the max if larger than the field holds. If timeout is
	 * non-zero, then the calculated field will be at least 1.
	 *
	 * Must be after icode is set up - the cclock rate depends
	 * on knowing the hardware being used.
	 */
	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
	if (dd->rcv_intr_timeout_csr >
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
		dd->rcv_intr_timeout_csr =
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
		dd->rcv_intr_timeout_csr = 1;
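
	/*
	 * Worked conversion (cclock rate assumed for illustration only):
	 * at a nominal cclock near 800 MHz a cclock is ~1.25 ns, so the
	 * default rcv_intr_timeout of 840 ns is about 840 / 1.25 = 672
	 * cclocks and the CSR reload field becomes 672 / 64 = 10. A
	 * non-zero timeout that would round down to 0 is bumped to 1 so
	 * mitigation is never silently disabled.
	 */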
	/* needs to be done before we look for the peer device */
	read_guid(dd);

	/* set up shared ASIC data with peer device */
	ret = init_asic_data(dd);
	if (ret)
		goto bail_cleanup;

	/* obtain chip sizes, reset chip CSRs */
	ret = init_chip(dd);
	if (ret)
		goto bail_cleanup;

	/* read in the PCIe link speed information */
	ret = pcie_speeds(dd);
	if (ret)
		goto bail_cleanup;

	/* call before get_platform_config(), after init_chip_resources() */
	ret = eprom_init(dd);
	if (ret)
		goto bail_free_rcverr;

	/* Needs to be called before hfi1_firmware_init */
	get_platform_config(dd);

	/* read in firmware */
	ret = hfi1_firmware_init(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * In general, the PCIe Gen3 transition must occur after the
	 * chip has been idled (so it won't initiate any PCIe transactions
	 * e.g. an interrupt) and before the driver changes any registers
	 * (the transition will reset the registers).
	 *
	 * In particular, place this call after:
	 * - init_chip() - the chip will not initiate any PCIe transactions
	 * - pcie_speeds() - reads the current link speed
	 * - hfi1_firmware_init() - the needed firmware is ready to be
	 *   downloaded
	 */
	ret = do_pcie_gen3_transition(dd);
	if (ret)
		goto bail_cleanup;

	/* start setting dd values and adjusting CSRs */
	init_early_variables(dd);

	parse_platform_config(dd);

	ret = obtain_boardname(dd);
	if (ret)
		goto bail_cleanup;

	snprintf(dd->boardversion, BOARD_VERS_MAX,
		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
		 (u32)dd->majrev,
		 (u32)dd->minrev,
		 (dd->revision >> CCE_REVISION_SW_SHIFT)
		    & CCE_REVISION_SW_MASK);

	ret = set_up_context_variables(dd);
	if (ret)
		goto bail_cleanup;

	/* set initial RXE CSRs */
	init_rxe(dd);
	/* set initial TXE CSRs */
	init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
	init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
	init_kdeth_qp(dd);

	ret = hfi1_dev_affinity_init(dd);
	if (ret)
		goto bail_cleanup;

	/* send contexts must be set up before receive contexts */
	ret = init_send_contexts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_create_kctxts(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * Initialize aspm, to be done after gen3 transition and setting up
	 * contexts and before enabling interrupts
	 */
	aspm_init(dd);

	dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
	/*
	 * rcd[0] is guaranteed to be valid by this point. Also, all
	 * contexts are using the same value, as per the module parameter.
	 */
	dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);

	ret = init_pervl_scs(dd);
	if (ret)
		goto bail_cleanup;

	/* sdma init */
	for (i = 0; i < dd->num_pports; ++i) {
		ret = sdma_init(dd, i);
		if (ret)
			goto bail_cleanup;
	}

	/* use contexts created by hfi1_create_kctxts */
	ret = set_up_interrupts(dd);
	if (ret)
		goto bail_cleanup;

	/* set up LCB access - must be after set_up_interrupts() */
	init_lcb_access(dd);

	/*
	 * Serial number is created from the base guid:
	 * [27:24] = base guid [38:35]
	 * [23: 0] = base guid [23: 0]
	 */
	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
		 (dd->base_guid & 0xFFFFFF) |
		     ((dd->base_guid >> 11) & 0xF000000));
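
	/*
	 * Worked example (GUID value invented for illustration): for
	 * base_guid 0x0011223344556677, bits [23:0] contribute 0x556677
	 * and (base_guid >> 11) & 0xF000000 moves GUID bits [38:35] into
	 * serial bits [27:24], matching the layout in the comment above.
	 */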
	dd->oui1 = dd->base_guid >> 56 & 0xFF;
	dd->oui2 = dd->base_guid >> 48 & 0xFF;
	dd->oui3 = dd->base_guid >> 40 & 0xFF;

	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
	if (ret)
		goto bail_clear_intr;

	thermal_init(dd);

	ret = init_cntrs(dd);
	if (ret)
		goto bail_clear_intr;

	ret = init_rcverr(dd);
	if (ret)
		goto bail_free_cntrs;

	init_completion(&dd->user_comp);

	/* The user refcount starts with one to indicate an active device */
	atomic_set(&dd->user_refcount, 1);

	goto bail;

bail_free_rcverr:
	free_rcverr(dd);
bail_free_cntrs:
	free_cntrs(dd);
bail_clear_intr:
	clean_up_interrupts(dd);
bail_cleanup:
	hfi1_pcie_ddcleanup(dd);
bail_free:
	hfi1_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
			u32 dw_len)
{
	u32 delta_cycles;
	u32 current_egress_rate = ppd->current_egress_rate;
	/* rates here are in units of 10^6 bits/sec */

	if (desired_egress_rate == -1)
		return 0; /* shouldn't happen */

	if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't help go faster, only slower */

	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
		       egress_cycles(dw_len * 4, current_egress_rate);

	return (u16)delta_cycles;
}
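
/*
 * Illustrative use (rates assumed; egress_cycles() is taken to convert
 * a byte count and a rate into egress clock cycles): throttling a
 * 1024-byte packet from a 100000 Mb/s current egress rate down to a
 * desired 25000 Mb/s static rate returns the extra cycles the slower
 * rate needs; create_pbc() below folds that count into the PBC static
 * rate control field.
 */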
/**
 * create_pbc - build a pbc for transmission
 * @flags: special case flags or-ed in built pbc
 * @srate: static rate
 * @vl: vl
 * @dwlen: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature. The lone other caller
 * is for the diagnostic interface which calls this if the user does not
 * supply their own PBC.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
	       u32 dw_len)
{
	u64 pbc, delay = 0;

	if (unlikely(srate_mbs))
		delay = delay_cycles(ppd, srate_mbs, dw_len);

	pbc = flags
		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
		| (dw_len & PBC_LENGTH_DWS_MASK)
			<< PBC_LENGTH_DWS_SHIFT;

	return pbc;
}
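
/*
 * Hypothetical call (values for illustration only): a 32-dword verbs
 * packet on VL 0 with no special flags and no static rate limiting
 * could be built as
 *
 *	pbc = create_pbc(ppd, 0, 0, 0, 32);
 *
 * yielding a PBC with length 32 DWs, VL 0, HCRC insertion disabled,
 * and a zero static rate control count.
 */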
#define SBUS_THERMAL	0x4f
#define SBUS_THERM_MONITOR_MODE 0x1

#define THERM_FAILURE(dev, ret, reason) \
	dd_dev_err((dd),						\
		   "Thermal sensor initialization failed: %s (%d)\n",	\
		   (reason), (ret))

/*
 * Initialize the thermal sensor.
 *
 * After initialization, enable polling of thermal sensor through
 * SBus interface. In order for this to work, the SBus Master
 * firmware has to be loaded due to the fact that the HW polling
 * logic uses SBus interrupts, which are not supported with
 * default firmware. Otherwise, no data will be returned through
 * the ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
	int ret = 0;

	if (dd->icode != ICODE_RTL_SILICON ||
	    check_chip_resource(dd, CR_THERM_INIT, NULL))
		return ret;

	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
	if (ret) {
		THERM_FAILURE(dd, ret, "Acquire SBus");
		return ret;
	}

	dd_dev_info(dd, "Initializing thermal sensor\n");
	/* Disable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
	/* Thermal Sensor Initialization */
	/*    Step 1: Reset the Thermal SBus Receiver */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				RESET_SBUS_RECEIVER, 0);
	if (ret) {
		THERM_FAILURE(dd, ret, "Bus Reset");
		goto done;
	}
	/*    Step 2: Set Reset bit in Thermal block */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x1);
	if (ret) {
		THERM_FAILURE(dd, ret, "Therm Block Reset");
		goto done;
	}
	/*    Step 3: Write clock divider value (100MHz -> 2MHz) */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
				WRITE_SBUS_RECEIVER, 0x32);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Clock Div");
		goto done;
	}
	/*    Step 4: Select temperature mode */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
				WRITE_SBUS_RECEIVER,
				SBUS_THERM_MONITOR_MODE);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Mode Sel");
		goto done;
	}
	/*    Step 5: De-assert block reset and start conversion */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x2);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Reset Deassert");
		goto done;
	}
	/*    Step 5.1: Wait for first conversion (21.5ms per spec) */
	msleep(22);

	/* Enable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);

	/* Set initialized flag */
	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
	if (ret)
		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");

done:
	release_chip_resource(dd, CR_SBUS);
	return ret;
}
static void handle_temp_err(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = &dd->pport[0];
	/*
	 * Thermal Critical Interrupt
	 * Put the device into forced freeze mode, take link down to
	 * offline, and put DC into reset.
	 */
	dd_dev_emerg(dd,
		     "Critical temperature reached! Forcing device into freeze mode!\n");
	dd->flags |= HFI1_FORCED_FREEZE;
	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
	/*
	 * Shut DC down as much and as quickly as possible.
	 *
	 * Step 1: Take the link down to OFFLINE. This will cause the
	 *         8051 to put the Serdes in reset. However, we don't want to
	 *         go through the entire link state machine since we want to
	 *         shutdown ASAP. Furthermore, this is not a graceful shutdown
	 *         but rather an attempt to save the chip.
	 *         Code below is almost the same as quiet_serdes() but avoids
	 *         all the extra work and the sleeps.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;
	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
				PLS_OFFLINE);

	/*
	 * Step 2: Shutdown LCB and 8051
	 *         After shutdown, do not restore DC_CFG_RESET value.