/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#define NUM_IB_PORTS 1
uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */
struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
};
/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
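/*
 * Illustrative expansion (not in the original source):
 *	FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 * becomes the struct flag_table initializer
 *	{ CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 }
 * i.e. the flag mask first, then the description string, then "extra".
 */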
/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */
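/*
 * Illustrative note (not in the original source): these bits are OR'd
 * together in a flag_table entry's "extra" field.  An error tagged
 * SEC_WRITE_DROPPED | SEC_SPC_FREEZE (as in pio_err_status_flags below)
 * means the offending write is discarded and an SPC (per-HFI) freeze
 * is triggered, not just a halt of one send context.
 */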
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
#define NUM_MAP_REGS 32
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
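/*
 * Illustrative note (not in the original source): these offsets encode a
 * header quadword index above QW_SHIFT and a bit offset below it, e.g.
 *	LRH_BTH_MATCH_OFFSET = (0ull << 6) | 48	-> quadword 0, bit 48
 *	QPN_SELECT_OFFSET    = (1ull << 6) | 1	-> quadword 1, bit 1
 */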
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
(	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)
#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, e1, e1val, e2, e2val, e3, e3val, \
	e4, e4val, e5, e5val, e6, e6val, e7, e7val, \
	e8, e8val, e9, e9val, e10, e10val, e11, e11val, \
	e12, e12val, e13, e13val, e14, e14val, e15, e15val) \
(	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)
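/*
 * Illustrative note (not in the original source): each (entry, value)
 * pair is shifted into its field of a single 64-bit
 * DCC_CFG_SC_VL_TABLE_* register, so one invocation of DC_SC_VL_VAL()
 * builds the whole power-on SC-to-VL mapping word for a 16-entry range.
 */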
/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
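/*
 * Illustrative use (a sketch, not from the original source): freeze
 * handling can poll the top-level CceStatus CSR until every sub-block
 * reports frozen, e.g.
 *	while ((read_csr(dd, CCE_STATUS) & ALL_FROZE) != ALL_FROZE)
 *		;	/* real code would bound this with a timeout */
 * where CCE_STATUS is assumed to be the CceStatus register offset.
 */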
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK)
};
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};
/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};
/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK)
};
/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC))
};
/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};
/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))
/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};
/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved */
};
/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};
/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};
/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
};
/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC SMA message", 0x0002),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
};
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
/*
 * Error interrupt table entry.  This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};
#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
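/*
 * Illustrative expansion (not in the original source):
 *	EE(CCE_ERR, handle_cce_err, "CceErr")
 * builds the err_reg_info initializer
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" }
 * i.e. the status/clear/mask CSR trio for that error group plus its handler.
 */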
/*
 * Table of the "misc" grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};
/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};
/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
/*
 * Table of the DC grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};
struct cntr_entry {
	/* counter name */
	char *name;
	/* csr to read for name (if applicable) */
	u64 csr;
	/* offset into dd or ppd to store the counter's value */
	int offset;
	/* flags */
	u8 flags;
	/* accessor for stat element, context either dd or ppd */
	u64 (*rw_cntr)(const struct cntr_entry *,
		       void *context, int vl, int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)
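/*
 * Illustrative note (not in the original source): the hardware counter
 * arrays are arrays of 64-bit CSRs, so each element above locates its
 * register at the array base plus 8 * counter index.  OVR_ELM strides by
 * 0x100 instead, presumably because each receive context's overflow
 * count sits in that context's own CSR block; OVR_ELM(3), for instance,
 * names "RcvHdrOvr3" at RCV_HDR_OVFL_CNT + 0x300.
 */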
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, flags, \
	  dev_access_u64_csr)

#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	u64 val;

	if (dd->flags & HFI1_PRESENT) {
		val = readq((void __iomem *)dd->kregbase + offset);
		return val;
	}
	return -1;
}
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT)
		writeq(value, (void __iomem *)dd->kregbase + offset);
}
void __iomem *get_csr_addr(
	struct hfi1_devdata *dd,
	u32 offset)
{
	return (void __iomem *)dd->kregbase + offset;
}
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}
static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}
static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}
/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
				u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

	return ret;
}
static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}
static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}
static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}
u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
		u64 __percpu *cntr,
		int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}
static u64 access_sw_pio_wait(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}
static u64 access_sw_send_schedule(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}
/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[0];
}
/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}
/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[30];
}

static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[29];
}

static u64 access_pcic_transmit_back_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[28];
}

static u64 access_pcic_transmit_front_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[27];
}

static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[26];
}

static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[25];
}

static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[24];
}

static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[23];
}

static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[22];
}

static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[21];
}

static u64 access_pcic_n_post_dat_q_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[20];
}

static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[19];
}

static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[18];
}

static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[17];
}

static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[7];
}

static u64 access_cce_cli0_async_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[6];
}

static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[5];
}

static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[4];
}

static u64 access_cce_trgt_async_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[3];
}

static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[2];
}

static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[1];
}

static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[63];
}

static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[62];
}

static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[61];
}

static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[60];
}

static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[59];
}

static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[58];
}

static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[57];
}

static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[56];
}

static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[55];
}

static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[54];
}

static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[53];
}

static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[52];
}

static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[51];
}

static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[50];
}

static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[49];
}

static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[48];
}

static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[47];
}

static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[46];
}

static u64 access_rx_hq_intr_csr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[45];
}

static u64 access_rx_lookup_csr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[44];
}

static u64 access_rx_lookup_rcv_array_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[43];
}

static u64 access_rx_lookup_rcv_array_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[42];
}

static u64 access_rx_lookup_des_part2_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[41];
}

static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[40];
}

static u64 access_rx_lookup_des_part1_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[39];
}

static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[38];
}

static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[37];
}

static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[36];
}

static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[35];
}

static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[34];
}

static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[33];
}

static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[32];
}

static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[31];
}

static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[30];
}

static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[29];
}

static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[28];
}

static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[27];
}

static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[26];
}

static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[25];
}

static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[24];
}

static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[23];
}

static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[22];
}

static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[21];
}

static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[20];
}

static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[19];
}

static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[18];
}

static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[17];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[16];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[15];
}

static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[14];
}

static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[13];
}

static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[12];
}

static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[11];
}

static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[10];
}

static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[9];
}

static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[8];
}

static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[7];
}

static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[6];
}

static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[5];
}

static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[4];
}

static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[3];
}

static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[2];
}

static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[1];
}

static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
static u64 access_pio_pec_sop_head_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[35];
}

static u64 access_pio_pcc_sop_head_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[34];
}

static u64 access_pio_last_returned_cnt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[33];
}

static u64 access_pio_current_free_cnt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[32];
}

static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[31];
}

static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[30];
}

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[63];
}

static u64 access_tx_read_sdma_memory_csr_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[62];
}

static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[61];
}

static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[60];
}

static u64 access_tx_read_sdma_memory_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[59];
}

static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[58];
}

static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[57];
}

static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[56];
}

static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[55];
}

static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[54];
}

static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[53];
}

static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[52];
}

static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[51];
}

static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[50];
}

static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[49];
}

static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[48];
}

static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[47];
}

static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[46];
}

static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[45];
}

static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[44];
}

static u64 access_tx_read_sdma_memory_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[43];
}

static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[42];
}

static u64 access_tx_credit_return_partiy_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[41];
}

static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[40];
}

static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[39];
}

static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[38];
}

static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[37];
}

static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[36];
}

static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[35];
}

static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[34];
}

static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[33];
}

static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[32];
}

static u64 access_tx_sdma15_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[31];
}

static u64 access_tx_sdma14_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[30];
}

static u64 access_tx_sdma13_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[29];
}

static u64 access_tx_sdma12_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[28];
}

static u64 access_tx_sdma11_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[27];
}

static u64 access_tx_sdma10_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[26];
}

static u64 access_tx_sdma9_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[25];
}

static u64 access_tx_sdma8_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[24];
}

static u64 access_tx_sdma7_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[23];
}

static u64 access_tx_sdma6_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
				void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
			      ppd->ibport_data.rvp.cntr, vl, \
			      mode, data); \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
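
/*
 * Illustrative expansion (a sketch, not compiled): def_access_sw_cpu(rc_acks)
 * above generates roughly
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd,
 *				      &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */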
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
			     void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
\
	if (vl != CNTR_INVALID_VL) \
		return 0; \
\
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
			     mode, data); \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
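
/*
 * Illustrative expansion (a sketch, not compiled):
 * def_access_ibp_counter(loop_pkts) generates an accessor that rejects
 * per-VL queries before forwarding to read_write_sw():
 *
 *	static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *					void *context, int vl, int mode,
 *					u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd,
 *				     &ppd->ibport_data.rvp.n_loop_pkts,
 *				     mode, data);
 *	}
 */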
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT, CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT, CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
			CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
			CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
			CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
			CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
			CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_rcv_limit),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			    access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			    access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
			    SEND_DMA_DESC_FETCHED_CNT, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
			    CNTR_NORMAL, access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
			    CNTR_NORMAL, access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
			    CNTR_NORMAL, access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
			    CNTR_NORMAL, access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
			    CNTR_NORMAL, access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR",
			    0, 0, CNTR_NORMAL,
			    access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
			    CNTR_NORMAL, access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
			    CNTR_NORMAL, access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
			    CNTR_NORMAL, access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
			    CNTR_NORMAL, access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR",
			    0, 0, CNTR_NORMAL,
			    access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
			    CNTR_NORMAL, access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
			    CNTR_NORMAL, access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt",
			    0, 0, CNTR_NORMAL,
			    access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
			    CNTR_NORMAL, access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
			    CNTR_NORMAL, access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
			    CNTR_NORMAL, access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
			    CNTR_NORMAL, access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
			    CNTR_NORMAL, access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr",
			    0, 0, CNTR_NORMAL,
			    access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr",
			    0, 0, CNTR_NORMAL,
			    access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
			    CNTR_NORMAL, access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
			    CNTR_NORMAL, access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
			    CNTR_NORMAL, access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
			    CNTR_NORMAL, access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
			    CNTR_NORMAL, access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr",
			    0, 0, CNTR_NORMAL,
			    access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr",
			    0, 0, CNTR_NORMAL,
			    access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
			    CNTR_NORMAL, access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
			    CNTR_NORMAL, access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
			    CNTR_NORMAL, access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
			    CNTR_NORMAL, access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
			    CNTR_NORMAL, access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
			    CNTR_NORMAL, access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
			    CNTR_NORMAL, access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
			    CNTR_NORMAL, access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
			    CNTR_NORMAL, access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
			    CNTR_NORMAL, access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
			    CNTR_NORMAL, access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
			    CNTR_NORMAL, access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
			    CNTR_NORMAL, access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
			    "CceCli1AsyncFifoDbgParityError", 0, 0,
			    CNTR_NORMAL,
			    access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
			    "CceCli1AsyncFifoRxdmaParityError", 0, 0,
			    CNTR_NORMAL,
			    access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
			    "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
			    "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr",
			    0, 0, CNTR_NORMAL,
			    access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
			    CNTR_NORMAL, access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr",
			    0, 0, CNTR_NORMAL,
			    access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
			    CNTR_NORMAL, access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
			    CNTR_NORMAL, access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr",
			    0, 0, CNTR_NORMAL,
			    access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
			    CNTR_NORMAL, access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
			    CNTR_NORMAL, access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
			    CNTR_NORMAL, access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
			    CNTR_NORMAL, access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
			    CNTR_NORMAL, access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
			    CNTR_NORMAL, access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
			    CNTR_NORMAL, access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
			    CNTR_NORMAL, access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
			    CNTR_NORMAL, access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
			    CNTR_NORMAL, access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
			    CNTR_NORMAL, access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
			    CNTR_NORMAL, access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
			    CNTR_NORMAL, access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
			    CNTR_NORMAL, access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
			    CNTR_NORMAL, access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
			    "RxRbufFlInitWrAddrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
			    CNTR_NORMAL, access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
			    CNTR_NORMAL, access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
			    CNTR_NORMAL, access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
			    CNTR_NORMAL, access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
			    CNTR_NORMAL, access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
			    "RxRbufCsrQNumOfPktParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
			    "RxRbufCsrQTlPtrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
			    "RxRbufCsrQHeadBufNumParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
			    "RxRbufLookupDesRegUncCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
			    CNTR_NORMAL, access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
			    CNTR_NORMAL, access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
			    CNTR_NORMAL, access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
			    CNTR_NORMAL, access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
			    CNTR_NORMAL, access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
			    CNTR_NORMAL, access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
			    CNTR_NORMAL, access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
			    CNTR_NORMAL, access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
			    CNTR_NORMAL, access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
			    CNTR_NORMAL, access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
			    CNTR_NORMAL, access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
			    CNTR_NORMAL, access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
			    CNTR_NORMAL, access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
			    0, 0, CNTR_NORMAL,
			    access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr",
			    0, 0, CNTR_NORMAL,
			    access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
			    CNTR_NORMAL, access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
			    CNTR_NORMAL, access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
			    CNTR_NORMAL, access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
			    CNTR_NORMAL, access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
			    CNTR_NORMAL, access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
			    CNTR_NORMAL, access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
			    CNTR_NORMAL, access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
			    CNTR_NORMAL, access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
			    CNTR_NORMAL, access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
			    CNTR_NORMAL, access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
			    CNTR_NORMAL, access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
			    CNTR_NORMAL, access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr",
			    0, 0, CNTR_NORMAL,
			    access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
			    "PioSbrdctrlCrrelFifoParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
			    CNTR_NORMAL, access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
			    CNTR_NORMAL, access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
			    CNTR_NORMAL, access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
			    CNTR_NORMAL, access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
			    CNTR_NORMAL, access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
			    CNTR_NORMAL, access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
			    CNTR_NORMAL, access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr",
			    0, 0, CNTR_NORMAL,
			    access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr",
			    0, 0, CNTR_NORMAL,
			    access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
			    CNTR_NORMAL, access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
			    CNTR_NORMAL, access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
			    CNTR_NORMAL, access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
			    CNTR_NORMAL, access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
			    CNTR_NORMAL, access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
			    CNTR_NORMAL, access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
			    CNTR_NORMAL, access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
			    CNTR_NORMAL, access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
			    CNTR_NORMAL, access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
			    CNTR_NORMAL, access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
			    CNTR_NORMAL, access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
			    CNTR_NORMAL, access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
			    CNTR_NORMAL, access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
			    CNTR_NORMAL, access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
			    CNTR_NORMAL, access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
			    CNTR_NORMAL, access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
			    CNTR_NORMAL, access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
			    CNTR_NORMAL, access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
			    CNTR_NORMAL, access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
			    CNTR_NORMAL, access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
			    CNTR_NORMAL, access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
			    "TxSbrdCtlStateMachineParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
			    CNTR_NORMAL, access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
			    CNTR_NORMAL, access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
			    CNTR_NORMAL, access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
			    CNTR_NORMAL, access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
			    "EgressFifoUnderrunOrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_egress_fifi_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
			    CNTR_NORMAL, access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
			    CNTR_NORMAL, access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
			    CNTR_NORMAL, access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
			    0, 0, CNTR_NORMAL,
			    access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
			    CNTR_NORMAL, access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
			    CNTR_NORMAL, access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
			    0, 0, CNTR_NORMAL,
			    access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
			    CNTR_NORMAL, access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
			    CNTR_NORMAL, access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
			    0, 0, CNTR_NORMAL,
			    access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
			    CNTR_NORMAL, access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
			    CNTR_NORMAL, access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
			    CNTR_NORMAL, access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
			    CNTR_NORMAL, access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
			    CNTR_NORMAL, access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
			    CNTR_NORMAL, access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
			    CNTR_NORMAL, access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr",
			    0, 0, CNTR_NORMAL,
			    access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
			    CNTR_NORMAL, access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
			    CNTR_NORMAL, access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
			    CNTR_NORMAL, access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
			    CNTR_NORMAL, access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
			    CNTR_NORMAL, access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
			    CNTR_NORMAL, access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
			    CNTR_NORMAL, access_sdma_wrong_dw_err_cnt),
};
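
/*
 * Reader's note (a summary, not new behavior): each dev_cntrs entry binds a
 * counter index to a descriptor of the rough form
 * (name, csr, offset, flags, accessor). Broadly, CNTR_NORMAL entries are
 * read directly through their accessor, CNTR_SYNTH entries are maintained
 * as synthesized 64-bit values from narrower hardware counts, CNTR_32BIT
 * marks a 32-bit hardware source, and CNTR_VL/CNTR_SDMA fan the counter
 * out per virtual lane or per SDMA engine.
 */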
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
			CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
			CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
			CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
			CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
			CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
			access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
			CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
			access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
			access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
			access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
/* ======================================================================== */
/* return true if this is chip revision A */
int is_ax(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0;
}
/* return true if this is chip revision B */
int is_bx(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xF0) == 0x10;
}
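
/*
 * Illustration (derived from the masks above): a chip_rev_minor of
 * 0x00-0x0f decodes as an A-step part (is_ax() returns true), while
 * 0x10-0x1f decodes as a B-step part (is_bx() returns true).
 */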
/*
 * Append string s to buffer buf.  Arguments curp and len are the current
 * position and remaining length, respectively.
 *
 * return 0 on success, 1 on out of room
 */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
{
	char *p = *curp;
	int len = *lenp;
	int result = 0; /* success */
	char c;

	/* add a comma, if not the first string in the buffer */
	if (p != buf) {
		if (len == 0) {
			result = 1; /* out of room */
			goto done;
		}
		*p++ = ',';
		len--;
	}

	/* copy the string */
	while ((c = *s++) != 0) {
		if (len == 0) {
			result = 1; /* out of room */
			goto done;
		}
		*p++ = c;
		len--;
	}

done:
	/* write return values */
	*curp = p;
	*lenp = len;

	return result;
}
/*
 * Using the given flag table, print a comma separated string into
 * the buffer.  End in '*' if the buffer is too short.
 */
static char *flag_string(char *buf, int buf_len, u64 flags,
			 struct flag_table *table, int table_size)
{
	char extra[32];
	char *p = buf;
	int len = buf_len;
	int no_room = 0;
	int i;

	/* make sure there is at least 2 so we can form "*" */
	if (len < 2)
		return "";

	len--;	/* leave room for a nul */
	for (i = 0; i < table_size; i++) {
		if (flags & table[i].flag) {
			no_room = append_str(buf, &p, &len, table[i].str);
			if (no_room)
				break;
			flags &= ~table[i].flag;
		}
	}

	/* any undocumented bits left? */
	if (!no_room && flags) {
		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
		no_room = append_str(buf, &p, &len, extra);
	}

	/* add * if ran out of room */
	if (no_room) {
		/* may need to back up to add space for a '*' */
		if (len == 0)
			--p;
		*p++ = '*';
	}

	/* add final nul - space already allocated above */
	*p = 0;
	return buf;
}
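
/*
 * Worked example with hypothetical table entries: if the table maps
 * bit 0 to "ErrA" and bit 2 to "ErrB", then flags == 0x5 produces
 * "ErrA,ErrB".  A set bit with no table entry is reported as
 * "bits 0x<value>", and a too-small buffer ends the string with '*'.
 */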
/* first 8 CCE error interrupt source names */
static const char * const cce_misc_names[] = {
	"CceErrInt",	/* 0 */
	"RxeErrInt",	/* 1 */
	"MiscErrInt",	/* 2 */
	"Reserved3",	/* 3 */
	"PioErrInt",	/* 4 */
	"SDmaErrInt",	/* 5 */
	"EgressErrInt",	/* 6 */
	"TxeErrInt"	/* 7 */
};
/*
 * Return the miscellaneous error interrupt name.
 */
static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(cce_misc_names))
		strncpy(buf, cce_misc_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u",
			 source + IS_GENERAL_ERR_START);

	return buf;
}
/*
 * Return the SDMA engine error interrupt name.
 */
static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
	return buf;
}

/*
 * Return the send context error interrupt name.
 */
static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
	return buf;
}
static const char * const various_names[] = {
	"PbcInt",
	"GpioAssertInt",
	"Qsfp1Int",
	"Qsfp2Int",
	"TCritInt"
};

/*
 * Return the various interrupt name.
 */
static char *is_various_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(various_names))
		strncpy(buf, various_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
	return buf;
}
/*
 * Return the DC interrupt name.
 */
static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
{
	static const char * const dc_int_names[] = {
		"common",
		"lcb",
		"8051",
		"lbm"	/* local block merge */
	};

	if (source < ARRAY_SIZE(dc_int_names))
		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
	else
		snprintf(buf, bsize, "DCInt%u", source);
	return buf;
}
static const char * const sdma_int_names[] = {
	"SDmaInt",
	"SdmaIdleInt",
	"SdmaProgressInt",
};

/*
 * Return the SDMA engine interrupt name.
 */
static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
{
	/* what interrupt */
	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

	if (likely(what < 3))
		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
	else
		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
	return buf;
}
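
/*
 * Decoding example, assuming TXE_NUM_SDMA_ENGINES is 16: source 17
 * splits into what = 17 / 16 = 1 and which = 17 % 16 = 1, naming the
 * interrupt "SdmaIdleInt1" (the idle interrupt of engine 1).
 */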
/*
 * Return the receive available interrupt name.
 */
static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvAvailInt%u", source);
	return buf;
}

/*
 * Return the receive urgent interrupt name.
 */
static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvUrgentInt%u", source);
	return buf;
}

/*
 * Return the send credit interrupt name.
 */
static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCreditInt%u", source);
	return buf;
}

/*
 * Return the reserved interrupt name.
 */
static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
	return buf;
}
static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
}

static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
}

static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, misc_err_status_flags,
			ARRAY_SIZE(misc_err_status_flags));
}

static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
}

static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			sdma_err_status_flags,
			ARRAY_SIZE(sdma_err_status_flags));
}

static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			egress_err_status_flags,
			ARRAY_SIZE(egress_err_status_flags));
}

static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			egress_err_info_flags,
			ARRAY_SIZE(egress_err_info_flags));
}

static char *send_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			send_err_status_flags,
			ARRAY_SIZE(send_err_status_flags));
}
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	/*
	 * For most of these errors, there is nothing that can be done except
	 * report or record it.
	 */
	dd_dev_info(dd, "CCE Error: %s\n",
		cce_err_status_string(buf, sizeof(buf), reg));

	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
		/* this error requires a manual drop into SPC freeze mode */
		start_freeze_handling(dd->pport, FREEZE_SELF);
	}

	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i)) {
			incr_cntr64(&dd->cce_err_status_cnt[i]);
			/* maintain a counter over all cce_err_status errors */
			incr_cntr64(&dd->sw_cce_err_status_aggregate);
		}
	}
}
/*
 * Check counters for receive errors that do not have an interrupt
 * associated with them.
 */
#define RCVERR_CHECK_TIME 10
static void update_rcverr_timer(unsigned long opaque)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
	struct hfi1_pportdata *ppd = dd->pport;
	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);

	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
		set_link_down_reason(ppd,
			OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
			OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
		queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
	}
	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;

	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}
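
/*
 * Note on cadence: the mod_timer() above re-arms the check every
 * RCVERR_CHECK_TIME seconds (10 s with the define above), so overflow
 * counts are sampled, compared, and cached once per interval.
 */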
static int init_rcverr(struct hfi1_devdata *dd)
{
	setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
	/* Assume the hardware counter has been reset */
	dd->rcv_ovfl_cnt = 0;
	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

static void free_rcverr(struct hfi1_devdata *dd)
{
	if (dd->rcverr_timer.data)
		del_timer_sync(&dd->rcverr_timer);
	dd->rcverr_timer.data = 0;
}
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Receive Error: %s\n",
		rxe_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_RXE_FREEZE_ERR) {
		int flags = 0;

		/*
		 * Freeze mode recovery is disabled for the errors
		 * in RXE_FREEZE_ABORT_MASK
		 */
		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
			flags = FREEZE_ABORT;

		start_freeze_handling(dd->pport, flags);
	}

	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->rcv_err_status_cnt[i]);
	}
}
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Misc Error: %s\n",
		misc_err_status_string(buf, sizeof(buf), reg));
	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->misc_err_status_cnt[i]);
	}
}
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "PIO Error: %s\n",
		pio_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_PIO_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
	}
}
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "SDMA Error: %s\n",
		sdma_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_SDMA_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
	}
}
static inline void __count_port_discards(struct hfi1_pportdata *ppd)
{
	incr_cntr64(&ppd->port_xmit_discards);
}

static void count_port_inactive(struct hfi1_devdata *dd)
{
	__count_port_discards(dd->pport);
}
/*
 * We have had a "disallowed packet" error during egress.  Determine the
 * integrity check which failed, and update relevant error counter, etc.
 *
 * Note that the SEND_EGRESS_ERR_INFO register has only a single
 * bit of state per integrity check, and so we can miss the reason for an
 * egress error if more than one packet fails the same integrity check
 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
 */
static void handle_send_egress_err_info(struct hfi1_devdata *dd,
					int vl)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
	char buf[96];

	/* clear down all observed info as quickly as possible after read */
	write_csr(dd, SEND_EGRESS_ERR_INFO, info);

	dd_dev_info(dd,
		"Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
		info, egress_err_info_string(buf, sizeof(buf), info), src);

	/* Eventually add other counters for each bit */
	if (info & PORT_DISCARD_EGRESS_ERRS) {
		int weight, i;

		/*
		 * Count all, in case multiple bits are set.  Reminder:
		 * since there is only one info register for many sources,
		 * these may be attributed to the wrong VL if they occur
		 * too close together.
		 */
		weight = hweight64(info);
		for (i = 0; i < weight; i++) {
			__count_port_discards(ppd);
			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
			else if (vl == 15)
				incr_cntr64(&ppd->port_xmit_discards_vl
					    [C_VL_15]);
		}
	}
}
/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register.  Does it represent a 'port inactive' error?
 */
static inline int port_inactive_err(u64 posn)
{
	return (posn >= SEES(TX_LINKDOWN) &&
		posn <= SEES(TX_INCORRECT_LINK_STATE));
}

/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register.  Does it represent a 'disallowed packet' error?
 */
static inline int disallowed_pkt_err(int posn)
{
	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
}

/*
 * Input value is a bit position of one of the SDMA engine disallowed
 * packet errors.  Return which engine.  Use of this must be guarded by
 * disallowed_pkt_err().
 */
static inline int disallowed_pkt_engine(int posn)
{
	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
}
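
/*
 * Example: since the SDMA0..SDMA15 disallowed-packet bits are a
 * contiguous range, a bit position of SEES(TX_SDMA3_DISALLOWED_PACKET)
 * maps to engine 3.
 */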
/*
 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
 * be done.
 */
static int engine_to_vl(struct hfi1_devdata *dd, int engine)
{
	struct sdma_vl_map *m;
	int vl;

	/* range check */
	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
		return -1;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	vl = m->engine_to_vl[engine];
	rcu_read_unlock();

	return vl;
}
/*
 * Translate the send context (software index) into a VL.  Return -1 if the
 * translation cannot be done.
 */
static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
{
	struct send_context_info *sci;
	struct send_context *sc;
	int i;

	sci = &dd->send_contexts[sw_index];

	/* there is no information for user (PSM) and ack contexts */
	if (sci->type != SC_KERNEL)
		return -1;

	sc = sci->sc;
	if (!sc)
		return -1;
	if (dd->vld[15].sc == sc)
		return 15;
	for (i = 0; i < num_vls; i++)
		if (dd->vld[i].sc == sc)
			return i;

	return -1;
}
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 reg_copy = reg, handled = 0;
	char buf[96];
	int i = 0;

	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);
	else if (is_ax(dd) &&
		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
		start_freeze_handling(dd->pport, 0);

	while (reg_copy) {
		int posn = fls64(reg_copy);
		/* fls64() returns a 1-based offset, we want it zero based */
		int shift = posn - 1;
		u64 mask = 1ULL << shift;

		if (port_inactive_err(shift)) {
			count_port_inactive(dd);
			handled |= mask;
		} else if (disallowed_pkt_err(shift)) {
			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));

			handle_send_egress_err_info(dd, vl);
			handled |= mask;
		}
		reg_copy &= ~mask;
	}

	reg &= ~handled;

	if (reg)
		dd_dev_info(dd, "Egress Error: %s\n",
			egress_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
	}
}
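
/*
 * Bit-scan walk-through: with reg_copy == 0x12 (bits 1 and 4 set),
 * fls64() first returns 5, so shift == 4 is classified and its mask
 * cleared; the next pass sees 0x02, fls64() returns 2, and shift == 1
 * is handled.  The loop therefore visits set bits from high to low.
 */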
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Send Error: %s\n",
		send_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_err_status_cnt[i]);
	}
}
/*
 * The maximum number of times the error clear down will loop before
 * blocking a repeating error.  This value is arbitrary.
 */
#define MAX_CLEAR_COUNT 20

/*
 * Clear and handle an error register.  All error interrupts are funneled
 * through here to have a central location to correctly handle single-
 * or multi-shot errors.
 *
 * For non per-context registers, call this routine with a context value
 * of 0 so the per-context offset is zero.
 *
 * If the handler loops too many times, assume that something is wrong
 * and can't be fixed, so mask the error bits.
 */
static void interrupt_clear_down(struct hfi1_devdata *dd,
				 u32 context,
				 const struct err_reg_info *eri)
{
	u64 reg;
	u32 count;

	/* read in a loop until no more errors are seen */
	count = 0;
	while (1) {
		reg = read_kctxt_csr(dd, context, eri->status);
		if (reg == 0)
			break;
		write_kctxt_csr(dd, context, eri->clear, reg);
		if (likely(eri->handler))
			eri->handler(dd, context, reg);
		count++;
		if (count > MAX_CLEAR_COUNT) {
			u64 mask;

			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
				eri->desc, reg);
			/*
			 * Read-modify-write so any other masked bits
			 * written (by another thread) are not affected.
			 */
			mask = read_kctxt_csr(dd, context, eri->mask);
			mask &= ~reg;
			write_kctxt_csr(dd, context, eri->mask, mask);
			break;
		}
	}
}
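
/*
 * Masking example: if the same status value, say 0x4, is still set
 * after MAX_CLEAR_COUNT clear/handle passes, the read-modify-write
 * above clears only bit 2 of the mask CSR, silencing that source
 * while leaving every other error source enabled.
 */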
/*
 * CCE block "misc" interrupt.  Source is < 16.
 */
static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &misc_errs[source];

	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
	} else {
		dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
			source);
	}
}
static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
}
/*
 * Send context error interrupt.  Source (hw_context) is < 160.
 *
 * All send context errors cause the send context to halt.  The normal
 * clear-down mechanism cannot be used because we cannot clear the
 * error bits until several other long-running items are done first.
 * This is OK because with the context halted, nothing else is going
 * to happen on it anyway.
 */
static void is_sendctxt_err_int(struct hfi1_devdata *dd,
				unsigned int hw_context)
{
	struct send_context_info *sci;
	struct send_context *sc;
	char flags[96];
	u64 status;
	u32 sw_index;
	int i = 0;

	sw_index = dd->hw_to_sw[hw_context];
	if (sw_index >= dd->num_send_contexts) {
		dd_dev_err(dd,
			"out of range sw index %u for send context %u\n",
			sw_index, hw_context);
		return;
	}
	sci = &dd->send_contexts[sw_index];
	sc = sci->sc;
	if (!sc) {
		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
			sw_index, hw_context);
		return;
	}

	/* tell the software that a halt has begun */
	sc_stop(sc, SCF_HALTED);

	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);

	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
		send_context_err_status_string(flags, sizeof(flags), status));

	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));

	/*
	 * Automatically restart halted kernel contexts out of interrupt
	 * context.  User contexts must ask the driver to restart the context.
	 */
	if (sc->type != SC_USER)
		queue_work(dd->pport->hfi1_wq, &sc->halt_work);

	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over all
	 * 160 contexts.
	 */
	for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
	}
}
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int source, u64 status)
{
	struct sdma_engine *sde;
	int i = 0;

	sde = &dd->per_sdma[source];
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
		   sde->this_idx, source, (unsigned long long)status);
#endif
	sdma_engine_error(sde, status);

	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over
	 * all 16 DMA engines.
	 */
	for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
	}
}
/*
 * CCE block SDMA error interrupt.  Source is < 16.
 */
static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
{
#ifdef CONFIG_SDMA_VERBOSITY
	struct sdma_engine *sde = &dd->per_sdma[source];

	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
		   source);
	sdma_dumpstate(sde);
#endif
	interrupt_clear_down(dd, source, &sdma_eng_err);
}
/*
 * CCE block "various" interrupt.  Source is < 8.
 */
static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &various_err[source];

	/*
	 * TCritInt cannot go through interrupt_clear_down()
	 * because it is not a second tier interrupt. The handler
	 * should be called directly.
	 */
	if (source == TCRIT_INT_SOURCE)
		handle_temp_err(dd);
	else if (eri->handler)
		interrupt_clear_down(dd, 0, eri);
	else
		dd_dev_info(dd,
			"%s: Unimplemented/reserved interrupt %d\n",
			__func__, source);
}
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
{
	/* src_ctx is always zero */
	struct hfi1_pportdata *ppd = dd->pport;
	unsigned long flags;
	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);

	if (reg & QSFP_HFI0_MODPRST_N) {

		dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
				__func__);

		if (!qsfp_mod_present(ppd)) {
			ppd->driver_link_ready = 0;
			/*
			 * Cable removed, reset all our information about the
			 * cache and cable capabilities
			 */

			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			/*
			 * We don't set cache_refresh_required here as we expect
			 * an interrupt when a cable is inserted
			 */
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.reset_needed = 0;
			ppd->qsfp_info.limiting_active = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
						flags);
			/* Invert the ModPresent pin now to detect plug-in */
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			if ((ppd->offline_disabled_reason >
			  HFI1_ODR_MASK(
			  OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED)) ||
			  (ppd->offline_disabled_reason ==
			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
				ppd->offline_disabled_reason =
				HFI1_ODR_MASK(
				OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED);

			if (ppd->host_link_state == HLS_DN_POLL) {
				/*
				 * The link is still in POLL. This means
				 * that the normal link down processing
				 * will not happen. We have to do it here
				 * before turning the DC off.
				 */
				queue_work(ppd->hfi1_wq, &ppd->link_down_work);
			}
		} else {
			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.cache_refresh_required = 1;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
						flags);

			/*
			 * Stop inversion of ModPresent pin to detect
			 * removal of the cable
			 */
			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			ppd->offline_disabled_reason =
				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
		}
	}

	if (reg & QSFP_HFI0_INT_N) {

		dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
				__func__);
		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
		ppd->qsfp_info.check_interrupt_flags = 1;
		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
	}

	/* Schedule the QSFP work only if there is a cable attached. */
	if (qsfp_mod_present(ppd))
		queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
}
static int request_host_lcb_access(struct hfi1_devdata *dd)
{
	int ret;

	ret = do_8051_command(dd, HCMD_MISC,
		(u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
		NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			__func__, ret);
	}
	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}

static int request_8051_lcb_access(struct hfi1_devdata *dd)
{
	int ret;

	ret = do_8051_command(dd, HCMD_MISC,
		(u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
		NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			__func__, ret);
	}
	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}
/*
 * Set the LCB selector - allow host access.  The DCC selector always
 * points to the host.
 */
static inline void set_host_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
		| DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
}

/*
 * Clear the LCB selector - allow 8051 access.  The DCC selector always
 * points to the host.
 */
static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
}
/*
 * Acquire LCB access from the 8051.  If the host already has access,
 * just increment a counter.  Otherwise, inform the 8051 that the
 * host is taking access.
 *
 * Returns:
 *	0 on success
 *	-EBUSY if the 8051 has control and cannot be disturbed
 *	-errno if unable to acquire access from the 8051
 */
int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	struct hfi1_pportdata *ppd = dd->pport;
	int ret = 0;

	/*
	 * Use the host link state lock so the operation of this routine
	 * { link state check, selector change, count increment } can occur
	 * as a unit against a link state change.  Otherwise there is a
	 * race between the state change and the count increment.
	 */
	if (sleep_ok) {
		mutex_lock(&ppd->hls_lock);
	} else {
		while (!mutex_trylock(&ppd->hls_lock))
			udelay(1);
	}

	/* this access is valid only when the link is up */
	if ((ppd->host_link_state & HLS_UP) == 0) {
		dd_dev_info(dd, "%s: link state %s not up\n",
			__func__, link_state_name(ppd->host_link_state));
		ret = -EBUSY;
		goto done;
	}

	if (dd->lcb_access_count == 0) {
		ret = request_host_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				"%s: unable to acquire LCB access, err %d\n",
				__func__, ret);
			goto done;
		}
		set_host_lcb_access(dd);
	}
	dd->lcb_access_count++;
done:
	mutex_unlock(&ppd->hls_lock);
	return ret;
}
/*
 * Release LCB access by decrementing the use count.  If the count is moving
 * from 1 to 0, inform 8051 that it has control back.
 *
 * Returns:
 *	0 on success
 *	-errno if unable to release access to the 8051
 */
int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	int ret = 0;

	/*
	 * Use the host link state lock because the acquire needed it.
	 * Here, we only need to keep { selector change, count decrement }
	 * as a unit.
	 */
	if (sleep_ok) {
		mutex_lock(&dd->pport->hls_lock);
	} else {
		while (!mutex_trylock(&dd->pport->hls_lock))
			udelay(1);
	}

	if (dd->lcb_access_count == 0) {
		dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
			__func__);
		goto done;
	}

	if (dd->lcb_access_count == 1) {
		set_8051_lcb_access(dd);
		ret = request_8051_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				"%s: unable to release LCB access, err %d\n",
				__func__, ret);
			/* restore host access if the grant didn't work */
			set_host_lcb_access(dd);
			goto done;
		}
	}
	dd->lcb_access_count--;
done:
	mutex_unlock(&dd->pport->hls_lock);
	return ret;
}
/*
 * Initialize LCB access variables and state.  Called during driver load,
 * after most of the initialization is finished.
 *
 * The DC default is LCB access on for the host.  The driver defaults to
 * leaving access to the 8051.  Assign access now - this constrains the call
 * to this routine to be after all LCB set-up is done.  In particular, after
 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
 */
static void init_lcb_access(struct hfi1_devdata *dd)
{
	dd->lcb_access_count = 0;
}
/*
 * Write a response back to a 8051 request.
 */
static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
{
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
		DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
		| (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
		| (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
}
/*
 * Handle host requests from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_8051_request(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
							dc_host_req_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	u16 data = 0;
	u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
	u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];

	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
	if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
		return;	/* no request */

	/* zero out COMPLETED so the response is seen */
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);

	/* extract request details */
	type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
			& DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
	data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
			& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;

	switch (type) {
	case HREQ_LOAD_CONFIG:
	case HREQ_SAVE_CONFIG:
	case HREQ_READ_CONFIG:
	case HREQ_SET_TX_EQ_ABS:
	case HREQ_SET_TX_EQ_REL:
		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
			type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;

	case HREQ_ENABLE:
		lanes = data & 0xF;
		for (i = 0; lanes; lanes >>= 1, i++) {
			if (!(lanes & 1))
				continue;
			if (data & 0x200) {
				/* enable TX CDR */
				if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
				    cache[QSFP_CDR_INFO_OFFS] & 0x80)
					cdr_ctrl_byte |= (1 << (i + 4));
			} else {
				/* disable TX CDR */
				if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
				    cache[QSFP_CDR_INFO_OFFS] & 0x80)
					cdr_ctrl_byte &= ~(1 << (i + 4));
			}

			if (data & 0x80) {
				/* enable RX CDR */
				if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
				    cache[QSFP_CDR_INFO_OFFS] & 0x40)
					cdr_ctrl_byte |= (1 << i);
			} else {
				/* disable RX CDR */
				if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
				    cache[QSFP_CDR_INFO_OFFS] & 0x40)
					cdr_ctrl_byte &= ~(1 << i);
			}
		}
		qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
			   &cdr_ctrl_byte, 1);
		hreq_response(dd, HREQ_SUCCESS, data);
		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
		break;

	case HREQ_CONFIG_DONE:
		hreq_response(dd, HREQ_SUCCESS, 0);
		break;

	case HREQ_INTERFACE_TEST:
		hreq_response(dd, HREQ_SUCCESS, data);
		break;

	default:
		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	}
}
static void write_global_credit(struct hfi1_devdata *dd,
				u8 vau, u16 total, u16 shared)
{
	write_csr(dd, SEND_CM_GLOBAL_CREDIT,
		((u64)total
			<< SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
		| ((u64)shared
			<< SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
		| ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
}
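
/*
 * Packing example with illustrative values: write_global_credit(dd, 3,
 * 2048, 0) places total 2048 in the total-credit-limit field, shared 0
 * in the shared-limit field, and vau 3 in the AU field of
 * SEND_CM_GLOBAL_CREDIT in a single CSR write.
 */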
/*
 * Set up initial VL15 credits of the remote.  Assumes the rest of
 * the CM credit registers are zero from a previous global or credit reset.
 */
void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
{
	/* leave shared count at zero for both global and VL15 */
	write_global_credit(dd, vau, vl15buf, 0);

	/*
	 * We may need some credits for another VL when sending packets
	 * with the snoop interface. Dividing it down the middle for VL15
	 * and VL0 should suffice.
	 */
	if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
		write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
		    << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
		write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
		    << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
	} else {
		write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
			<< SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
	}
}
/*
 * Zero all credit details from the previous connection and
 * reset the CM manager's internal counters.
 */
void reset_link_credits(struct hfi1_devdata *dd)
{
	int i;

	/* remove all previous VL credit limits */
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
	write_global_credit(dd, 0, 0, 0);
	/* reset the CM block */
	pio_send_control(dd, PSC_CM_RESET);
}
/* convert a vCU to a CU */
static u32 vcu_to_cu(u8 vcu)
{
	return 1 << vcu;
}

/* convert a CU to a vCU */
static u8 cu_to_vcu(u32 cu)
{
	return ilog2(cu);
}

/* convert a vAU to an AU */
static u32 vau_to_au(u8 vau)
{
	return 8 * (1 << vau);
}
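
/*
 * Worked conversions: vau_to_au(0) == 8 bytes, vau_to_au(1) == 16
 * bytes, vau_to_au(3) == 64 bytes; and, with the helper bodies assumed
 * above, vcu_to_cu(2) == 4 while cu_to_vcu(4) == 2.
 */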
static void set_linkup_defaults(struct hfi1_pportdata *ppd)
{
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;
}
/*
 * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
 */
static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
{
	u64 reg;

	/* clear lcb run: LCB_CFG_RUN.EN = 0 */
	write_csr(dd, DC_LCB_CFG_RUN, 0);
	/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
		1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
	/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
	reg = read_csr(dd, DCC_CFG_RESET);
	write_csr(dd, DCC_CFG_RESET,
		reg
		| (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
		| (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
	(void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
	if (!abort) {
		udelay(1);    /* must hold for the longer of 16cclks or 20ns */
		write_csr(dd, DCC_CFG_RESET, reg);
		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	}
}
/*
 * This routine should be called after the link has been transitioned to
 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
 * reset).
 *
 * The expectation is that the caller of this routine would have taken
 * care of properly transitioning the link into the correct state.
 */
static void dc_shutdown(struct hfi1_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->dc8051_lock, flags);
	if (dd->dc_shutdown) {
		spin_unlock_irqrestore(&dd->dc8051_lock, flags);
		return;
	}
	dd->dc_shutdown = 1;
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
	/* Shutdown the LCB */
	lcb_shutdown(dd, 1);
	/*
	 * Going to OFFLINE would have caused the 8051 to put the
	 * SerDes into reset already. Just need to shut down the 8051.
	 */
	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
}
/*
 * Calling this after the DC has been brought out of reset should not
 * do any damage.
 */
static void dc_start(struct hfi1_devdata *dd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dd->dc8051_lock, flags);
	if (!dd->dc_shutdown)
		goto done;
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
	/* Take the 8051 out of reset */
	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
	/* Wait until 8051 is ready */
	ret = wait_fm_ready(dd, TIMEOUT_8051_START);
	if (ret)
		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
			__func__);
	/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
	write_csr(dd, DCC_CFG_RESET, 0x10);
	/* lcb_shutdown() with abort=1 does not restore these */
	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	spin_lock_irqsave(&dd->dc8051_lock, flags);
	dd->dc_shutdown = 0;
done:
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
}
/*
 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
 */
static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
{
	u64 rx_radr, tx_radr;
	u32 version;

	if (dd->icode != ICODE_FPGA_EMULATION)
		return;

	/*
	 * These LCB defaults on emulator _s are good, nothing to do here:
	 *	LCB_CFG_TX_FIFOS_RADR
	 *	LCB_CFG_RX_FIFOS_RADR
	 *	LCB_CFG_LN_DCLK
	 *	LCB_CFG_IGNORE_LOST_RCLK
	 */
	if (is_emulator_s(dd))
		return;
	/* else this is _p */

	version = emulator_rev(dd);
	if (!is_ax(dd))
		version = 0x2d;	/* all B0 use 0x2d or higher settings */

	if (version <= 0x12) {
		/* release 0x12 and below */

		/*
		 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
		 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
		 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
		 */
		rx_radr =
		      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		/*
		 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
		 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
		 */
		tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version <= 0x18) {
		/* release 0x13 up to 0x18 */
		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x19) {
		/* release 0x19 */
		/* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
		rx_radr =
		      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x1a) {
		/* release 0x1a */
		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
	} else {
		/* release 0x1b and higher */
		/* LCB_CFG_RX_FIFOS_RADR = 0x877 */
		rx_radr =
		      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	}

	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
	/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
		DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
}
/*
 * Handle an SMA idle message
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_sma_message(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
							sma_message_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 msg;
	int ret;

	/*
	 * msg is bytes 1-4 of the 40-bit idle message - the command code
	 * is stripped off
	 */
	ret = read_idle_sma(dd, &msg);
	if (ret)
		return;
	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
	/*
	 * React to the SMA message.  Byte[1] (0 for us) is the command.
	 */
	switch (msg & 0xff) {
	case SMA_IDLE_ARM:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
		 *
		 * Only expected in INIT or ARMED, discard otherwise.
		 */
		if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
			ppd->neighbor_normal = 1;
		break;
	case SMA_IDLE_ACTIVE:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
		 *
		 * Can activate the node.  Discard otherwise.
		 */
		if (ppd->host_link_state == HLS_UP_ARMED
					&& ppd->is_active_optimize_enabled) {
			ppd->neighbor_normal = 1;
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (ret)
				dd_dev_err(dd,
					"%s: received Active SMA idle message, couldn't set link to Active\n",
					__func__);
		}
		break;
	default:
		dd_dev_err(dd,
			"%s: received unexpected SMA idle message 0x%llx\n",
			__func__, msg);
		break;
	}
}
static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
{
	u64 rcvctrl;
	unsigned long flags;

	spin_lock_irqsave(&dd->rcvctrl_lock, flags);
	rcvctrl = read_csr(dd, RCV_CTRL);
	rcvctrl |= add;
	rcvctrl &= ~clear;
	write_csr(dd, RCV_CTRL, rcvctrl);
	spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
}

static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
{
	adjust_rcvctrl(dd, add, 0);
}

static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
{
	adjust_rcvctrl(dd, 0, clear);
}
/*
 * Called from all interrupt handlers to start handling an SPC freeze.
 */
void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct send_context *sc;
	int i;

	if (flags & FREEZE_SELF)
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);

	/* enter frozen mode */
	dd->flags |= HFI1_FROZEN;

	/* notify all SDMA engines that they are going into a freeze */
	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));

	/* do halt pre-handling on all enabled send contexts */
	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (sc && (sc->flags & SCF_ENABLED))
			sc_stop(sc, SCF_FROZEN | SCF_HALTED);
	}

	/* Send contexts are frozen. Notify user space */
	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);

	if (flags & FREEZE_ABORT) {
		dd_dev_err(dd,
			"Aborted freeze recovery. Please REBOOT system\n");
		return;
	}
	/* queue non-interrupt handler */
	queue_work(ppd->hfi1_wq, &ppd->freeze_work);
}
/*
 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
 * depending on the "freeze" parameter.
 *
 * No need to return an error if it times out, our only option
 * is to proceed anyway.
 */
static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
	while (1) {
		reg = read_csr(dd, CCE_STATUS);
		if (freeze) {
			/* waiting until all indicators are set */
			if ((reg & ALL_FROZE) == ALL_FROZE)
				return; /* all done */
		} else {
			/* waiting until all indicators are clear */
			if ((reg & ALL_FROZE) == 0)
				return; /* all done */
		}

		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				"Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
				freeze ? "" : "un",
				reg & ALL_FROZE,
				freeze ? ALL_FROZE : 0ull);
			return;
		}
		usleep_range(80, 120);
	}
}
/*
 * Do all freeze handling for the RXE block.
 */
static void rxe_freeze(struct hfi1_devdata *dd)
{
	int i;

	/* disable port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/* disable all receive contexts */
	for (i = 0; i < dd->num_rcv_contexts; i++)
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
}
/*
 * Unfreeze handling for the RXE block - kernel contexts only.
 * This will also enable the port.  User contexts will do unfreeze
 * handling on a per-context basis as they call into the driver.
 */
static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
{
	int rcvmask;
	int i;

	/* enable all kernel contexts */
	for (i = 0; i < dd->n_krcv_queues; i++) {
		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
		/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
		rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		hfi1_rcvctrl(dd, rcvmask, i);
	}

	/* enable port */
	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}
/*
 * Non-interrupt SPC freeze handling.
 *
 * This is a work-queue function outside of the triggering interrupt.
 */
void handle_freeze(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
								freeze_work);
	struct hfi1_devdata *dd = ppd->dd;

	/* wait for freeze indicators on all affected blocks */
	wait_for_freeze_status(dd, 1);

	/* SPC is now frozen */

	/* do send PIO freeze steps */
	pio_freeze(dd);

	/* do send DMA freeze steps */
	sdma_freeze(dd);

	/* do send egress freeze steps - nothing to do */

	/* do receive freeze steps */
	rxe_freeze(dd);

	/*
	 * Unfreeze the hardware - clear the freeze, wait for each
	 * block's frozen bit to clear, then clear the frozen flag.
	 */
	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
	wait_for_freeze_status(dd, 0);

	if (is_ax(dd)) {
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
		wait_for_freeze_status(dd, 1);
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
		wait_for_freeze_status(dd, 0);
	}

	/* do send PIO unfreeze steps for kernel contexts */
	pio_kernel_unfreeze(dd);

	/* do send DMA unfreeze steps */
	sdma_unfreeze(dd);

	/* do send egress unfreeze steps - nothing to do */

	/* do receive unfreeze steps for kernel contexts */
	rxe_kernel_unfreeze(dd);

	/*
	 * The unfreeze procedure touches global device registers when
	 * it disables and re-enables RXE. Mark the device unfrozen
	 * after all that is done so other parts of the driver waiting
	 * for the device to unfreeze don't do things out of order.
	 *
	 * The above implies that the meaning of HFI1_FROZEN flag is
	 * "Device has gone into freeze mode and freeze mode handling
	 * is still in progress."
	 *
	 * The flag will be removed when freeze mode processing has
	 * completed.
	 */
	dd->flags &= ~HFI1_FROZEN;
	wake_up(&dd->event_queue);

	/* no longer frozen */
}
/*
 * Handle a link up interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_up(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
								link_up_work);
	set_link_state(ppd, HLS_UP_INIT);

	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
	read_ltp_rtt(ppd->dd);
	/*
	 * OPA specifies that certain counters are cleared on a transition
	 * to link up, so do that.
	 */
	clear_linkup_counters(ppd->dd);
	/*
	 * And (re)set link up default values.
	 */
	set_linkup_defaults(ppd);

	/* enforce link speed enabled */
	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
		/* oops - current speed is not enabled, bounce */
		dd_dev_err(ppd->dd,
			"Link speed active 0x%x is outside enabled 0x%x, downing link\n",
			ppd->link_speed_active, ppd->link_speed_enabled);
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
			OPA_LINKDOWN_REASON_SPEED_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	}
}
/*
 * Several pieces of LNI information were cached for SMA in ppd.
 * Reset these on link down.
 */
static void reset_neighbor_info(struct hfi1_pportdata *ppd)
{
	ppd->neighbor_guid = 0;
	ppd->neighbor_port_number = 0;
	ppd->neighbor_type = 0;
	ppd->neighbor_fm_security = 0;
}
/*
 * Handle a link down interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_down(struct work_struct *work)
{
	u8 lcl_reason, neigh_reason = 0;
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
								link_down_work);

	if ((ppd->host_link_state &
	     (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
	     ppd->port_type == PORT_TYPE_FIXED)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);

	/* Go offline first, then deal with reading/writing through 8051 */
	set_link_state(ppd, HLS_DN_OFFLINE);

	lcl_reason = 0;
	read_planned_down_reason_code(ppd->dd, &neigh_reason);

	/*
	 * If no reason, assume peer-initiated but missed
	 * LinkGoingDown idle flits.
	 */
	if (neigh_reason == 0)
		lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;

	set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);

	reset_neighbor_info(ppd);

	/* disable the port */
	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/*
	 * If there is no cable attached, turn the DC off. Otherwise,
	 * start the link bring up.
	 */
	if (!qsfp_mod_present(ppd))
		dc_shutdown(ppd->dd);
	else
		start_link(ppd);
}
void handle_link_bounce(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
							link_bounce_work);

	/*
	 * Only do something if the link is currently up.
	 */
	if (ppd->host_link_state & HLS_UP) {
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	} else {
		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
			__func__, link_state_name(ppd->host_link_state));
	}
}
/*
 * Mask conversion: Capability exchange to Port LTP.  The capability
 * exchange has an implicit 16b CRC that is mandatory.
 */
static int cap_to_port_ltp(int cap)
{
	int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */

	if (cap & CAP_CRC_14B)
		port_ltp |= PORT_LTP_CRC_MODE_14;
	if (cap & CAP_CRC_48B)
		port_ltp |= PORT_LTP_CRC_MODE_48;
	if (cap & CAP_CRC_12B_16B_PER_LANE)
		port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;

	return port_ltp;
}
/*
 * Convert an OPA Port LTP mask to capability mask
 */
int port_ltp_to_cap(int port_ltp)
{
	int cap_mask = 0;

	if (port_ltp & PORT_LTP_CRC_MODE_14)
		cap_mask |= CAP_CRC_14B;
	if (port_ltp & PORT_LTP_CRC_MODE_48)
		cap_mask |= CAP_CRC_48B;
	if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
		cap_mask |= CAP_CRC_12B_16B_PER_LANE;

	return cap_mask;
}
/*
 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
 */
static int lcb_to_port_ltp(int lcb_crc)
{
	int port_ltp = 0;

	if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
		port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
	else if (lcb_crc == LCB_CRC_48B)
		port_ltp = PORT_LTP_CRC_MODE_48;
	else if (lcb_crc == LCB_CRC_14B)
		port_ltp = PORT_LTP_CRC_MODE_14;
	else
		port_ltp = PORT_LTP_CRC_MODE_16;

	return port_ltp;
}
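
/*
 * Conversion example: a capability mask of (CAP_CRC_14B | CAP_CRC_48B)
 * expands via cap_to_port_ltp() to PORT_LTP_CRC_MODE_16 |
 * PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48, since the implicit 16b
 * CRC is always present; lcb_to_port_ltp() maps the single active LCB
 * mode back to exactly one of those bits.
 */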
/*
 * Our neighbor has indicated that we are allowed to act as a fabric
 * manager, so place the full management partition key in the second
 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
 * that we should already have the limited management partition key in
 * array element 1, and also that the port is not yet up when
 * add_full_mgmt_pkey() is invoked.
 */
static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
	if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
		dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
			    __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
	ppd->pkeys[2] = FULL_MGMT_P_KEY;
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
}
/*
 * Convert the given link width to the OPA link width bitmask.
 */
static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
{
	switch (width) {
	case 0:
		/*
		 * Simulator and quick linkup do not set the width.
		 * Just set it to 4x without complaint.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
			return OPA_LINK_WIDTH_4X;
		return 0; /* no lanes up */
	case 1: return OPA_LINK_WIDTH_1X;
	case 2: return OPA_LINK_WIDTH_2X;
	case 3: return OPA_LINK_WIDTH_3X;
	default:
		dd_dev_info(dd, "%s: invalid width %d, using 4\n",
			__func__, width);
		/* fall through */
	case 4: return OPA_LINK_WIDTH_4X;
	}
}
/*
 * Do a population count on the bottom nibble.
 */
static const u8 bit_counts[16] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
};

static inline u8 nibble_to_count(u8 nibble)
{
	return bit_counts[nibble & 0xf];
}
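
/*
 * Lookup example: nibble_to_count(0xb) == 3, since 0xb is 1011b with
 * bits 0, 1, and 3 set; the table simply precomputes the popcount of
 * every 4-bit value.
 */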
/*
 * Read the active lane information from the 8051 registers and return
 * their widths.
 *
 * Active lane information is found in these 8051 registers:
 *	enable_lane_tx
 *	enable_lane_rx
 */
static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
			    u16 *rx_width)
{
	u16 tx, rx;
	u8 enable_lane_rx;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	u8 max_rate;

	/* read the active lanes */
	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
				&rx_polarity_inversion, &max_rate);
	read_local_lni(dd, &enable_lane_rx);

	/* convert to counts */
	tx = nibble_to_count(enable_lane_tx);
	rx = nibble_to_count(enable_lane_rx);

	/*
	 * Set link_speed_active here, overriding what was set in
	 * handle_verify_cap().  The ASIC 8051 firmware does not correctly
	 * set the max_rate field in handle_verify_cap until v0.19.
	 */
	if ((dd->icode == ICODE_RTL_SILICON)
				&& (dd->dc8051_ver < dc8051_ver(0, 19))) {
		/* max_rate: 0 = 12.5G, 1 = 25G */
		switch (max_rate) {
		case 0:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		default:
			dd_dev_err(dd,
				"%s: unexpected max rate %d, using 25Gb\n",
				__func__, (int)max_rate);
			/* fall through */
		case 1:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	}

	dd_dev_info(dd,
		"Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
		enable_lane_tx, tx, enable_lane_rx, rx);
	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);
}
/*
 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
 * Valid after the end of VerifyCap and during LinkUp.  Does not change
 * after link up.  I.e. look elsewhere for downgrade information.
 *
 * Bits are:
 *	+ bits [7:4] contain the number of active transmitters
 *	+ bits [3:0] contain the number of active receivers
 * These are numbers 1 through 4 and can be different values if the
 * link is asymmetric.
 *
 * verify_cap_local_fm_link_width[0] retains its original value.
 */
static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
			      u16 *rx_width)
{
	u16 widths, tx, rx;
	u8 misc_bits, local_flags;
	u16 active_tx, active_rx;

	read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
	tx = widths >> 12;
	rx = (widths >> 8) & 0xf;

	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);

	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
}
/*
 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
 * hardware information when the link first comes up.
 *
 * The link width is not available until after VerifyCap.AllFramesReceived
 * (the trigger for handle_verify_cap), so this is outside that routine
 * and should be called when the 8051 signals linkup.
 */
void get_linkup_link_widths(struct hfi1_pportdata *ppd)
{
	u16 tx_width, rx_width;

	/* get end-of-LNI link widths */
	get_linkup_widths(ppd->dd, &tx_width, &rx_width);

	/* use tx_width as the link is supposed to be symmetric on link up */
	ppd->link_width_active = tx_width;
	/* link width downgrade active (LWD.A) starts out matching LW.A */
	ppd->link_width_downgrade_tx_active = ppd->link_width_active;
	ppd->link_width_downgrade_rx_active = ppd->link_width_active;
	/* per OPA spec, on link up LWD.E resets to LWD.S */
	ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
	/* cache the active egress rate (units: 10^6 bits/sec) */
	ppd->current_egress_rate = active_egress_rate(ppd);
}
7113 * Handle a verify capabilities interrupt from the 8051.
 * This is a work-queue function outside of the interrupt.
 */
void handle_verify_cap(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
								link_vc_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	u8 power_management;
	u8 continuous;
	u8 vcu;
	u8 vau;
	u8 z;
	u16 vl15buf;
	u16 link_widths;
	u16 crc_mask;
	u16 crc_val;
	u16 device_id;
	u16 active_tx, active_rx;
	u8 partner_supported_crc;
	u8 remote_tx_rate;
	u8 device_rev;

	set_link_state(ppd, HLS_VERIFY_CAP);

	lcb_shutdown(dd, 0);
	adjust_lcb_for_fpga_serdes(dd);

	/*
	 * These are now valid:
	 *	remote VerifyCap fields in the general LNI config
	 *	CSR DC8051_STS_REMOTE_GUID
	 *	CSR DC8051_STS_REMOTE_NODE_TYPE
	 *	CSR DC8051_STS_REMOTE_FM_SECURITY
	 *	CSR DC8051_STS_REMOTE_PORT_NO
	 */

	read_vc_remote_phy(dd, &power_management, &continuous);
	read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
			      &partner_supported_crc);
	read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
	read_remote_device_id(dd, &device_id, &device_rev);
	/*
	 * And the 'MgmtAllowed' information, which is exchanged during
	 * LNI, is also available at this point.
	 */
	read_mgmt_allowed(dd, &ppd->mgmt_allowed);
	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
	dd_dev_info(dd,
		"Peer PHY: power management 0x%x, continuous updates 0x%x\n",
		(int)power_management, (int)continuous);
	dd_dev_info(dd,
		"Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
		(int)vau, (int)z, (int)vcu, (int)vl15buf,
		(int)partner_supported_crc);
	dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
		(u32)remote_tx_rate, (u32)link_widths);
	dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
		(u32)device_id, (u32)device_rev);
	/*
	 * The peer vAU value just read is the peer receiver value.  HFI does
	 * not support a transmit vAU of 0 (AU == 8).  We advertised that
	 * with Z=1 in the fabric capabilities sent to the peer.  The peer
	 * will see our Z=1, and, if it advertised a vAU of 0, will move its
	 * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
	 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
	 * subject to the Z value exception.
	 */
	if (vau == 0)
		vau = 1;
	set_up_vl15(dd, vau, vl15buf);

	/* set up the LCB CRC mode */
	crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;

	/* order is important: use the lowest bit in common */
	if (crc_mask & CAP_CRC_14B)
		crc_val = LCB_CRC_14B;
	else if (crc_mask & CAP_CRC_48B)
		crc_val = LCB_CRC_48B;
	else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
		crc_val = LCB_CRC_12B_16B_PER_LANE;
	else
		crc_val = LCB_CRC_16B;

	dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
	write_csr(dd, DC_LCB_CFG_CRC_MODE,
		(u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);

	/* set (14b only) or clear sideband credit */
	reg = read_csr(dd, SEND_CM_CTRL);
	if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
		write_csr(dd, SEND_CM_CTRL,
			reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
	} else {
		write_csr(dd, SEND_CM_CTRL,
			reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
	}

	ppd->link_speed_active = 0;	/* invalid value */
	if (dd->dc8051_ver < dc8051_ver(0, 20)) {
		/* remote_tx_rate: 0 = 12.5G, 1 = 25G */
		switch (remote_tx_rate) {
		case 0:
			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		case 1:
			ppd->link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	} else {
		/* actual rate is highest bit of the ANDed rates */
		u8 rate = remote_tx_rate & ppd->local_tx_rate;

		if (rate & 2)
			ppd->link_speed_active = OPA_LINK_SPEED_25G;
		else if (rate & 1)
			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
	}
	if (ppd->link_speed_active == 0) {
		dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
			__func__, (int)remote_tx_rate);
		ppd->link_speed_active = OPA_LINK_SPEED_25G;
	}

	/*
	 * Cache the values of the supported, enabled, and active
	 * LTP CRC modes to return in 'portinfo' queries.  But the bit
	 * flags that are returned in the portinfo query differ from
	 * what's in the link_crc_mask, crc_sizes, and crc_val
	 * variables.  Convert these here.
	 */
	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* supported crc modes */
	ppd->port_ltp_crc_mode |=
		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
		/* enabled crc modes */
	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
		/* active crc mode */

	/* set up the remote credit return table */
	assign_remote_cm_au_table(dd, vcu);

	/*
	 * The LCB is reset on entry to handle_verify_cap(), so this must
	 * be applied on every link up.
	 *
	 * Adjust LCB error kill enable to kill the link if
	 * these RBUF errors are seen:
	 *	REPLAY_BUF_MBE_SMASK
	 *	FLIT_INPUT_BUF_MBE_SMASK
	 */
	if (is_ax(dd)) {			/* fixed in B0 */
		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
		reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
			| DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
		write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
	}

	/* pull LCB fifos out of reset - all fifo clocks must be stable */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);

	/* give 8051 access to the LCB CSRs */
	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
	set_8051_lcb_access(dd);

	ppd->neighbor_guid =
		read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
	ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
					DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
	ppd->neighbor_type =
		read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
		DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
	ppd->neighbor_fm_security =
		read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
		DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
	dd_dev_info(dd,
		"Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
		ppd->neighbor_guid, ppd->neighbor_type,
		ppd->mgmt_allowed, ppd->neighbor_fm_security);
	if (ppd->mgmt_allowed)
		add_full_mgmt_pkey(ppd);

	/* tell the 8051 to go to LinkUp */
	set_link_state(ppd, HLS_GOING_UP);
}
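
/*
 * Illustrative sketch (not part of the original driver): the portinfo
 * LTP CRC field built in handle_verify_cap() above packs three nibbles
 * as supported << 8 | enabled << 4 | active.  The helper name and the
 * example values below are hypothetical; only the packing layout is
 * taken from the code above.
 */
static inline u16 example_pack_port_ltp_crc(u16 supported, u16 enabled,
					    u16 active)
{
	/* e.g. supported 0x7, enabled 0x3, active 0x1 -> 0x0731 */
	return (supported << 8) | (enabled << 4) | active;
}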
/*
 * Apply the link width downgrade enabled policy against the current active
 * link widths.
 *
 * Called when the enabled policy changes or the active link widths change.
 */
void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
{
	int do_bounce = 0;
	int tries;
	u16 lwde;
	u16 tx, rx;

	/* use the hls lock to avoid a race with actual link up */
	tries = 0;
retry:
	mutex_lock(&ppd->hls_lock);
	/* only apply if the link is up */
	if (!(ppd->host_link_state & HLS_UP)) {
		/* still going up.. wait and retry */
		if (ppd->host_link_state & HLS_GOING_UP) {
			if (++tries < 1000) {
				mutex_unlock(&ppd->hls_lock);
				usleep_range(100, 120); /* arbitrary */
				goto retry;
			}
			dd_dev_err(ppd->dd,
				"%s: giving up waiting for link state change\n",
				__func__);
		}
		goto done;
	}

	lwde = ppd->link_width_downgrade_enabled;

	if (refresh_widths) {
		get_link_widths(ppd->dd, &tx, &rx);
		ppd->link_width_downgrade_tx_active = tx;
		ppd->link_width_downgrade_rx_active = rx;
	}

	if (lwde == 0) {
		/* downgrade is disabled */

		/* bounce if not at starting active width */
		if ((ppd->link_width_active !=
					ppd->link_width_downgrade_tx_active)
				|| (ppd->link_width_active !=
					ppd->link_width_downgrade_rx_active)) {
			dd_dev_err(ppd->dd,
				"Link downgrade is disabled and link has downgraded, downing link\n");
			dd_dev_err(ppd->dd,
				"  original 0x%x, tx active 0x%x, rx active 0x%x\n",
				ppd->link_width_active,
				ppd->link_width_downgrade_tx_active,
				ppd->link_width_downgrade_rx_active);
			do_bounce = 1;
		}
	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0
		|| (lwde & ppd->link_width_downgrade_rx_active) == 0) {
		/* Tx or Rx is outside the enabled policy */
		dd_dev_err(ppd->dd,
			"Link is outside of downgrade allowed, downing link\n");
		dd_dev_err(ppd->dd,
			"  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
			lwde,
			ppd->link_width_downgrade_tx_active,
			ppd->link_width_downgrade_rx_active);
		do_bounce = 1;
	}

done:
	mutex_unlock(&ppd->hls_lock);

	if (do_bounce) {
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
			OPA_LINKDOWN_REASON_WIDTH_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	}
}

/*
 * Handle a link downgrade interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_downgrade(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
							link_downgrade_work);

	dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
	apply_link_downgrade_policy(ppd, 1);
}

static char *dcc_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dcc_err_flags,
		ARRAY_SIZE(dcc_err_flags));
}

static char *lcb_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, lcb_err_flags,
		ARRAY_SIZE(lcb_err_flags));
}

static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_err_flags,
		ARRAY_SIZE(dc8051_err_flags));
}

static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
		ARRAY_SIZE(dc8051_info_err_flags));
}

static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
		ARRAY_SIZE(dc8051_info_host_msg_flags));
}
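
/*
 * A minimal sketch of the decode pattern behind the *_err_string()
 * wrappers above, assuming a { mask, name } table like the ones passed
 * to flag_string().  The structure and helper are illustrative only,
 * not the driver's actual flag_string() implementation.
 */
struct example_flag_name {
	u64 mask;
	const char *name;
};

static inline char *example_flag_decode(char *buf, int buf_len, u64 flags,
					const struct example_flag_name *table,
					size_t nflags)
{
	size_t i;
	int len = 0;

	buf[0] = '\0';
	for (i = 0; i < nflags; i++) {
		if (!(flags & table[i].mask))
			continue;
		/* append a comma-separated name for each set bit */
		len += snprintf(buf + len, buf_len - len, "%s%s",
				len ? "," : "", table[i].name);
		if (len >= buf_len - 1)
			break;	/* buffer full */
	}
	return buf;
}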
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 info, err, host_msg;
	int queue_link_down = 0;
	char buf[96];

	/* look at the flags */
	if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
		/* 8051 information set by firmware */
		/* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
		info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
		err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
		host_msg = (info >>
			DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;

		/*
		 * Handle error flags.
		 */
		if (err & FAILED_LNI) {
			/*
			 * LNI error indications are cleared by the 8051
			 * only when starting polling.  Only pay attention
			 * to them when in the states that occur during
			 * LNI.
			 */
			if (ppd->host_link_state
			    & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
				queue_link_down = 1;
				dd_dev_info(dd, "Link error: %s\n",
					dc8051_info_err_string(buf,
						sizeof(buf),
						err & FAILED_LNI));
			}
			err &= ~(u64)FAILED_LNI;
		}
		/* unknown frames can happen during LNI, just count */
		if (err & UNKNOWN_FRAME) {
			ppd->unknown_frame_count++;
			err &= ~(u64)UNKNOWN_FRAME;
		}
		if (err) {
			/* report remaining errors, but do not do anything */
			dd_dev_err(dd, "8051 info error: %s\n",
				dc8051_info_err_string(buf, sizeof(buf), err));
		}

		/*
		 * Handle host message flags.
		 */
		if (host_msg & HOST_REQ_DONE) {
			/*
			 * Presently, the driver does a busy wait for
			 * host requests to complete.  This is only an
			 * informational message.
			 * NOTE: The 8051 clears the host message
			 * information *on the next 8051 command*.
			 * Therefore, when linkup is achieved,
			 * this flag will still be set.
			 */
			host_msg &= ~(u64)HOST_REQ_DONE;
		}
		if (host_msg & BC_SMA_MSG) {
			queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
			host_msg &= ~(u64)BC_SMA_MSG;
		}
		if (host_msg & LINKUP_ACHIEVED) {
			dd_dev_info(dd, "8051: Link up\n");
			queue_work(ppd->hfi1_wq, &ppd->link_up_work);
			host_msg &= ~(u64)LINKUP_ACHIEVED;
		}
		if (host_msg & EXT_DEVICE_CFG_REQ) {
			queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
			host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
		}
		if (host_msg & VERIFY_CAP_FRAME) {
			queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
			host_msg &= ~(u64)VERIFY_CAP_FRAME;
		}
		if (host_msg & LINK_GOING_DOWN) {
			const char *extra = "";
			/* no downgrade action needed if going down */
			if (host_msg & LINK_WIDTH_DOWNGRADED) {
				host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
				extra = " (ignoring downgrade)";
			}
			dd_dev_info(dd, "8051: Link down%s\n", extra);
			queue_link_down = 1;
			host_msg &= ~(u64)LINK_GOING_DOWN;
		}
		if (host_msg & LINK_WIDTH_DOWNGRADED) {
			queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
			host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
		}
		if (host_msg) {
			/* report remaining messages, but do not do anything */
			dd_dev_info(dd, "8051 info host message: %s\n",
				dc8051_info_host_msg_string(buf, sizeof(buf),
					host_msg));
		}

		reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
	}
	if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
		/*
		 * Lost the 8051 heartbeat.  If this happens, we
		 * receive constant interrupts about it.  Disable
		 * the interrupt after the first.
		 */
		dd_dev_err(dd, "Lost 8051 heartbeat\n");
		write_csr(dd, DC_DC8051_ERR_EN,
			read_csr(dd, DC_DC8051_ERR_EN)
			  & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);

		reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
	}
	if (reg) {
		/* report the error, but do not do anything */
		dd_dev_err(dd, "8051 error: %s\n",
			dc8051_err_string(buf, sizeof(buf), reg));
	}

	if (queue_link_down) {
		/* if the link is already going down or disabled, do not
		 * queue another */
		if ((ppd->host_link_state
				& (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN))
				|| ppd->link_enabled == 0) {
			dd_dev_info(dd, "%s: not queuing link down\n",
				__func__);
		} else {
			queue_work(ppd->hfi1_wq, &ppd->link_down_work);
		}
	}
}

static const char * const fm_config_txt[] = {
[0] =
	"BadHeadDist: Distance violation between two head flits",
[1] =
	"BadTailDist: Distance violation between two tail flits",
[2] =
	"BadCtrlDist: Distance violation between two credit control flits",
[3] =
	"BadCrdAck: Credits return for unsupported VL",
[4] =
	"UnsupportedVLMarker: Received VL Marker",
[5] =
	"BadPreempt: Exceeded the preemption nesting level",
[6] =
	"BadControlFlit: Received unsupported control flit",
/* no 7 */
[8] =
	"UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
};

static const char * const port_rcv_txt[] = {
[1] =
	"BadPktLen: Illegal PktLen",
[2] =
	"PktLenTooLong: Packet longer than PktLen",
[3] =
	"PktLenTooShort: Packet shorter than PktLen",
[4] =
	"BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
[5] =
	"BadDLID: Illegal DLID (0, doesn't match HFI)",
[6] =
	"BadL2: Illegal L2 opcode",
[7] =
	"BadSC: Unsupported SC",
[9] =
	"BadRC: Illegal RC",
[11] =
	"PreemptError: Preempting with same VL",
[12] =
	"PreemptVL15: Preempting a VL15 packet",
};
#define OPA_LDR_FMCONFIG_OFFSET 16
#define OPA_LDR_PORTRCV_OFFSET 0
static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 info, hdr0, hdr1;
	const char *extra;
	char buf[96];
	struct hfi1_pportdata *ppd = dd->pport;
	u8 lcl_reason = 0;
	int do_bounce = 0;

	if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
		if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
			info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
			dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
		}
		reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
		struct hfi1_pportdata *ppd = dd->pport;
		/* this counter saturates at (2^32) - 1 */
		if (ppd->link_downed < (u32)UINT_MAX)
			ppd->link_downed++;
		reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
		u8 reason_valid = 1;

		info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
		if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
			dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
		}
		switch (info) {
		case 0:
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
			extra = fm_config_txt[info];
			break;
		case 8:
			extra = fm_config_txt[info];
			if (ppd->port_error_action &
			    OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
				do_bounce = 1;
				/*
				 * lcl_reason cannot be derived from info
				 * for this error
				 */
				lcl_reason =
				  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
			}
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}

		if (reason_valid && !do_bounce) {
			do_bounce = ppd->port_error_action &
				(1 << (OPA_LDR_FMCONFIG_OFFSET + info));
			lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
		}

		/* just report this */
		dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
		reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
		u8 reason_valid = 1;

		info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
		hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
		hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
		if (!(dd->err_info_rcvport.status_and_code &
		      OPA_EI_STATUS_SMASK)) {
			dd->err_info_rcvport.status_and_code =
				info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_rcvport.status_and_code |=
				OPA_EI_STATUS_SMASK;
			/* save first 2 flits in the packet that caused
			 * the error */
			dd->err_info_rcvport.packet_flit1 = hdr0;
			dd->err_info_rcvport.packet_flit2 = hdr1;
		}
		switch (info) {
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
		case 9:
		case 11:
		case 12:
			extra = port_rcv_txt[info];
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}

		if (reason_valid && !do_bounce) {
			do_bounce = ppd->port_error_action &
				(1 << (OPA_LDR_PORTRCV_OFFSET + info));
			lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
		}

		/* just report this */
		dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
		dd_dev_info(dd, "           hdr0 0x%llx, hdr1 0x%llx\n",
			hdr0, hdr1);

		reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
		/* informative only */
		dd_dev_info(dd, "8051 access to LCB blocked\n");
		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
	}
	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
		/* informative only */
		dd_dev_info(dd, "host access to LCB blocked\n");
		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
	}

	/* report any remaining errors */
	if (reg)
		dd_dev_info(dd, "DCC Error: %s\n",
			dcc_err_string(buf, sizeof(buf), reg));

	if (lcl_reason == 0)
		lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;

	if (do_bounce) {
		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
		set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
		queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
	}
}

static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];

	dd_dev_info(dd, "LCB Error: %s\n",
		lcb_err_string(buf, sizeof(buf), reg));
}
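
/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * fmconfig and rcvport blocks above both test port_error_action with a
 * per-error-code bit, offset by the error class.  For example, with
 * OPA_LDR_FMCONFIG_OFFSET == 16, fmconfig error code 2 is governed by
 * bit 18 of port_error_action.
 */
static inline int example_error_triggers_bounce(u32 port_error_action,
						unsigned int class_offset,
						u64 code)
{
	return !!(port_error_action & (1 << (class_offset + code)));
}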
/*
 * CCE block DC interrupt.  Source is < 8.
 */
static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &dc_errs[source];

	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
	} else if (source == 3 /* dc_lbm_int */) {
		/*
		 * This indicates that a parity error has occurred on the
		 * address/control lines presented to the LBM.  The error
		 * is a single pulse, there is no associated error flag,
		 * and it is non-maskable.  This is because if a parity
		 * error occurs on the request the request is dropped.
		 * This should never occur, but it is nice to know if it
		 * ever does.
		 */
		dd_dev_err(dd, "Parity error in DC LBM block\n");
	} else {
		dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
	}
}

/*
 * TX block send credit interrupt.  Source is < 160.
 */
static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
{
	sc_group_release_update(dd, source);
}

/*
 * TX block SDMA interrupt.  Source is < 48.
 *
 * SDMA interrupts are grouped by type:
 *
 *	 0 -  N-1 = SDma
 *	 N - 2N-1 = SDmaProgress
 *	2N - 3N-1 = SDmaIdle
 */
static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
{
	/* what interrupt */
	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
		   slashstrip(__FILE__), __LINE__, __func__);
	sdma_dumpstate(&dd->per_sdma[which]);
#endif

	if (likely(what < 3 && which < dd->num_sdma)) {
		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
	} else {
		/* should not happen */
		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
	}
}

/*
 * RX block receive available interrupt.  Source is < 160.
 */
static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
{
	struct hfi1_ctxtdata *rcd;
	char *err_detail;

	if (likely(source < dd->num_rcv_contexts)) {
		rcd = dd->rcd[source];
		if (rcd) {
			if (source < dd->first_user_ctxt)
				rcd->do_interrupt(rcd, 0);
			else
				handle_user_interrupt(rcd);
			return;	/* OK */
		}
		/* received an interrupt, but no rcd */
		err_detail = "dataless";
	} else {
		/* received an interrupt, but are not using that context */
		err_detail = "out of range";
	}
	dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
		err_detail, source);
}

/*
 * RX block receive urgent interrupt.  Source is < 160.
 */
static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
{
	struct hfi1_ctxtdata *rcd;
	char *err_detail;

	if (likely(source < dd->num_rcv_contexts)) {
		rcd = dd->rcd[source];
		if (rcd) {
			/* only pay attention to user urgent interrupts */
			if (source >= dd->first_user_ctxt)
				handle_user_interrupt(rcd);
			return;	/* OK */
		}
		/* received an interrupt, but no rcd */
		err_detail = "dataless";
	} else {
		/* received an interrupt, but are not using that context */
		err_detail = "out of range";
	}
	dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
		err_detail, source);
}

/*
 * Reserved range interrupt.  Should not be called in normal operation.
 */
static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
{
	char name[64];

	dd_dev_err(dd, "unexpected %s interrupt\n",
		is_reserved_name(name, sizeof(name), source));
}

static const struct is_table is_table[] = {
/* start		end
			name func		interrupt func */
{ IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
			is_misc_err_name,	is_misc_err_int },
{ IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
			is_sdma_eng_err_name,	is_sdma_eng_err_int },
{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
			is_sendctxt_err_name,	is_sendctxt_err_int },
{ IS_SDMA_START,	 IS_SDMA_END,
			is_sdma_eng_name,	is_sdma_eng_int },
{ IS_VARIOUS_START,	 IS_VARIOUS_END,
			is_various_name,	is_various_int },
{ IS_DC_START,		 IS_DC_END,
			is_dc_name,		is_dc_int },
{ IS_RCVAVAIL_START,	 IS_RCVAVAIL_END,
			is_rcv_avail_name,	is_rcv_avail_int },
{ IS_RCVURGENT_START,	 IS_RCVURGENT_END,
			is_rcv_urgent_name,	is_rcv_urgent_int },
{ IS_SENDCREDIT_START,	 IS_SENDCREDIT_END,
			is_send_credit_name,	is_send_credit_int },
{ IS_RESERVED_START,	 IS_RESERVED_END,
			is_reserved_name,	is_reserved_int },
};
/*
 * Interrupt source interrupt - called when the given source has an interrupt.
 * Source is a bit index into an array of 64-bit integers.
 */
static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
{
	const struct is_table *entry;

	/* avoids a double compare by walking the table in-order */
	for (entry = &is_table[0]; entry->is_name; entry++) {
		if (source < entry->end) {
			trace_hfi1_interrupt(dd, entry, source);
			entry->is_int(dd, source - entry->start);
			return;
		}
	}
	/* fell off the end */
	dd_dev_err(dd, "invalid interrupt source %u\n", source);
}

/*
 * General interrupt handler.  This is able to correctly handle
 * all interrupts in case INTx is used.
 */
static irqreturn_t general_interrupt(int irq, void *data)
{
	struct hfi1_devdata *dd = data;
	u64 regs[CCE_NUM_INT_CSRS];
	u32 bit;
	int i;

	this_cpu_inc(*dd->int_counter);

	/* phase 1: scan and clear all handled interrupts */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
		if (dd->gi_mask[i] == 0) {
			regs[i] = 0;	/* used later */
			continue;
		}
		regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
				dd->gi_mask[i];
		/* only clear if anything is set */
		if (regs[i])
			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
	}

	/* phase 2: call the appropriate handler */
	for_each_set_bit(bit, (unsigned long *)&regs[0],
			 CCE_NUM_INT_CSRS * 64) {
		is_interrupt(dd, bit);
	}

	return IRQ_HANDLED;
}

static irqreturn_t sdma_interrupt(int irq, void *data)
{
	struct sdma_engine *sde = data;
	struct hfi1_devdata *dd = sde->dd;
	u64 status;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	sdma_dumpstate(sde);
#endif

	this_cpu_inc(*dd->int_counter);

	/* This read_csr is really bad in the hot path */
	status = read_csr(dd,
			CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
			& sde->imask;
	if (likely(status)) {
		/* clear the interrupt(s) */
		write_csr(dd,
			CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
			status);

		/* handle the interrupt(s) */
		sdma_engine_interrupt(sde, status);
	} else
		dd_dev_err(dd,
			"SDMA engine %u interrupt, but no status bits set\n",
			sde->this_idx);

	return IRQ_HANDLED;
}

/*
 * Clear the receive interrupt.  Use a read of the interrupt clear CSR
 * to ensure that the write completed.  This does NOT guarantee that
 * queued DMA writes to memory from the chip are pushed.
 */
static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);

	mmiowb();	/* make sure everything before is written */
	write_csr(dd, addr, rcd->imask);
	/* force the above write on the chip and get a value back */
	(void)read_csr(dd, addr);
}

/* force the receive interrupt */
void force_recv_intr(struct hfi1_ctxtdata *rcd)
{
	write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
}

/*
 * Return non-zero if a packet is present.
 *
 * This routine is called when rechecking for packets after the RcvAvail
 * interrupt has been cleared down.  First, do a quick check of memory for
 * a packet present.  If not found, use an expensive CSR read of the context
 * tail to determine the actual tail.  The CSR read is necessary because there
 * is no method to push pending DMAs to memory other than an interrupt and we
 * are trying to determine if we need to force an interrupt.
 */
static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
{
	u32 tail;
	int present;

	if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
		present = (rcd->seq_cnt ==
				rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
	else /* is RDMA rtail */
		present = (rcd->head != get_rcvhdrtail(rcd));

	if (present)
		return 1;

	/* fall back to a CSR read, correct independent of DMA_RTAIL */
	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
	return rcd->head != tail;
}

/*
 * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
 * This routine will try to handle packets immediately (latency), but if
 * it finds too many, it will invoke the thread handler (bandwidth).  The
 * chip receive interrupt is *not* cleared down until this or the thread (if
 * invoked) is finished.  The intent is to avoid extra interrupts while we
 * are processing packets anyway.
 */
static irqreturn_t receive_context_interrupt(int irq, void *data)
{
	struct hfi1_ctxtdata *rcd = data;
	struct hfi1_devdata *dd = rcd->dd;
	int disposition;
	int present;

	trace_hfi1_receive_interrupt(dd, rcd->ctxt);
	this_cpu_inc(*dd->int_counter);
	aspm_ctx_disable(rcd);

	/* receive interrupt remains blocked while processing packets */
	disposition = rcd->do_interrupt(rcd, 0);

	/*
	 * Too many packets were seen while processing packets in this
	 * IRQ handler.  Invoke the handler thread.  The receive interrupt
	 * remains blocked.
	 */
	if (disposition == RCV_PKT_LIMIT)
		return IRQ_WAKE_THREAD;

	/*
	 * The packet processor detected no more packets.  Clear the receive
	 * interrupt and recheck for a packet that may have arrived
	 * after the previous check and interrupt clear.  If a packet arrived,
	 * force another interrupt.
	 */
	clear_recv_intr(rcd);
	present = check_packet_present(rcd);
	if (present)
		force_recv_intr(rcd);

	return IRQ_HANDLED;
}

/*
 * Receive packet thread handler.  This expects to be invoked with the
 * receive interrupt still blocked.
 */
static irqreturn_t receive_context_thread(int irq, void *data)
{
	struct hfi1_ctxtdata *rcd = data;
	int present;

	/* receive interrupt is still blocked from the IRQ handler */
	(void)rcd->do_interrupt(rcd, 1);

	/*
	 * The packet processor will only return if it detected no more
	 * packets.  Hold IRQs here so we can safely clear the interrupt and
	 * recheck for a packet that may have arrived after the previous
	 * check and the interrupt clear.  If a packet arrived, force another
	 * interrupt.
	 */
	local_irq_disable();
	clear_recv_intr(rcd);
	present = check_packet_present(rcd);
	if (present)
		force_recv_intr(rcd);
	local_irq_enable();

	return IRQ_HANDLED;
}
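
/*
 * A minimal sketch of how the two receive handlers above pair up with
 * request_threaded_irq(): the hard handler returns IRQ_WAKE_THREAD when
 * it hits RCV_PKT_LIMIT, which schedules receive_context_thread().  The
 * call below is illustrative only; the driver's real MSI-X setup (irq
 * number, name string, and context lookup) lives elsewhere in this file.
 */
static inline int example_request_rcv_irq(int irq, struct hfi1_ctxtdata *rcd)
{
	/* "hfi1-rcv-example" is a hypothetical name for illustration */
	return request_threaded_irq(irq, receive_context_interrupt,
				    receive_context_thread, 0,
				    "hfi1-rcv-example", rcd);
}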
/* ========================================================================= */

u32 read_physical_state(struct hfi1_devdata *dd)
{
	u64 reg;

	reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
	return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
				& DC_DC8051_STS_CUR_STATE_PORT_MASK;
}

u32 read_logical_state(struct hfi1_devdata *dd)
{
	u64 reg;

	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
	return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
				& DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
}

static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
{
	u64 reg;

	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
	/* clear current state, set new state */
	reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
	reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
	write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
}

/*
 * Use the 8051 to read a LCB CSR.
 */
static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
{
	u32 regno;
	int ret;

	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		if (acquire_lcb_access(dd, 0) == 0) {
			*data = read_csr(dd, addr);
			release_lcb_access(dd, 0);
			return 0;
		}
		return -EBUSY;
	}

	/* register is an index of LCB registers: (offset - base) / 8 */
	regno = (addr - DC_LCB_CFG_RUN) >> 3;
	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
	if (ret != HCMD_SUCCESS)
		return -EBUSY;
	return 0;
}

/*
 * Read an LCB CSR.  Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return read_lcb_via_8051(dd, addr, data);
	/* if going up or down, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
		return -EBUSY;
	/* otherwise, host has access */
	*data = read_csr(dd, addr);
	return 0;
}

/*
 * Use the 8051 to write a LCB CSR.
 */
static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	u32 regno;
	int ret;

	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
	    (dd->dc8051_ver < dc8051_ver(0, 20))) {
		if (acquire_lcb_access(dd, 0) == 0) {
			write_csr(dd, addr, data);
			release_lcb_access(dd, 0);
			return 0;
		}
		return -EBUSY;
	}

	/* register is an index of LCB registers: (offset - base) / 8 */
	regno = (addr - DC_LCB_CFG_RUN) >> 3;
	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
	if (ret != HCMD_SUCCESS)
		return -EBUSY;
	return 0;
}

/*
 * Write an LCB CSR.  Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return write_lcb_via_8051(dd, addr, data);
	/* if going up or down, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
		return -EBUSY;
	/* otherwise, host has access */
	write_csr(dd, addr, data);
	return 0;
}
/*
 * Returns:
 *	< 0 = Linux error, not able to get access
 *	> 0 = 8051 command RETURN_CODE
 */
static int do_8051_command(
	struct hfi1_devdata *dd,
	u32 type,
	u64 in_data,
	u64 *out_data)
{
	u64 reg, completed;
	int return_code;
	unsigned long flags;
	unsigned long timeout;

	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);

	/*
	 * Alternative to holding the lock for a long time:
	 * - keep busy wait - have other users bounce off
	 */
	spin_lock_irqsave(&dd->dc8051_lock, flags);

	/* We can't send any commands to the 8051 if it's in reset */
	if (dd->dc_shutdown) {
		return_code = -ENODEV;
		goto fail;
	}

	/*
	 * If an 8051 host command timed out previously, then the 8051 is
	 * stuck.
	 *
	 * On first timeout, attempt to reset and restart the entire DC
	 * block (including 8051). (Is this too big of a hammer?)
	 *
	 * If the 8051 times out a second time, the reset did not bring it
	 * back to healthy life. In that case, fail any subsequent commands.
	 */
	if (dd->dc8051_timed_out) {
		if (dd->dc8051_timed_out > 1) {
			dd_dev_err(dd,
				"Previous 8051 host command timed out, skipping command %u\n",
				type);
			return_code = -ENXIO;
			goto fail;
		}
		spin_unlock_irqrestore(&dd->dc8051_lock, flags);
		dc_shutdown(dd);
		dc_start(dd);
		spin_lock_irqsave(&dd->dc8051_lock, flags);
	}

	/*
	 * If there is no timeout, then the 8051 command interface is
	 * waiting for a command.
	 */

	/*
	 * When writing a LCB CSR, out_data contains the full value to
	 * be written, while in_data contains the relative LCB
	 * address in 7:0.  Do the work here, rather than the caller,
	 * of distributing the write data to where it needs to go:
	 *
	 * Write data
	 *   39:00 -> in_data[47:8]
	 *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
	 *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
	 */
	if (type == HCMD_WRITE_LCB_CSR) {
		in_data |= ((*out_data) & 0xffffffffffull) << 8;
		reg = ((((*out_data) >> 40) & 0xff) <<
				DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
		      | ((((*out_data) >> 48) & 0xffff) <<
				DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
	}

	/*
	 * Do two writes: the first to stabilize the type and req_data, the
	 * second to activate.
	 */
	reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
			<< DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
		| (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
			<< DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);

	/* wait for completion, alternate: interrupt */
	timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
	while (1) {
		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
		completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
		if (completed)
			break;
		if (time_after(jiffies, timeout)) {
			dd->dc8051_timed_out++;
			dd_dev_err(dd, "8051 host command %u timeout\n", type);
			if (out_data)
				*out_data = 0;
			return_code = -ETIMEDOUT;
			goto fail;
		}
		udelay(2);
	}

	if (out_data) {
		*out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
		if (type == HCMD_READ_LCB_CSR) {
			/* top 16 bits are in a different register */
			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
				& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
				<< (48
				    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
		}
	}
	return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
	dd->dc8051_timed_out = 0;
	/*
	 * Clear command for next user.
	 */
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);

fail:
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);

	return return_code;
}

static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
{
	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}

int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
		     u8 lane_id, u32 config_data)
{
	u64 data;
	int ret;

	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			"load 8051 config: field id %d, lane %d, err %d\n",
			(int)field_id, (int)lane_id, ret);
	}
	return ret;
}
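
/*
 * Usage sketch for the 8051 command interface above (hypothetical
 * caller): do_8051_command() returns a negative errno when the host
 * could not issue the command at all, and otherwise the 8051's own
 * RETURN_CODE, so both outcomes must be checked against HCMD_SUCCESS.
 */
static inline int example_issue_phy_state(struct hfi1_devdata *dd, u64 state)
{
	int ret = do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);

	if (ret < 0)
		return ret;	/* Linux error: could not get access */
	if (ret != HCMD_SUCCESS)
		return -EIO;	/* 8051 rejected the command */
	return 0;
}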
/*
 * Read the 8051 firmware "registers".  Use the RAM directly.  Always
 * set the result, even on error.
 * Return 0 on success, -errno on failure
 */
int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
		     u32 *result)
{
	u64 big_data;
	u32 addr;
	int ret;

	/* address start depends on the lane_id */
	if (lane_id < 4)
		addr = (4 * NUM_GENERAL_FIELDS)
			+ (lane_id * 4 * NUM_LANE_FIELDS);
	else
		addr = 0;
	addr += field_id * 4;

	/* read is in 8-byte chunks, hardware will truncate the address down */
	ret = read_8051_data(dd, addr, 8, &big_data);

	if (ret == 0) {
		/* extract the 4 bytes we want */
		if (addr & 0x4)
			*result = (u32)(big_data >> 32);
		else
			*result = (u32)big_data;
	} else {
		*result = 0;
		dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
			__func__, lane_id, field_id);
	}

	return ret;
}
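
/*
 * Worked example for the address math in read_8051_config() (values
 * hypothetical): lane fields start after the general fields, so field
 * 2 of lane 1 sits at 4*NUM_GENERAL_FIELDS + 1*4*NUM_LANE_FIELDS + 8.
 * The 8-byte read then spans two adjacent fields, and bit 2 of the
 * address selects the upper or lower 32 bits, as sketched below.
 */
static inline u32 example_extract_field(u64 big_data, u32 addr)
{
	return (addr & 0x4) ? (u32)(big_data >> 32) : (u32)big_data;
}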
static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
			      u8 continuous)
{
	u32 frame;

	frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
		| power_management << POWER_MANAGEMENT_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
				GENERAL_CONFIG, frame);
}

static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
				 u16 vl15buf, u8 crc_sizes)
{
	u32 frame;

	frame = (u32)vau << VAU_SHIFT
		| (u32)z << Z_SHIFT
		| (u32)vcu << VCU_SHIFT
		| (u32)vl15buf << VL15BUF_SHIFT
		| (u32)crc_sizes << CRC_SIZES_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
				GENERAL_CONFIG, frame);
}

static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
			 &frame);
	*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
	*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}

static int write_vc_local_link_width(struct hfi1_devdata *dd,
				     u8 misc_bits,
				     u8 flag_bits,
				     u16 link_widths)
{
	u32 frame;

	frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
		| (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
		| (u32)link_widths << LINK_WIDTH_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
		     frame);
}

static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
				 u8 device_rev)
{
	u32 frame;

	frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
		| ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
	return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
}

static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev)
{
	u32 frame;

	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
	*device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
	*device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
			& REMOTE_DEVICE_REV_MASK;
}

void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
{
	u32 frame;

	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
	*ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
	*ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
}

static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
	*power_management = (frame >> POWER_MANAGEMENT_SHIFT)
				& POWER_MANAGEMENT_MASK;
	*continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
				& CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
}

static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
	*vau = (frame >> VAU_SHIFT) & VAU_MASK;
	*z = (frame >> Z_SHIFT) & Z_MASK;
	*vcu = (frame >> VCU_SHIFT) & VCU_MASK;
	*vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
	*crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
}

static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate,
				      u16 *link_widths)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
			 &frame);
	*remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
				& REMOTE_TX_RATE_MASK;
	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}

static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
{
	u32 frame;

	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
	*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
}

static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
{
	u32 frame;

	read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
	*mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
}

static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
{
	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
}

static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
{
	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
}

void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
{
	u32 frame;
	int ret;

	*link_quality = 0;
	if (dd->pport->host_link_state & HLS_UP) {
		ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
				       &frame);
		if (ret == 0)
			*link_quality = (frame >> LINK_QUALITY_SHIFT)
						& LINK_QUALITY_MASK;
	}
}

static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
{
	u32 frame;

	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
	*pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
}

static int read_tx_settings(struct hfi1_devdata *dd,
			    u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion,
			    u8 *max_rate)
{
	u32 frame;
	int ret;

	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
	*enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
				& ENABLE_LANE_TX_MASK;
	*tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
				& TX_POLARITY_INVERSION_MASK;
	*rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
				& RX_POLARITY_INVERSION_MASK;
	*max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
	return ret;
}

static int write_tx_settings(struct hfi1_devdata *dd,
			     u8 enable_lane_tx,
			     u8 tx_polarity_inversion,
			     u8 rx_polarity_inversion,
			     u8 max_rate)
{
	u32 frame;

	/* no need to mask, all variable sizes match field widths */
	frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
		| tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
		| rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
		| max_rate << MAX_RATE_SHIFT;
	return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
}
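
/*
 * The frame helpers above all follow one shift-and-mask discipline:
 * writers OR shifted fields into a 32-bit frame, readers shift down and
 * mask.  A minimal generic sketch of that pattern (macro names are
 * illustrative, not from the driver):
 */
#define EXAMPLE_FIELD_PUT(val, shift)		((u32)(val) << (shift))
#define EXAMPLE_FIELD_GET(frame, shift, mask)	(((frame) >> (shift)) & (mask))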
static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
{
	u32 frame, version, prod_id;
	int ret, lane;

	/* 4 lanes */
	for (lane = 0; lane < 4; lane++) {
		ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
		if (ret) {
			dd_dev_err(dd,
				"Unable to read lane %d firmware details\n",
				lane);
			continue;
		}
		version = (frame >> SPICO_ROM_VERSION_SHIFT)
					& SPICO_ROM_VERSION_MASK;
		prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
					& SPICO_ROM_PROD_ID_MASK;
		dd_dev_info(dd,
			"Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
			lane, version, prod_id);
	}
}

/*
 * Read an idle LCB message.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
{
	int ret;

	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
		type, data_out);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "read idle message: type %d, err %d\n",
			(u32)type, ret);
		return -EINVAL;
	}
	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
	/* return only the payload as we already know the type */
	*data_out >>= IDLE_PAYLOAD_SHIFT;
	return 0;
}

/*
 * Read an idle SMA message.  To be done in response to a notification from
 * the 8051.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
{
	return read_idle_message(dd,
			(u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
}

/*
 * Send an idle LCB message.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int send_idle_message(struct hfi1_devdata *dd, u64 data)
{
	int ret;

	dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
	ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
			data, ret);
		return -EINVAL;
	}
	return 0;
}

/*
 * Send an idle SMA message.
 *
 * Returns 0 on success, -EINVAL on error
 */
int send_idle_sma(struct hfi1_devdata *dd, u64 message)
{
	u64 data;

	data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
		| ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
	return send_idle_message(dd, data);
}
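
/*
 * Worked example of the idle message layout used by send_idle_sma() and
 * read_idle_message() above: the type lives at IDLE_MSG_TYPE_SHIFT and
 * the payload at IDLE_PAYLOAD_SHIFT, so a round trip recovers the
 * payload once the type is known.  The helper name is illustrative.
 */
static inline u64 example_compose_idle_sma(u64 payload)
{
	return ((payload & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
	       ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
}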
/*
 * Initialize the LCB then do a quick link up.  This may or may not be
 * in loopback.
 *
 * return 0 on success, -errno on error
 */
static int do_quick_linkup(struct hfi1_devdata *dd)
{
	u64 reg;
	unsigned long timeout;
	int ret;

	lcb_shutdown(dd, 0);

	if (loopback) {
		/* LCB_CFG_LOOPBACK.VAL = 2 */
		/* LCB_CFG_LANE_WIDTH.VAL = 0 */
		write_csr(dd, DC_LCB_CFG_LOOPBACK,
			IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
	}

	/* start the LCBs */
	/* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);

	/* simulator only loopback steps */
	if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		/* LCB_CFG_RUN.EN = 1 */
		write_csr(dd, DC_LCB_CFG_RUN,
			1ull << DC_LCB_CFG_RUN_EN_SHIFT);

		/* watch LCB_STS_LINK_TRANSFER_ACTIVE */
		timeout = jiffies + msecs_to_jiffies(10);
		while (1) {
			reg = read_csr(dd,
				       DC_LCB_STS_LINK_TRANSFER_ACTIVE);
			if (reg)
				break;
			if (time_after(jiffies, timeout)) {
				dd_dev_err(dd,
					"timeout waiting for LINK_TRANSFER_ACTIVE\n");
				return -ETIMEDOUT;
			}
			udelay(2);
		}

		write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
			1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
	}

	if (!loopback) {
		/*
		 * When doing quick linkup and not in loopback, both
		 * sides must be done with LCB set-up before either
		 * starts the quick linkup.  Put a delay here so that
		 * both sides can be started and have a chance to be
		 * done with LCB set up before resuming.
		 */
		dd_dev_err(dd,
			"Pausing for peer to be finished with LCB set up\n");
		msleep(5000);
		dd_dev_err(dd,
			"Continuing with quick linkup\n");
	}

	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
	set_8051_lcb_access(dd);

	/*
	 * State "quick" LinkUp request sets the physical link state to
	 * LinkUp without a verify capability sequence.
	 * This state is in simulator v37 and later.
	 */
	ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			"%s: set physical link state to quick LinkUp failed with return %d\n",
			__func__, ret);

		set_host_lcb_access(dd);
		write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */

		if (ret >= 0)
			ret = -EINVAL;
		return ret;
	}

	return 0; /* success */
}

/*
 * Set the SerDes to internal loopback mode.
 * Returns 0 on success, -errno on error.
 */
static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
{
	int ret;

	ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
	if (ret == HCMD_SUCCESS)
		return 0;
	dd_dev_err(dd,
		"Set physical link state to SerDes Loopback failed with return %d\n",
		ret);
	if (ret >= 0)
		ret = -EINVAL;
	return ret;
}

/*
 * Do all special steps to set up loopback.
 */
static int init_loopback(struct hfi1_devdata *dd)
{
	dd_dev_info(dd, "Entering loopback mode\n");

	/* all loopbacks should disable self GUID check */
	write_csr(dd, DC_DC8051_CFG_MODE,
		(read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));

	/*
	 * The simulator has only one loopback option - LCB.  Switch
	 * to that option, which includes quick link up.
	 *
	 * Accept all valid loopback values.
	 */
	if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
		&& (loopback == LOOPBACK_SERDES
			|| loopback == LOOPBACK_LCB
			|| loopback == LOOPBACK_CABLE)) {
		loopback = LOOPBACK_LCB;
		quick_linkup = 1;
		return 0;
	}

	/* handle serdes loopback */
	if (loopback == LOOPBACK_SERDES) {
		/* internal serdes loopback needs quick linkup on RTL */
		if (dd->icode == ICODE_RTL_SILICON)
			quick_linkup = 1;
		return set_serdes_loopback_mode(dd);
	}

	/* LCB loopback - handled at poll time */
	if (loopback == LOOPBACK_LCB) {
		quick_linkup = 1; /* LCB is always quick linkup */

		/* not supported in emulation due to emulation RTL changes */
		if (dd->icode == ICODE_FPGA_EMULATION) {
			dd_dev_err(dd,
				"LCB loopback not supported in emulation\n");
			return -EINVAL;
		}
		return 0;
	}

	/* external cable loopback requires no extra steps */
	if (loopback == LOOPBACK_CABLE)
		return 0;

	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
	return -EINVAL;
}

/*
 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
 * used in the Verify Capability link width attribute.
 */
static u16 opa_to_vc_link_widths(u16 opa_widths)
{
	int i;
	u16 result = 0;

	static const struct link_bits {
		u16 from;
		u16 to;
	} opa_link_xlate[] = {
		{ OPA_LINK_WIDTH_1X, 1 << (1-1)  },
		{ OPA_LINK_WIDTH_2X, 1 << (2-1)  },
		{ OPA_LINK_WIDTH_3X, 1 << (3-1)  },
		{ OPA_LINK_WIDTH_4X, 1 << (4-1)  },
	};

	for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
		if (opa_widths & opa_link_xlate[i].from)
			result |= opa_link_xlate[i].to;
	}
	return result;
}
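
/*
 * Worked example for opa_to_vc_link_widths(): an enabled-width mask of
 * OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X translates to VC bits
 * (1 << 0) | (1 << 3) == 0x9.
 */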
/*
 * Set link attributes before moving to polling.
 */
static int set_local_link_attributes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	int ret;

	/* reset our fabric serdes to clear any lingering problems */
	fabric_serdes_reset(dd);

	/* set the local tx rate - need to read-modify-write */
	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
		&rx_polarity_inversion, &ppd->local_tx_rate);
	if (ret)
		goto set_local_link_attributes_fail;

	if (dd->dc8051_ver < dc8051_ver(0, 20)) {
		/* set the tx rate to the fastest enabled */
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
			ppd->local_tx_rate = 1;
		else
			ppd->local_tx_rate = 0;
	} else {
		/* set the tx rate to all enabled */
		ppd->local_tx_rate = 0;
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
			ppd->local_tx_rate |= 2;
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
			ppd->local_tx_rate |= 1;
	}

	enable_lane_tx = 0xF; /* enable all four lanes */
	ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
		rx_polarity_inversion, ppd->local_tx_rate);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/*
	 * DC supports continuous updates.
	 */
	ret = write_vc_local_phy(dd, 0 /* no power management */,
				 1 /* continuous updates */);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* z=1 in the next call: AU of 0 is not supported by the hardware */
	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
				    ppd->port_crc_mode_enabled);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	ret = write_vc_local_link_width(dd, 0, 0,
		opa_to_vc_link_widths(ppd->link_width_enabled));
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* let peer know who we are */
	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
	if (ret == HCMD_SUCCESS)
		return 0;

set_local_link_attributes_fail:
	dd_dev_err(dd,
		"Failed to set local link attributes, return 0x%x\n",
		ret);
	return ret;
}

/*
 * Call this to start the link.  Schedule a retry if the cable is not
 * present or if unable to start polling.  Do not do anything if the
 * link is disabled.  Returns 0 if link is disabled or moved to polling
 */
int start_link(struct hfi1_pportdata *ppd)
{
	if (!ppd->link_enabled) {
		dd_dev_info(ppd->dd,
			"%s: stopping link start because link is disabled\n",
			__func__);
		return 0;
	}
	if (!ppd->driver_link_ready) {
		dd_dev_info(ppd->dd,
			"%s: stopping link start because driver is not ready\n",
			__func__);
		return 0;
	}

	if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
			loopback == LOOPBACK_LCB ||
			ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
		return set_link_state(ppd, HLS_DN_POLL);

	dd_dev_info(ppd->dd,
		"%s: stopping link start because no cable is present\n",
		__func__);
	return -EAGAIN;
}
static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;
	unsigned long timeout;

	/*
	 * Check for QSFP interrupt for t_init (SFF 8679)
	 */
	timeout = jiffies + msecs_to_jiffies(2000);
	while (1) {
		mask = read_csr(dd, dd->hfi1_id ?
				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
		if (!(mask & QSFP_HFI0_INT_N)) {
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
				  ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
			break;
		}
		if (time_after(jiffies, timeout)) {
			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
				__func__);
			break;
		}
		udelay(2);
	}
}

static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;

	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
	if (enable)
		mask |= (u64)QSFP_HFI0_INT_N;
	else
		mask &= ~(u64)QSFP_HFI0_INT_N;
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
}

void reset_qsfp(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask, qsfp_mask;

	/* Disable INT_N from triggering QSFP interrupts */
	set_qsfp_int_n(ppd, 0);

	/* Reset the QSFP */
	mask = (u64)QSFP_HFI0_RESET_N;
	qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
	qsfp_mask |= mask;
	write_csr(dd,
		dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);

	qsfp_mask = read_csr(dd, dd->hfi1_id ?
				ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
	qsfp_mask &= ~mask;
	write_csr(dd,
		dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	udelay(10);

	qsfp_mask |= mask;
	write_csr(dd,
		dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	wait_for_qsfp_init(ppd);

	/*
	 * Allow INT_N to trigger the QSFP interrupt to watch
	 * for alarms and warnings
	 */
	set_qsfp_int_n(ppd, 1);
}
static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
					u8 *qsfp_interrupt_status)
{
	struct hfi1_devdata *dd = ppd->dd;

	if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
		(qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
		dd_dev_info(dd,
			"%s: QSFP cable on fire\n",
			__func__);

	if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
		(qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
		dd_dev_info(dd,
			"%s: QSFP cable temperature too low\n",
			__func__);

	if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
		(qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
		dd_dev_info(dd,
			"%s: QSFP supply voltage too high\n",
			__func__);

	if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
		(qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
		dd_dev_info(dd,
			"%s: QSFP supply voltage too low\n",
			__func__);

	/* Byte 2 is vendor specific */

	if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
		(qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
		dd_dev_info(dd,
			"%s: Cable RX channel 1/2 power too high\n",
			__func__);

	if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
		(qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
		dd_dev_info(dd,
			"%s: Cable RX channel 1/2 power too low\n",
			__func__);

	if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
		(qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
		dd_dev_info(dd,
			"%s: Cable RX channel 3/4 power too high\n",
			__func__);

	if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
		(qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
		dd_dev_info(dd,
			"%s: Cable RX channel 3/4 power too low\n",
			__func__);

	if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
		(qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
		dd_dev_info(dd,
			"%s: Cable TX channel 1/2 bias too high\n",
			__func__);

	if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
		(qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
		dd_dev_info(dd,
			"%s: Cable TX channel 1/2 bias too low\n",
			__func__);

	if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
		(qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
		dd_dev_info(dd,
			"%s: Cable TX channel 3/4 bias too high\n",
			__func__);

	if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
		(qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
		dd_dev_info(dd,
			"%s: Cable TX channel 3/4 bias too low\n",
			__func__);

	if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
		(qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
		dd_dev_info(dd,
			"%s: Cable TX channel 1/2 power too high\n",
			__func__);

	if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
		(qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
		dd_dev_info(dd,
			"%s: Cable TX channel 1/2 power too low\n",
			__func__);

	if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
		(qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
		dd_dev_info(dd,
			"%s: Cable TX channel 3/4 power too high\n",
			__func__);

	if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
		(qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
		dd_dev_info(dd,
			"%s: Cable TX channel 3/4 power too low\n",
			__func__);

	/* Bytes 9-10 and 11-12 are reserved */
	/* Bytes 13-15 are vendor specific */

	return 0;
}
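
/*
 * A sketch of a table-driven alternative to the if-chain in
 * handle_qsfp_error_conditions() above -- a refactoring idea, not the
 * driver's code.  Each entry names the status byte, the combined
 * alarm/warning mask for that byte, and the message to log.
 */
struct example_qsfp_check {
	u8 byte;		/* index into qsfp_interrupt_status[] */
	u8 bits;		/* alarm | warning mask for that byte */
	const char *msg;
};

static const struct example_qsfp_check example_qsfp_checks[] = {
	{ 0, QSFP_HIGH_TEMP_ALARM | QSFP_HIGH_TEMP_WARNING,
	  "QSFP cable temperature too high" },
	{ 0, QSFP_LOW_TEMP_ALARM | QSFP_LOW_TEMP_WARNING,
	  "QSFP cable temperature too low" },
	{ 1, QSFP_HIGH_VCC_ALARM | QSFP_HIGH_VCC_WARNING,
	  "QSFP supply voltage too high" },
	{ 1, QSFP_LOW_VCC_ALARM | QSFP_LOW_VCC_WARNING,
	  "QSFP supply voltage too low" },
};

static inline void example_check_qsfp_status(struct hfi1_devdata *dd,
					     const u8 *status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_qsfp_checks); i++) {
		const struct example_qsfp_check *c = &example_qsfp_checks[i];

		if (status[c->byte] & c->bits)
			dd_dev_info(dd, "%s: %s\n", __func__, c->msg);
	}
}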
/* This routine will only be scheduled if the QSFP module is present */
void qsfp_event(struct work_struct *work)
{
	struct qsfp_data *qd;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;

	qd = container_of(work, struct qsfp_data, qsfp_work);
	ppd = qd->ppd;
	dd = ppd->dd;

	/* Sanity check */
	if (!qsfp_mod_present(ppd))
		return;

	/*
	 * Turn DC back on after cable has been
	 * re-inserted. Up until now, the DC has been in
	 * reset to save power.
	 */
	dc_start(dd);

	if (qd->cache_refresh_required) {

		set_qsfp_int_n(ppd, 0);

		wait_for_qsfp_init(ppd);

		/*
		 * Allow INT_N to trigger the QSFP interrupt to watch
		 * for alarms and warnings
		 */
		set_qsfp_int_n(ppd, 1);

		tune_serdes(ppd);

		start_link(ppd);
	}

	if (qd->check_interrupt_flags) {
		u8 qsfp_interrupt_status[16] = {0,};

		if (qsfp_read(ppd, dd->hfi1_id, 6,
			      &qsfp_interrupt_status[0], 16) != 16) {
			dd_dev_info(dd,
				"%s: Failed to read status of QSFP module\n",
				__func__);
		} else {
			unsigned long flags;

			handle_qsfp_error_conditions(
					ppd, qsfp_interrupt_status);
			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.check_interrupt_flags = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
		}
	}
}
static void init_qsfp_int(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 qsfp_mask, cce_int_mask;
	const int qsfp1_int_smask = QSFP1_INT % 64;
	const int qsfp2_int_smask = QSFP2_INT % 64;

	/*
	 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
	 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
	 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
	 * the index of the appropriate CSR in the CCEIntMask CSR array
	 */
	cce_int_mask = read_csr(dd, CCE_INT_MASK +
				(8 * (QSFP1_INT / 64)));
	if (dd->hfi1_id) {
		cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
		write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
			  cce_int_mask);
	} else {
		cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
		write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
			  cce_int_mask);
	}

	qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
	/* Clear current status to avoid spurious interrupts */
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
		  qsfp_mask);
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
		  qsfp_mask);

	set_qsfp_int_n(ppd, 0);

	/* Handle active low nature of INT_N and MODPRST_N pins */
	if (qsfp_mod_present(ppd))
		qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
		  qsfp_mask);
}

/*
 * Do a one-time initialize of the LCB block.
 */
static void init_lcb(struct hfi1_devdata *dd)
{
	/* simulator does not correctly handle LCB cclk loopback, skip */
	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
		return;

	/* the DC has been reset earlier in the driver load */

	/* set LCB for cclk loopback on the port */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
}

int bringup_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 guid;
	int ret;

	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);

	guid = ppd->guid;
	if (!guid) {
		if (dd->base_guid)
			guid = dd->base_guid + ppd->port - 1;
		ppd->guid = guid;
	}

	/* Set linkinit_reason on power up per OPA spec */
	ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;

	/* one-time init of the LCB */
	init_lcb(dd);

	if (loopback) {
		ret = init_loopback(dd);
		if (ret < 0)
			return ret;
	}

	/* tune the SERDES to a ballpark setting for
	 * optimal signal and bit error rate
	 * Needs to be done before starting the link
	 */
	tune_serdes(ppd);

	return start_link(ppd);
}

void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * Shut down the link and keep it down.  First turn off that the
	 * driver wants to allow the link to be up (driver_link_ready).
	 * Then make sure the link is not automatically restarted
	 * (link_enabled).  Cancel any pending restart.  And finally
	 * go offline.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;

	ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
		OPA_LINKDOWN_REASON_SMA_DISABLED);
	set_link_state(ppd, HLS_DN_OFFLINE);

	/* disable the port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}
static inline int init_cpu_counters(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->ibport_data.rvp.rc_acks = NULL;
		ppd->ibport_data.rvp.rc_qacks = NULL;
		ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
		ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
		ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
		if (!ppd->ibport_data.rvp.rc_acks ||
		    !ppd->ibport_data.rvp.rc_delayed_comp ||
		    !ppd->ibport_data.rvp.rc_qacks)
			return -ENOMEM;
	}

	return 0;
}
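/*
 * Illustrative sketch (not driver code): the per-cpu counters allocated
 * above are written locklessly on the hot path and only summed at read
 * time.  Using the standard percpu API, a reader would look roughly
 * like the following; the helper name get_all_cpu_total() here is an
 * assumption for illustration:
 *
 *	static u64 get_all_cpu_total(u64 __percpu *cntr)
 *	{
 *		int cpu;
 *		u64 counter = 0;
 *
 *		for_each_possible_cpu(cpu)
 *			counter += *per_cpu_ptr(cntr, cpu);
 *		return counter;
 *	}
 *
 * Writers simply do this_cpu_inc(*cntr), avoiding any shared cacheline
 * contention between CPUs.
 */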
static const char * const pt_names[] = {
	"expected",
	"eager",
	"invalid"
};

static const char *pt_name(u32 type)
{
	return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
}
/*
 * index is the index into the receive array
 */
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
		  u32 type, unsigned long pa, u16 order)
{
	u64 reg;
	void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
			      (dd->kregbase + RCV_ARRAY));

	if (!(dd->flags & HFI1_PRESENT))
		goto done;

	if (type == PT_INVALID) {
		pa = 0;
	} else if (type > PT_INVALID) {
		dd_dev_err(dd,
			   "unexpected receive array type %u for index %u, not handled\n",
			   type, index);
		goto done;
	}

	hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
		  pt_name(type), index, pa, (unsigned long)order);

#define RT_ADDR_SHIFT 12	/* 4KB kernel address boundary */
	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
					<< RCV_ARRAY_RT_ADDR_SHIFT;
	writeq(reg, base + (index * 8));

	if (type == PT_EAGER)
		/*
		 * Eager entries are written one-by-one so we have to push them
		 * after we write the entry.
		 */
		flush_wc();
done:
	return;
}
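/*
 * Worked example of the RcvArray encoding above, with made-up values
 * (the constants are the ones used in hfi1_put_tid(); the numbers are
 * illustrative only):
 *
 *	unsigned long pa = 0x12345000;	// 4 KB aligned buffer address
 *	u16 order = 2;			// buffer size code
 *	u64 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
 *		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
 *		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
 *			<< RCV_ARRAY_RT_ADDR_SHIFT;
 *
 * Here pa >> 12 == 0x12345 lands in the ADDR field, and the entry for
 * receive array index 10 is written at byte offset 10 * 8 from the
 * start of the array.
 */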
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 i;

	/* this could be optimized */
	for (i = rcd->eager_base; i < rcd->eager_base +
		     rcd->egrbufs.alloced; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);

	for (i = rcd->expected_base;
			i < rcd->expected_base + rcd->expected_count; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
}
int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
			struct hfi1_ctxt_info *kinfo)
{
	kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
		HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
	return 0;
}
struct hfi1_message_header *hfi1_get_msgheader(
				struct hfi1_devdata *dd, __le32 *rhf_addr)
{
	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));

	return (struct hfi1_message_header *)
			(rhf_addr - dd->rhf_offset + offset);
}
static const char * const ib_cfg_name_strings[] = {
	"HFI1_IB_CFG_LIDLMC",
	"HFI1_IB_CFG_LWID_DG_ENB",
	"HFI1_IB_CFG_LWID_ENB",
	"HFI1_IB_CFG_LWID",
	"HFI1_IB_CFG_SPD_ENB",
	"HFI1_IB_CFG_SPD",
	"HFI1_IB_CFG_RXPOL_ENB",
	"HFI1_IB_CFG_LREV_ENB",
	"HFI1_IB_CFG_LINKLATENCY",
	"HFI1_IB_CFG_HRTBT",
	"HFI1_IB_CFG_OP_VLS",
	"HFI1_IB_CFG_VL_HIGH_CAP",
	"HFI1_IB_CFG_VL_LOW_CAP",
	"HFI1_IB_CFG_OVERRUN_THRESH",
	"HFI1_IB_CFG_PHYERR_THRESH",
	"HFI1_IB_CFG_LINKDEFAULT",
	"HFI1_IB_CFG_PKEYS",
	"HFI1_IB_CFG_MTU",
	"HFI1_IB_CFG_LSTATE",
	"HFI1_IB_CFG_VL_HIGH_LIMIT",
	"HFI1_IB_CFG_PMA_TICKS",
	"HFI1_IB_CFG_PORT"
};

static const char *ib_cfg_name(int which)
{
	if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
		return "invalid";
	return ib_cfg_name_strings[which];
}
int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
{
	struct hfi1_devdata *dd = ppd->dd;
	int val = 0;

	switch (which) {
	case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
		val = ppd->link_width_enabled;
		break;
	case HFI1_IB_CFG_LWID: /* currently active Link-width */
		val = ppd->link_width_active;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
		val = ppd->link_speed_enabled;
		break;
	case HFI1_IB_CFG_SPD: /* current Link speed */
		val = ppd->link_speed_active;
		break;

	case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
	case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
	case HFI1_IB_CFG_LINKLATENCY:
		goto unimplemented;

	case HFI1_IB_CFG_OP_VLS:
		val = ppd->vls_operational;
		break;
	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
		val = VL_ARB_LOW_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		val = ppd->overrun_threshold;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		val = ppd->phy_error_threshold;
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		val = dd->link_default;
		break;

	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
	case HFI1_IB_CFG_PMA_TICKS:
	default:
unimplemented:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(dd,
				    "%s: which %s: not implemented\n",
				    __func__,
				    ib_cfg_name(which));
		break;
	}

	return val;
}
/*
 * The largest MAD packet size.
 */
#define MAX_MAD_PACKET 2048

/*
 * Return the maximum header bytes that can go on the _wire_
 * for this device.  This count includes the ICRC which is
 * not part of the packet held in memory but it is appended
 * by the hardware.
 * This is dependent on the device's receive header entry size.
 * HFI allows this to be set per-receive context, but the
 * driver presently enforces a global value.
 */
u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
{
	/*
	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
	 * the Receive Header Entry Size minus the PBC (or RHF) size
	 * plus one DW for the ICRC appended by HW.
	 *
	 * dd->rcd[0].rcvhdrqentsize is in DW.
	 * We use rcd[0] as all contexts will have the same value.  Also,
	 * the first kernel context would have been allocated by now so
	 * we are guaranteed a valid value.
	 */
	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
}
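/*
 * Worked example for the computation above (illustrative numbers):
 * with a receive header entry size of 32 DWs, the maximum wire header
 * is (32 - 2 + 1) << 2 = 124 bytes, i.e. 30 DWs of header after
 * dropping the PBC/RHF slot, plus 1 DW for the HW-appended ICRC.
 */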
/*
 * Set Send Length
 * @ppd - per port data
 *
 * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
 * registers compare against LRH.PktLen, so use the max bytes included
 * in the LRH.
 *
 * This routine changes all VL values except VL15, which it maintains at
 * the same value.
 */
static void set_send_length(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
	u32 maxvlmtu = dd->vld[15].mtu;
	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
	int i;

	for (i = 0; i < ppd->vls_supported; i++) {
		if (dd->vld[i].mtu > maxvlmtu)
			maxvlmtu = dd->vld[i].mtu;
		if (i <= 3)
			len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
				 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
				((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
		else
			len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
				 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
				((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
	}
	write_csr(dd, SEND_LEN_CHECK0, len1);
	write_csr(dd, SEND_LEN_CHECK1, len2);
	/* adjust kernel credit return thresholds based on new MTUs */
	/* all kernel receive contexts have the same hdrqentsize */
	for (i = 0; i < ppd->vls_supported; i++) {
		sc_set_cr_threshold(dd->vld[i].sc,
				    sc_mtu_to_threshold(dd->vld[i].sc,
							dd->vld[i].mtu,
							dd->rcd[0]->rcvhdrqentsize));
	}
	sc_set_cr_threshold(dd->vld[15].sc,
			    sc_mtu_to_threshold(dd->vld[15].sc,
						dd->vld[15].mtu,
						dd->rcd[0]->rcvhdrqentsize));

	/* Adjust maximum MTU for the port in DC */
	dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
		(ilog2(maxvlmtu >> 8) + 1);
	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
	len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
	len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
		DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
}
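/*
 * Example of the DC MTU-cap encoding above (illustrative): for
 * maxvlmtu = 8192, ilog2(8192 >> 8) + 1 = ilog2(32) + 1 = 6.  Only the
 * special 10240-byte MTU bypasses the power-of-two formula and uses
 * DCC_CFG_PORT_MTU_CAP_10240 directly.
 */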
static void set_lidlmc(struct hfi1_pportdata *ppd)
{
	int i;
	u64 sreg = 0;
	struct hfi1_devdata *dd = ppd->dd;
	u32 mask = ~((1U << ppd->lmc) - 1);
	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);

	if (dd->hfi1_snoop.mode_flag)
		dd_dev_info(dd, "Set lid/lmc while snooping");

	c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
		| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
	c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
			<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT)|
	      ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
			<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);

	/*
	 * Iterate over all the send contexts and set their SLID check
	 */
	sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
			SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
	       (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
			SEND_CTXT_CHECK_SLID_VALUE_SHIFT);

	for (i = 0; i < dd->chip_send_contexts; i++) {
		hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
	}

	/* Now we have to do the same thing for the sdma engines */
	sdma_update_lmc(dd, mask, ppd->lid);
}
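/*
 * Example of the LMC masking above (illustrative): with lmc = 2 the
 * mask is ~0x3, so a base lid of 0x1000 covers the four lids
 * 0x1000-0x1003.  The SLID check then accepts any source lid whose
 * masked value equals ppd->lid & mask.
 */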
static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
{
	unsigned long timeout;
	u32 curr_state;

	timeout = jiffies + msecs_to_jiffies(msecs);
	while (1) {
		curr_state = read_physical_state(dd);
		if (curr_state == state)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
				   state, curr_state);
			return -ETIMEDOUT;
		}
		usleep_range(1950, 2050); /* sleep 2ms-ish */
	}

	return 0;
}
/*
 * Helper for set_link_state().  Do not call except from that routine.
 * Expects ppd->hls_mutex to be held.
 *
 * @rem_reason value to be sent to the neighbor
 *
 * LinkDownReasons only set if transition succeeds.
 */
static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 pstate, previous_state;
	u32 last_local_state;
	u32 last_remote_state;
	int ret;
	int do_transition;
	int do_wait;

	previous_state = ppd->host_link_state;
	ppd->host_link_state = HLS_GOING_OFFLINE;
	pstate = read_physical_state(dd);
	if (pstate == PLS_OFFLINE) {
		do_transition = 0;	/* in right state */
		do_wait = 0;		/* ...no need to wait */
	} else if ((pstate & 0xff) == PLS_OFFLINE) {
		do_transition = 0;	/* in an offline transient state */
		do_wait = 1;		/* ...wait for it to settle */
	} else {
		do_transition = 1;	/* need to move to offline */
		do_wait = 1;		/* ...will need to wait */
	}

	if (do_transition) {
		ret = set_physical_link_state(dd,
					      PLS_OFFLINE | (rem_reason << 8));

		if (ret != HCMD_SUCCESS) {
			dd_dev_err(dd,
				   "Failed to transition to Offline link state, return %d\n",
				   ret);
			return -EINVAL;
		}
		if (ppd->offline_disabled_reason ==
				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
			ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
	}

	if (do_wait) {
		/* it can take a while for the link to go down */
		ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
		if (ret < 0)
			return ret;
	}

	/* make sure the logical state is also down */
	wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);

	/*
	 * Now in charge of LCB - must be after the physical state is
	 * offline.quiet and before host_link_state is changed.
	 */
	set_host_lcb_access(dd);
	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */

	if (ppd->port_type == PORT_TYPE_QSFP &&
	    ppd->qsfp_info.limiting_active &&
	    qsfp_mod_present(ppd))
		set_qsfp_tx(ppd, 0);

	/*
	 * The LNI has a mandatory wait time after the physical state
	 * moves to Offline.Quiet.  The wait time may be different
	 * depending on how the link went down.  The 8051 firmware
	 * will observe the needed wait time and only move to ready
	 * when that is completed.  The largest of the quiet timeouts
	 * is 6s, so wait that long and then at least 0.5s more for
	 * other transitions, and another 0.5s for a buffer.
	 */
	ret = wait_fm_ready(dd, 7000);
	if (ret) {
		dd_dev_err(dd,
			   "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
		/* state is really offline, so make it so */
		ppd->host_link_state = HLS_DN_OFFLINE;
		return ret;
	}

	/*
	 * The state is now offline and the 8051 is ready to accept host
	 * requests.
	 *	- change our state
	 *	- notify others if we were previously in a linkup state
	 */
	ppd->host_link_state = HLS_DN_OFFLINE;
	if (previous_state & HLS_UP) {
		/* went down while link was up */
		handle_linkup_change(dd, 0);
	} else if (previous_state
			& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
		/* went down while attempting link up */
		/* byte 1 of last_*_state is the failure reason */
		read_last_local_state(dd, &last_local_state);
		read_last_remote_state(dd, &last_remote_state);
		dd_dev_err(dd,
			   "LNI failure last states: local 0x%08x, remote 0x%08x\n",
			   last_local_state, last_remote_state);
	}

	/* the active link width (downgrade) is 0 on link down */
	ppd->link_width_active = 0;
	ppd->link_width_downgrade_tx_active = 0;
	ppd->link_width_downgrade_rx_active = 0;
	ppd->current_egress_rate = 0;
	return 0;
}
/* return the link state name */
static const char *link_state_name(u32 state)
{
	const char *name;
	int n = ilog2(state);
	static const char * const names[] = {
		[__HLS_UP_INIT_BP]	 = "INIT",
		[__HLS_UP_ARMED_BP]	 = "ARMED",
		[__HLS_UP_ACTIVE_BP]	 = "ACTIVE",
		[__HLS_DN_DOWNDEF_BP]	 = "DOWNDEF",
		[__HLS_DN_POLL_BP]	 = "POLL",
		[__HLS_DN_DISABLE_BP]	 = "DISABLE",
		[__HLS_DN_OFFLINE_BP]	 = "OFFLINE",
		[__HLS_VERIFY_CAP_BP]	 = "VERIFY_CAP",
		[__HLS_GOING_UP_BP]	 = "GOING_UP",
		[__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
		[__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
	};

	name = n < ARRAY_SIZE(names) ? names[n] : NULL;
	return name ? name : "unknown";
}
/* return the link state reason name */
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state)
{
	if (state == HLS_UP_INIT) {
		switch (ppd->linkinit_reason) {
		case OPA_LINKINIT_REASON_LINKUP:
			return "(LINKUP)";
		case OPA_LINKINIT_REASON_FLAPPING:
			return "(FLAPPING)";
		case OPA_LINKINIT_OUTSIDE_POLICY:
			return "(OUTSIDE_POLICY)";
		case OPA_LINKINIT_QUARANTINED:
			return "(QUARANTINED)";
		case OPA_LINKINIT_INSUFIC_CAPABILITY:
			return "(INSUFIC_CAPABILITY)";
		default:
			break;
		}
	}
	return "";
}
/*
 * driver_physical_state - convert the driver's notion of a port's
 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
 * Return -1 (converted to a u32) to indicate error.
 */
u32 driver_physical_state(struct hfi1_pportdata *ppd)
{
	switch (ppd->host_link_state) {
	case HLS_UP_INIT:
	case HLS_UP_ARMED:
	case HLS_UP_ACTIVE:
		return IB_PORTPHYSSTATE_LINKUP;
	case HLS_DN_POLL:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_DN_DISABLE:
		return IB_PORTPHYSSTATE_DISABLED;
	case HLS_DN_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_VERIFY_CAP:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_GOING_UP:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_GOING_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_LINK_COOLDOWN:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_DN_DOWNDEF:
	default:
		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
			   ppd->host_link_state);
		return -1;
	}
}
/*
 * driver_logical_state - convert the driver's notion of a port's
 * state (an HLS_*) into a logical state (a IB_PORT_*).  Return -1
 * (converted to a u32) to indicate error.
 */
u32 driver_logical_state(struct hfi1_pportdata *ppd)
{
	if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
		return IB_PORT_DOWN;

	switch (ppd->host_link_state & HLS_UP) {
	case HLS_UP_INIT:
		return IB_PORT_INIT;
	case HLS_UP_ARMED:
		return IB_PORT_ARMED;
	case HLS_UP_ACTIVE:
		return IB_PORT_ACTIVE;
	default:
		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
			   ppd->host_link_state);
		return -1;
	}
}
void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
			  u8 neigh_reason, u8 rem_reason)
{
	if (ppd->local_link_down_reason.latest == 0 &&
	    ppd->neigh_link_down_reason.latest == 0) {
		ppd->local_link_down_reason.latest = lcl_reason;
		ppd->neigh_link_down_reason.latest = neigh_reason;
		ppd->remote_link_down_reason = rem_reason;
	}
}
/*
 * Change the physical and/or logical link state.
 *
 * Do not call this routine while inside an interrupt.  It contains
 * calls to routines that can take multiple seconds to finish.
 *
 * Returns 0 on success, -errno on failure.
 */
int set_link_state(struct hfi1_pportdata *ppd, u32 state)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct ib_event event = {.device = NULL};
	int ret1, ret = 0;
	int was_up, is_down;
	int orig_new_state, poll_bounce;

	mutex_lock(&ppd->hls_lock);

	orig_new_state = state;
	if (state == HLS_DN_DOWNDEF)
		state = dd->link_default;

	/* interpret poll -> poll as a link bounce */
	poll_bounce = ppd->host_link_state == HLS_DN_POLL
			&& state == HLS_DN_POLL;

	dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
		    link_state_name(ppd->host_link_state),
		    link_state_name(orig_new_state),
		    poll_bounce ? "(bounce) " : "",
		    link_state_reason_name(ppd, state));

	was_up = !!(ppd->host_link_state & HLS_UP);

	/*
	 * If we're going to a (HLS_*) link state that implies the logical
	 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
	 * reset is_sm_config_started to 0.
	 */
	if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
		ppd->is_sm_config_started = 0;

	/*
	 * Do nothing if the states match.  Let a poll to poll link bounce
	 * go through.
	 */
	if (ppd->host_link_state == state && !poll_bounce)
		goto done;

	switch (state) {
	case HLS_UP_INIT:
		if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup
			    || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
			/*
			 * Quick link up jumps from polling to here.
			 *
			 * Whether in normal or loopback mode, the
			 * simulator jumps from polling to link up.
			 * Accept that here.
			 */
			/* OK */;
		} else if (ppd->host_link_state != HLS_GOING_UP) {
			goto unexpected;
		}

		ppd->host_link_state = HLS_UP_INIT;
		ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
		if (ret) {
			/* logical state didn't change, stay at going_up */
			ppd->host_link_state = HLS_GOING_UP;
			dd_dev_err(dd,
				   "%s: logical state did not change to INIT\n",
				   __func__);
		} else {
			/* clear old transient LINKINIT_REASON code */
			if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
				ppd->linkinit_reason =
					OPA_LINKINIT_REASON_LINKUP;

			/* enable the port */
			add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

			handle_linkup_change(dd, 1);
		}
		break;
	case HLS_UP_ARMED:
		if (ppd->host_link_state != HLS_UP_INIT)
			goto unexpected;

		ppd->host_link_state = HLS_UP_ARMED;
		set_logical_state(dd, LSTATE_ARMED);
		ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
		if (ret) {
			/* logical state didn't change, stay at init */
			ppd->host_link_state = HLS_UP_INIT;
			dd_dev_err(dd,
				   "%s: logical state did not change to ARMED\n",
				   __func__);
		}
		/*
		 * The simulator does not currently implement SMA messages,
		 * so neighbor_normal is not set.  Set it here when we first
		 * move to Armed.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
			ppd->neighbor_normal = 1;
		break;
	case HLS_UP_ACTIVE:
		if (ppd->host_link_state != HLS_UP_ARMED)
			goto unexpected;

		ppd->host_link_state = HLS_UP_ACTIVE;
		set_logical_state(dd, LSTATE_ACTIVE);
		ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
		if (ret) {
			/* logical state didn't change, stay at armed */
			ppd->host_link_state = HLS_UP_ARMED;
			dd_dev_err(dd,
				   "%s: logical state did not change to ACTIVE\n",
				   __func__);
		} else {
			/* tell all engines to go running */
			sdma_all_running(dd);

			/* Signal the IB layer that the port has gone active */
			event.device = &dd->verbs_dev.rdi.ibdev;
			event.element.port_num = ppd->port;
			event.event = IB_EVENT_PORT_ACTIVE;
		}
		break;
	case HLS_DN_POLL:
		if ((ppd->host_link_state == HLS_DN_DISABLE ||
		     ppd->host_link_state == HLS_DN_OFFLINE) &&
		    dd->dc_shutdown)
			dc_start(dd);
		/* Hand LED control to the DC */
		write_csr(dd, DCC_CFG_LED_CNTRL, 0);

		if (ppd->host_link_state != HLS_DN_OFFLINE) {
			u8 tmp = ppd->link_enabled;

			ret = goto_offline(ppd, ppd->remote_link_down_reason);
			if (ret) {
				ppd->link_enabled = tmp;
				break;
			}
			ppd->remote_link_down_reason = 0;

			if (ppd->driver_link_ready)
				ppd->link_enabled = 1;
		}

		set_all_slowpath(ppd->dd);
		ret = set_local_link_attributes(ppd);
		if (ret)
			break;

		ppd->port_error_action = 0;
		ppd->host_link_state = HLS_DN_POLL;

		if (quick_linkup) {
			/* quick linkup does not go into polling */
			ret = do_quick_linkup(dd);
		} else {
			ret1 = set_physical_link_state(dd, PLS_POLLING);
			if (ret1 != HCMD_SUCCESS) {
				dd_dev_err(dd,
					   "Failed to transition to Polling link state, return 0x%x\n",
					   ret1);
				ret = -EINVAL;
			}
		}
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
		/*
		 * If an error occurred above, go back to offline.  The
		 * caller may reschedule another attempt.
		 */
		if (ret)
			goto_offline(ppd, 0);
		break;
	case HLS_DN_DISABLE:
		/* link is disabled */
		ppd->link_enabled = 0;

		/* allow any state to transition to disabled */

		/* must transition to offline first */
		if (ppd->host_link_state != HLS_DN_OFFLINE) {
			ret = goto_offline(ppd, ppd->remote_link_down_reason);
			if (ret)
				break;
			ppd->remote_link_down_reason = 0;
		}

		ret1 = set_physical_link_state(dd, PLS_DISABLED);
		if (ret1 != HCMD_SUCCESS) {
			dd_dev_err(dd,
				   "Failed to transition to Disabled link state, return 0x%x\n",
				   ret1);
			ret = -EINVAL;
			break;
		}
		ppd->host_link_state = HLS_DN_DISABLE;
		dc_shutdown(dd);
		break;
	case HLS_DN_OFFLINE:
		if (ppd->host_link_state == HLS_DN_DISABLE)
			dc_start(dd);

		/* allow any state to transition to offline */
		ret = goto_offline(ppd, ppd->remote_link_down_reason);
		if (!ret)
			ppd->remote_link_down_reason = 0;
		break;
	case HLS_VERIFY_CAP:
		if (ppd->host_link_state != HLS_DN_POLL)
			goto unexpected;
		ppd->host_link_state = HLS_VERIFY_CAP;
		break;
	case HLS_GOING_UP:
		if (ppd->host_link_state != HLS_VERIFY_CAP)
			goto unexpected;

		ret1 = set_physical_link_state(dd, PLS_LINKUP);
		if (ret1 != HCMD_SUCCESS) {
			dd_dev_err(dd,
				   "Failed to transition to link up state, return 0x%x\n",
				   ret1);
			ret = -EINVAL;
			break;
		}
		ppd->host_link_state = HLS_GOING_UP;
		break;

	case HLS_GOING_OFFLINE:		/* transient within goto_offline() */
	case HLS_LINK_COOLDOWN:		/* transient within goto_offline() */
	default:
		dd_dev_info(dd, "%s: state 0x%x: not supported\n",
			    __func__, state);
		ret = -EINVAL;
		break;
	}

	is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
			HLS_DN_DISABLE | HLS_DN_OFFLINE));

	if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
	    ppd->neigh_link_down_reason.sma == 0) {
		ppd->local_link_down_reason.sma =
			ppd->local_link_down_reason.latest;
		ppd->neigh_link_down_reason.sma =
			ppd->neigh_link_down_reason.latest;
	}

	goto done;

unexpected:
	dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
		   __func__, link_state_name(ppd->host_link_state),
		   link_state_name(state));
	ret = -EINVAL;

done:
	mutex_unlock(&ppd->hls_lock);

	if (event.device)
		ib_dispatch_event(&event);

	return ret;
}
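/*
 * Typical use of the state machine above (a sketch; error handling and
 * the 8051-driven transitions are elided).  A driver-initiated bring-up
 * walks the externally requestable states in order, while the transient
 * states (GOING_OFFLINE, LINK_COOLDOWN) are entered only internally:
 *
 *	set_link_state(ppd, HLS_DN_POLL);	// start polling
 *	// ...firmware reports VerifyCap; driver requests HLS_VERIFY_CAP
 *	//    and then HLS_GOING_UP...
 *	set_link_state(ppd, HLS_UP_INIT);	// logical INIT
 *	set_link_state(ppd, HLS_UP_ARMED);	// normally from the SM
 *	set_link_state(ppd, HLS_UP_ACTIVE);
 */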
int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
{
	u64 reg;
	int ret = 0;

	switch (which) {
	case HFI1_IB_CFG_LIDLMC:
		set_lidlmc(ppd);
		break;
	case HFI1_IB_CFG_VL_HIGH_LIMIT:
		/*
		 * The VL Arbitrator high limit is sent in units of 4k
		 * bytes, while HFI stores it in units of 64 bytes.
		 */
		val *= 4096 / 64;
		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
			<< SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
		write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* HFI only supports POLL as the default link down state */
		if (val != HLS_DN_POLL)
			ret = -EINVAL;
		break;
	case HFI1_IB_CFG_OP_VLS:
		if (ppd->vls_operational != val) {
			ppd->vls_operational = val;
			if (!ppd->port)
				ret = -EINVAL;
			else
				ret = sdma_map_init(ppd->dd, ppd->port - 1,
						    val, NULL);
		}
		break;
	/*
	 * For link width, link width downgrade, and speed enable, always AND
	 * the setting with what is actually supported.  This has two benefits.
	 * First, enabled can't have unsupported values, no matter what the
	 * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
	 * "fill in with your supported value" have all the bits in the
	 * field set, so simply ANDing with supported has the desired result.
	 */
	case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
		ppd->link_width_enabled = val & ppd->link_width_supported;
		break;
	case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
		ppd->link_width_downgrade_enabled =
				val & ppd->link_width_downgrade_supported;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
		ppd->link_speed_enabled = val & ppd->link_speed_supported;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->overrun_threshold = val;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->phy_error_threshold = val;
		break;

	case HFI1_IB_CFG_MTU:
		set_send_length(ppd);
		break;

	case HFI1_IB_CFG_PKEYS:
		if (HFI1_CAP_IS_KSET(PKEY_CHECK))
			set_partition_keys(ppd);
		break;

	default:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(ppd->dd,
				    "%s: which %s, val 0x%x: not implemented\n",
				    __func__, ib_cfg_name(which), val);
		break;
	}

	return ret;
}
/* begin functions related to vl arbitration table caching */
static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
{
	int i;

	BUILD_BUG_ON(VL_ARB_TABLE_SIZE != VL_ARB_LOW_PRIO_TABLE_SIZE);
	BUILD_BUG_ON(VL_ARB_TABLE_SIZE != VL_ARB_HIGH_PRIO_TABLE_SIZE);

	/*
	 * Note that we always return values directly from the
	 * 'vl_arb_cache' (and do no CSR reads) in response to a
	 * 'Get(VLArbTable)'.  This is obviously correct after a
	 * 'Set(VLArbTable)', since the cache will then be up to
	 * date.  But it's also correct prior to any 'Set(VLArbTable)'
	 * since then both the cache, and the relevant h/w registers
	 * will be zeroed.
	 */

	for (i = 0; i < MAX_PRIO_TABLE; i++)
		spin_lock_init(&ppd->vl_arb_cache[i].lock);
}
/*
 * vl_arb_lock_cache
 *
 * All other vl_arb_* functions should be called only after locking
 * the cache.
 */
static inline struct vl_arb_cache *
vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
{
	if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
		return NULL;
	spin_lock(&ppd->vl_arb_cache[idx].lock);
	return &ppd->vl_arb_cache[idx];
}

static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
{
	spin_unlock(&ppd->vl_arb_cache[idx].lock);
}
static void vl_arb_get_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

static void vl_arb_set_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

static int vl_arb_match_cache(struct vl_arb_cache *cache,
			      struct ib_vl_weight_elem *vl)
{
	return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}
/* end functions related to vl arbitration table caching */
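/*
 * Usage pattern for the cache helpers above (a sketch): every access
 * must be bracketed by the lock helpers, e.g.
 *
 *	struct vl_arb_cache *vlc;
 *
 *	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
 *	vl_arb_get_cache(vlc, t);	// or vl_arb_set_cache()/_match_
 *	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
 *
 * fm_get_table() and fm_set_table() below follow exactly this shape.
 */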
static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
			  u32 size, struct ib_vl_weight_elem *vl)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	unsigned int i, is_up = 0;
	int drain, ret = 0;

	mutex_lock(&ppd->hls_lock);

	if (ppd->host_link_state & HLS_UP)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain) {
		/*
		 * Before adjusting VL arbitration weights, empty per-VL
		 * FIFOs, otherwise a packet whose VL weight is being
		 * set to 0 could get stuck in a FIFO with no chance to
		 * egress.
		 */
		ret = stop_drain_data_vls(dd);
		if (ret) {
			dd_dev_err(dd,
				   "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
				   __func__);
			goto err;
		}
	}

	for (i = 0; i < size; i++, vl++) {
		/*
		 * NOTE: The low priority shift and mask are used here, but
		 * they are the same for both the low and high registers.
		 */
		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
				<< SEND_LOW_PRIORITY_LIST_VL_SHIFT)
		      | (((u64)vl->weight
				& SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
				<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
		write_csr(dd, target + (i * 8), reg);
	}
	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}
/*
 * Read one credit merge VL register.
 */
static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
			   struct vl_limit *vll)
{
	u64 reg = read_csr(dd, csr);

	vll->dedicated = cpu_to_be16(
		(reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
		& SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
	vll->shared = cpu_to_be16(
		(reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
		& SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
}
/*
 * Read the current credit merge limits.
 */
static int get_buffer_control(struct hfi1_devdata *dd,
			      struct buffer_control *bc, u16 *overall_limit)
{
	u64 reg;
	int i;

	/* not all entries are filled in */
	memset(bc, 0, sizeof(*bc));

	/* OPA and HFI have a 1-1 mapping */
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]);

	/* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	bc->overall_shared_limit = cpu_to_be16(
		(reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
		& SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
	if (overall_limit)
		*overall_limit = (reg
			>> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
			& SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
	return sizeof(struct buffer_control);
}
static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
	u64 reg;
	int i;

	/* each register contains 16 SC->VLnt mappings, 4 bits each */
	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
	for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);

		dp->vlnt[2 * i] = byte & 0xf;
		dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
	}

	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
	for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);

		dp->vlnt[16 + (2 * i)] = byte & 0xf;
		dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
	}
	return sizeof(struct sc2vlnt);
}
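/*
 * Example of the nibble unpacking above (illustrative): if byte 0 of
 * DCC_CFG_SC_VL_TABLE_15_0 reads 0x53, then SC0 maps to VLnt 3 (low
 * nibble) and SC1 maps to VLnt 5 (high nibble).
 */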
static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
			      struct ib_vl_weight_elem *vl)
{
	unsigned int i;

	/* preemption is not supported; report an all-zero table */
	for (i = 0; i < nelems; i++, vl++) {
		vl->vl = 0;
		vl->weight = 0;
	}
}
static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
		  DC_SC_VL_VAL(15_0,
			       0, dp->vlnt[0] & 0xf,
			       1, dp->vlnt[1] & 0xf,
			       2, dp->vlnt[2] & 0xf,
			       3, dp->vlnt[3] & 0xf,
			       4, dp->vlnt[4] & 0xf,
			       5, dp->vlnt[5] & 0xf,
			       6, dp->vlnt[6] & 0xf,
			       7, dp->vlnt[7] & 0xf,
			       8, dp->vlnt[8] & 0xf,
			       9, dp->vlnt[9] & 0xf,
			       10, dp->vlnt[10] & 0xf,
			       11, dp->vlnt[11] & 0xf,
			       12, dp->vlnt[12] & 0xf,
			       13, dp->vlnt[13] & 0xf,
			       14, dp->vlnt[14] & 0xf,
			       15, dp->vlnt[15] & 0xf));
	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
		  DC_SC_VL_VAL(31_16,
			       16, dp->vlnt[16] & 0xf,
			       17, dp->vlnt[17] & 0xf,
			       18, dp->vlnt[18] & 0xf,
			       19, dp->vlnt[19] & 0xf,
			       20, dp->vlnt[20] & 0xf,
			       21, dp->vlnt[21] & 0xf,
			       22, dp->vlnt[22] & 0xf,
			       23, dp->vlnt[23] & 0xf,
			       24, dp->vlnt[24] & 0xf,
			       25, dp->vlnt[25] & 0xf,
			       26, dp->vlnt[26] & 0xf,
			       27, dp->vlnt[27] & 0xf,
			       28, dp->vlnt[28] & 0xf,
			       29, dp->vlnt[29] & 0xf,
			       30, dp->vlnt[30] & 0xf,
			       31, dp->vlnt[31] & 0xf));
}
static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
			u16 limit)
{
	/* a 0 is a valid setting */
	if (limit != 0)
		dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
			    what, (int)limit, idx);
}
/* change only the shared limit portion of SendCmGLobalCredit */
static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
{
	u64 reg;

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}

/* change only the total credit limit portion of SendCmGLobalCredit */
static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
{
	u64 reg;

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}
/* set the given per-VL shared limit */
static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
{
	u64 reg;
	u32 addr;

	if (vl < TXE_NUM_DATA_VL)
		addr = SEND_CM_CREDIT_VL + (8 * vl);
	else
		addr = SEND_CM_CREDIT_VL15;

	reg = read_csr(dd, addr);
	reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
	reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
	write_csr(dd, addr, reg);
}
/* set the given per-VL dedicated limit */
static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
{
	u64 reg;
	u32 addr;

	if (vl < TXE_NUM_DATA_VL)
		addr = SEND_CM_CREDIT_VL + (8 * vl);
	else
		addr = SEND_CM_CREDIT_VL15;

	reg = read_csr(dd, addr);
	reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
	reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
	write_csr(dd, addr, reg);
}
/* spin until the given per-VL status mask bits clear */
static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
				     const char *which)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
	while (1) {
		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;

		if (reg == 0)
			return;	/* success */
		if (time_after(jiffies, timeout))
			break;	/* timed out */
	}

	dd_dev_err(dd,
		   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
		   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
	/*
	 * If this occurs, it is likely there was a credit loss on the link.
	 * The only recovery from that is a link bounce.
	 */
	dd_dev_err(dd,
		   "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
}
/*
 * The number of credits on the VLs may be changed while everything
 * is "live", but the following algorithm must be followed due to
 * how the hardware is actually implemented.  In particular,
 * Return_Credit_Status[] is the only correct status check.
 *
 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
 *     set Global_Shared_Credit_Limit = 0
 *
 * mask0 = all VLs that are changing either dedicated or shared limits
 * set Shared_Limit[mask0] = 0
 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
 * if (changing any dedicated limit)
 *     mask1 = all VLs that are lowering dedicated limits
 *     lower Dedicated_Limit[mask1]
 *     spin until Return_Credit_Status[mask1] == 0
 * raise Dedicated_Limits
 * raise Shared_Limits
 * raise Global_Shared_Credit_Limit
 *
 * lower = if the new limit is lower, set the limit to the new value
 * raise = if the new limit is higher than the current value (may be changed
 *	earlier in the algorithm), set the new limit to the new value
 */
static int set_buffer_control(struct hfi1_devdata *dd,
			      struct buffer_control *new_bc)
{
	u64 changing_mask, ld_mask, stat_mask;
	int change_count;
	int i, use_all_mask;
	int this_shared_changing;
	/*
	 * A0: add the variable any_shared_limit_changing below and in the
	 * algorithm above.  If removing A0 support, it can be removed.
	 */
	int any_shared_limit_changing;
	struct buffer_control cur_bc;
	u8 changing[OPA_MAX_VLS];
	u8 lowering_dedicated[OPA_MAX_VLS];
	u16 cur_total;
	u32 new_total = 0;
	const u64 all_mask =
	SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;

#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
#define NUM_USABLE_VLS 16	/* look at VL15 and less */

	/* find the new total credits, do sanity check on unused VLs */
	for (i = 0; i < OPA_MAX_VLS; i++) {
		if (valid_vl(i)) {
			new_total += be16_to_cpu(new_bc->vl[i].dedicated);
			continue;
		}
		nonzero_msg(dd, i, "dedicated",
			    be16_to_cpu(new_bc->vl[i].dedicated));
		nonzero_msg(dd, i, "shared",
			    be16_to_cpu(new_bc->vl[i].shared));
		new_bc->vl[i].dedicated = 0;
		new_bc->vl[i].shared = 0;
	}
	new_total += be16_to_cpu(new_bc->overall_shared_limit);

	/* fetch the current values */
	get_buffer_control(dd, &cur_bc, &cur_total);

	/*
	 * Create the masks we will use.
	 */
	memset(changing, 0, sizeof(changing));
	memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
	/* NOTE: Assumes that the individual VL bits are adjacent and in
	   increasing order */
	stat_mask =
		SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
	changing_mask = 0;
	ld_mask = 0;
	change_count = 0;
	any_shared_limit_changing = 0;
	for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
		if (!valid_vl(i))
			continue;
		this_shared_changing = new_bc->vl[i].shared
						!= cur_bc.vl[i].shared;
		if (this_shared_changing)
			any_shared_limit_changing = 1;
		if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
				|| this_shared_changing) {
			changing[i] = 1;
			changing_mask |= stat_mask;
			change_count++;
		}
		if (be16_to_cpu(new_bc->vl[i].dedicated) <
					be16_to_cpu(cur_bc.vl[i].dedicated)) {
			lowering_dedicated[i] = 1;
			ld_mask |= stat_mask;
		}
	}

	/* bracket the credit change with a total adjustment */
	if (new_total > cur_total)
		set_global_limit(dd, new_total);

	/*
	 * Start the credit change algorithm.
	 */
	use_all_mask = 0;
	if ((be16_to_cpu(new_bc->overall_shared_limit) <
				be16_to_cpu(cur_bc.overall_shared_limit)) ||
	    (is_ax(dd) && any_shared_limit_changing)) {
		set_global_shared(dd, 0);
		cur_bc.overall_shared_limit = 0;
		use_all_mask = 1;
	}

	for (i = 0; i < NUM_USABLE_VLS; i++) {
		if (!valid_vl(i))
			continue;

		if (changing[i]) {
			set_vl_shared(dd, i, 0);
			cur_bc.vl[i].shared = 0;
		}
	}

	wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
				 "shared");

	if (change_count > 0) {
		for (i = 0; i < NUM_USABLE_VLS; i++) {
			if (!valid_vl(i))
				continue;

			if (lowering_dedicated[i]) {
				set_vl_dedicated(dd, i,
					be16_to_cpu(new_bc->vl[i].dedicated));
				cur_bc.vl[i].dedicated =
						new_bc->vl[i].dedicated;
			}
		}

		wait_for_vl_status_clear(dd, ld_mask, "dedicated");

		/* now raise all dedicated that are going up */
		for (i = 0; i < NUM_USABLE_VLS; i++) {
			if (!valid_vl(i))
				continue;

			if (be16_to_cpu(new_bc->vl[i].dedicated) >
					be16_to_cpu(cur_bc.vl[i].dedicated))
				set_vl_dedicated(dd, i,
					be16_to_cpu(new_bc->vl[i].dedicated));
		}
	}

	/* next raise all shared that are going up */
	for (i = 0; i < NUM_USABLE_VLS; i++) {
		if (!valid_vl(i))
			continue;

		if (be16_to_cpu(new_bc->vl[i].shared) >
				be16_to_cpu(cur_bc.vl[i].shared))
			set_vl_shared(dd, i,
				      be16_to_cpu(new_bc->vl[i].shared));
	}

	/* finally raise the global shared */
	if (be16_to_cpu(new_bc->overall_shared_limit) >
			be16_to_cpu(cur_bc.overall_shared_limit))
		set_global_shared(dd,
				  be16_to_cpu(new_bc->overall_shared_limit));

	/* bracket the credit change with a total adjustment */
	if (new_total < cur_total)
		set_global_limit(dd, new_total);
	return 0;
}
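/*
 * Example of the ordering above (illustrative numbers): moving VL0 from
 * dedicated 100/shared 50 to dedicated 40/shared 80 first zeroes the
 * VL0 shared limit, waits for Return_Credit_Status[VL0] to clear,
 * lowers the dedicated limit to 40, waits again, and only then raises
 * the shared limit to 80; credits are always fully returned before
 * they are re-granted elsewhere.
 */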
/*
 * Read the given fabric manager table.  Return the size of the
 * table (in bytes) on success, and a negative error code on
 * failure.
 */
int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
{
	int size;
	struct vl_arb_cache *vlc;

	switch (which) {
	case FM_TBL_VL_HIGH_ARB:
		size = 256;
		/*
		 * OPA specifies 128 elements (of 2 bytes each), though
		 * HFI supports only 16 elements in h/w.
		 */
		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
		vl_arb_get_cache(vlc, t);
		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
		break;
	case FM_TBL_VL_LOW_ARB:
		size = 256;
		/*
		 * OPA specifies 128 elements (of 2 bytes each), though
		 * HFI supports only 16 elements in h/w.
		 */
		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
		vl_arb_get_cache(vlc, t);
		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
		break;
	case FM_TBL_BUFFER_CONTROL:
		size = get_buffer_control(ppd->dd, t, NULL);
		break;
	case FM_TBL_SC2VLNT:
		size = get_sc2vlnt(ppd->dd, t);
		break;
	case FM_TBL_VL_PREEMPT_ELEMS:
		size = 256;
		/* OPA specifies 128 elements, of 2 bytes each */
		get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
		break;
	case FM_TBL_VL_PREEMPT_MATRIX:
		size = 256;
		/*
		 * OPA specifies that this is the same size as the VL
		 * arbitration tables (i.e., 256 bytes).
		 */
		break;
	default:
		return -EINVAL;
	}
	return size;
}
10934 int fm_set_table(struct hfi1_pportdata
*ppd
, int which
, void *t
)
10937 struct vl_arb_cache
*vlc
;
10940 case FM_TBL_VL_HIGH_ARB
:
10941 vlc
= vl_arb_lock_cache(ppd
, HI_PRIO_TABLE
);
10942 if (vl_arb_match_cache(vlc
, t
)) {
10943 vl_arb_unlock_cache(ppd
, HI_PRIO_TABLE
);
10946 vl_arb_set_cache(vlc
, t
);
10947 vl_arb_unlock_cache(ppd
, HI_PRIO_TABLE
);
10948 ret
= set_vl_weights(ppd
, SEND_HIGH_PRIORITY_LIST
,
10949 VL_ARB_HIGH_PRIO_TABLE_SIZE
, t
);
10951 case FM_TBL_VL_LOW_ARB
:
10952 vlc
= vl_arb_lock_cache(ppd
, LO_PRIO_TABLE
);
10953 if (vl_arb_match_cache(vlc
, t
)) {
10954 vl_arb_unlock_cache(ppd
, LO_PRIO_TABLE
);
10957 vl_arb_set_cache(vlc
, t
);
10958 vl_arb_unlock_cache(ppd
, LO_PRIO_TABLE
);
10959 ret
= set_vl_weights(ppd
, SEND_LOW_PRIORITY_LIST
,
10960 VL_ARB_LOW_PRIO_TABLE_SIZE
, t
);
10962 case FM_TBL_BUFFER_CONTROL
:
10963 ret
= set_buffer_control(ppd
->dd
, t
);
10965 case FM_TBL_SC2VLNT
:
10966 set_sc2vlnt(ppd
->dd
, t
);
/*
 * Disable all data VLs.
 *
 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
 */
static int disable_data_vls(struct hfi1_devdata *dd)
{
	if (is_ax(dd))
		return 1;

	pio_send_control(dd, PSC_DATA_VL_DISABLE);

	return 0;
}
/*
 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
 * Just re-enables all data VLs (the "fill" part happens
 * automatically - the name was chosen for symmetry with
 * stop_drain_data_vls()).
 *
 * Return 0 if successful, non-zero if the VLs cannot be enabled.
 */
int open_fill_data_vls(struct hfi1_devdata *dd)
{
	if (is_ax(dd))
		return 1;

	pio_send_control(dd, PSC_DATA_VL_ENABLE);

	return 0;
}
/*
 * drain_data_vls() - assumes that disable_data_vls() has been called,
 * waits for occupancy (of per-VL FIFOs) for all contexts, and SDMA
 * engines to drop to 0.
 */
static void drain_data_vls(struct hfi1_devdata *dd)
{
	sc_wait(dd);
	sdma_wait(dd);
	pause_for_credit_return(dd);
}
/*
 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
 *
 * Use open_fill_data_vls() to resume using data VLs.  This pair is
 * meant to be used like this:
 *
 * stop_drain_data_vls(dd);
 * // do things with per-VL resources
 * open_fill_data_vls(dd);
 */
int stop_drain_data_vls(struct hfi1_devdata *dd)
{
	int ret;

	ret = disable_data_vls(dd);
	if (ret == 0)
		drain_data_vls(dd);

	return ret;
}
/*
 * Convert a nanosecond time to a cclock count.  No matter how slow
 * the cclock, a non-zero ns will always have a non-zero result.
 */
u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
{
	u32 cclocks;

	if (dd->icode == ICODE_FPGA_EMULATION)
		cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
	else  /* simulation pretends to be ASIC */
		cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
	if (ns && !cclocks)	/* if ns nonzero, must be at least 1 */
		cclocks = 1;
	return cclocks;
}
/*
 * Convert a cclock count to nanoseconds.  No matter how slow
 * the cclock, a non-zero cclocks will always have a non-zero result.
 */
u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
{
	u32 ns;

	if (dd->icode == ICODE_FPGA_EMULATION)
		ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
	else  /* simulation pretends to be ASIC */
		ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
	if (cclocks && !ns)
		ns = 1;
	return ns;
}
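/*
 * Worked example for the two conversions above (the cclock period used
 * here is made up for illustration): if the cclock period were 2000 ps,
 * ns_to_cclock() would turn 824 ns into (824 * 1000) / 2000 = 412
 * cclocks, and cclock_to_ns() would turn those 412 cclocks back into
 * (412 * 2000) / 1000 = 824 ns.  The final non-zero clamp matters for
 * tiny inputs, e.g. 1 ns still yields 1 cclock.
 */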
/*
 * Dynamically adjust the receive interrupt timeout for a context based on
 * incoming packet rate.
 *
 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
 */
static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 timeout = rcd->rcvavail_timeout;

	/*
	 * This algorithm doubles or halves the timeout depending on whether
	 * the number of packets received in this interrupt were less than,
	 * or greater than or equal to, the interrupt count.
	 *
	 * The calculations below do not allow a steady state to be achieved.
	 * Only at the endpoints it is possible to have an unchanging
	 * timeout.
	 */
	if (npkts < rcv_intr_count) {
		/*
		 * Not enough packets arrived before the timeout, adjust
		 * timeout downward.
		 */
		if (timeout < 2) /* already at minimum? */
			return;
		timeout >>= 1;
	} else {
		/*
		 * More than enough packets arrived before the timeout, adjust
		 * timeout upward.
		 */
		if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
			return;
		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
	}

	rcd->rcvavail_timeout = timeout;
	/* timeout cannot be larger than rcv_intr_timeout_csr which has already
	   been verified to be in range */
	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
			(u64)timeout <<
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
}
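/*
 * Example of the doubling/halving behaviour above (illustrative): with
 * rcv_intr_count = 16 and a current timeout of 840, an interrupt that
 * saw only 5 packets halves the timeout to 420; a later interrupt that
 * saw 20 packets doubles it back to 840, never exceeding
 * dd->rcv_intr_timeout_csr.
 */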
void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
		    u32 intr_adjust, u32 npkts)
{
	struct hfi1_devdata *dd = rcd->dd;
	u64 reg;
	u32 ctxt = rcd->ctxt;

	/*
	 * Need to write timeout register before updating RcvHdrHead to ensure
	 * that a new value is used when the HW decides to restart counting.
	 */
	if (intr_adjust)
		adjust_rcv_timeout(rcd, npkts);
	if (updegr) {
		reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
			<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
	}
	reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
		(((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
			<< RCV_HDR_HEAD_HEAD_SHIFT);
	write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
}
u32 hdrqempty(struct hfi1_ctxtdata *rcd)
{
	u32 head, tail;

	head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
		& RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;

	if (rcd->rcvhdrtail_kvaddr)
		tail = get_rcvhdrtail(rcd);
	else
		tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);

	return head == tail;
}
/*
 * Context Control and Receive Array encoding for buffer size:
 *	0x0 invalid
 *	0x1   4 KB
 *	0x2   8 KB
 *	0x3  16 KB
 *	0x4  32 KB
 *	0x5  64 KB
 *	0x6 128 KB
 *	0x7 256 KB
 *	0x8 512 KB (Receive Array only)
 *	0x9   1 MB (Receive Array only)
 *	0xa   2 MB (Receive Array only)
 *
 *	0xB-0xF - reserved (Receive Array only)
 *
 * This routine assumes that the value has already been sanity checked.
 */
static u32 encoded_size(u32 size)
{
	switch (size) {
	case   4*1024: return 0x1;
	case   8*1024: return 0x2;
	case  16*1024: return 0x3;
	case  32*1024: return 0x4;
	case  64*1024: return 0x5;
	case 128*1024: return 0x6;
	case 256*1024: return 0x7;
	case 512*1024: return 0x8;
	case   1*1024*1024: return 0x9;
	case   2*1024*1024: return 0xa;
	}
	return 0x1;	/* if invalid, go with the minimum size */
}
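/*
 * Example of the size encoding above: a 64 KB eager buffer encodes as
 * 0x5, i.e. the codes step by one per power of two starting at
 * 0x1 == 4 KB, and anything unrecognized falls back to the 4 KB code.
 */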
void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
{
	struct hfi1_ctxtdata *rcd;
	u64 rcvctrl, reg;
	int did_enable = 0;

	rcd = dd->rcd[ctxt];
	if (!rcd)
		return;

	hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);

	rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
	/* if the context already enabled, don't do the extra steps */
	if ((op & HFI1_RCVCTRL_CTXT_ENB)
			&& !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
		/* reset the tail and hdr addresses, and sequence count */
		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
				rcd->rcvhdrq_phys);
		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					rcd->rcvhdrqtailaddr_phys);
		rcd->seq_cnt = 1;

		/* reset the cached receive header queue head value */
		rcd->head = 0;

		/*
		 * Zero the receive header queue so we don't get false
		 * positives when checking the sequence number.  The
		 * sequence numbers could land exactly on the same spot.
		 * E.g. a rcd restart before the receive header wrapped.
		 */
		memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);

		/* starting timeout */
		rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;

		/* enable the context */
		rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;

		/* clean the egr buffer size first */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
		rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
				& RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
					<< RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;

		/* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
		did_enable = 1;

		/* zero RcvEgrIndexHead */
		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);

		/* set eager count and base index */
		reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
			& RCV_EGR_CTRL_EGR_CNT_MASK)
		       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
			(((rcd->eager_base >> RCV_SHIFT)
			  & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
			 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
		write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);

		/*
		 * Set TID (expected) count and base index.
		 * rcd->expected_count is set to individual RcvArray entries,
		 * not pairs, and the CSR takes a pair-count in groups of
		 * four, so divide by 8.
		 */
		reg = (((rcd->expected_count >> RCV_SHIFT)
					& RCV_TID_CTRL_TID_PAIR_CNT_MASK)
				<< RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
		      (((rcd->expected_base >> RCV_SHIFT)
					& RCV_TID_CTRL_TID_BASE_INDEX_MASK)
				<< RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
		write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
		if (ctxt == HFI1_CTRL_CTXT)
			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
	}
	if (op & HFI1_RCVCTRL_CTXT_DIS) {
		write_csr(dd, RCV_VL15, 0);
		/*
		 * When receive context is being disabled turn on tail
		 * update with a dummy tail address and then disable
		 * receive context.
		 */
		if (dd->rcvhdrtail_dummy_physaddr) {
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					dd->rcvhdrtail_dummy_physaddr);
			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
		}

		rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
	}
	if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
		rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
	if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
	if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
		rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
		/* See comment on RcvCtxtCtrl.TailUpd above */
		if (!(op & HFI1_RCVCTRL_CTXT_DIS))
			rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	}
	if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
		rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
	if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
		/* In one-packet-per-eager mode, the size comes from
		   the RcvArray entry. */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
		rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
	}
	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
	rcd->rcvctrl = rcvctrl;
	hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
	write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);

	/* work around sticky RcvCtxtStatus.BlockedRHQFull */
	if (did_enable
	    && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
		reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
		if (reg != 0) {
			dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
				    ctxt, reg);
			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
			reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
			dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
				    ctxt, reg, reg == 0 ? "not" : "still");
		}
	}

	if (did_enable) {
		/*
		 * The interrupt timeout and count must be set after
		 * the context is enabled to take effect.
		 */
		/* set interrupt timeout */
		write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
				(u64)rcd->rcvavail_timeout <<
				RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);

		/* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
		reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
	}

	if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
		/*
		 * If the context has been disabled and the Tail Update has
		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
		 * so it doesn't contain an address that is invalid.
		 */
		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
				dd->rcvhdrtail_dummy_physaddr);
}
u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
		    u64 **cntrp)
{
	int ret;
	u64 val = 0;

	if (namep) {
		ret = dd->cntrnameslen;
		if (pos != 0) {
			dd_dev_err(dd, "read_cntrs does not support indexing");
			return 0;
		}
		*namep = dd->cntrnames;
	} else {
		const struct cntr_entry *entry;
		int i, j;

		ret = (dd->ndevcntrs) * sizeof(u64);
		if (pos != 0) {
			dd_dev_err(dd, "read_cntrs does not support indexing");
			return 0;
		}

		/* Get the start of the block of counters */
		*cntrp = dd->cntrs;

		/*
		 * Now go and fill in each counter in the block.
		 */
		for (i = 0; i < DEV_CNTR_LAST; i++) {
			entry = &dev_cntrs[i];
			hfi1_cdbg(CNTR, "reading %s", entry->name);
			if (entry->flags & CNTR_DISABLED) {
				/* Nothing */
				hfi1_cdbg(CNTR, "\tDisabled\n");
			} else if (entry->flags & CNTR_VL) {
				hfi1_cdbg(CNTR, "\tPer VL\n");
				for (j = 0; j < C_VL_COUNT; j++) {
					val = entry->rw_cntr(entry, dd, j,
							     CNTR_MODE_R, 0);
					hfi1_cdbg(CNTR,
						  "\t\tRead 0x%llx for %d\n",
						  val, j);
					dd->cntrs[entry->offset + j] = val;
				}
			} else if (entry->flags & CNTR_SDMA) {
				hfi1_cdbg(CNTR, "\t Per SDMA Engine\n");
				for (j = 0; j < dd->chip_sdma_engines;
				     j++) {
					val = entry->rw_cntr(entry, dd, j,
							     CNTR_MODE_R, 0);
					hfi1_cdbg(CNTR,
						  "\t\tRead 0x%llx for %d\n",
						  val, j);
					dd->cntrs[entry->offset + j] = val;
				}
			} else {
				val = entry->rw_cntr(entry, dd,
						     CNTR_INVALID_VL,
						     CNTR_MODE_R, 0);
				dd->cntrs[entry->offset] = val;
				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
			}
		}
	}
	return ret;
}
/*
 * Used by sysfs to create files for hfi stats to read
 */
u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
			char **namep, u64 **cntrp)
{
	int ret;
	u64 val = 0;

	if (namep) {
		ret = dd->portcntrnameslen;
		if (pos != 0) {
			dd_dev_err(dd, "index not supported");
			return 0;
		}
		*namep = dd->portcntrnames;
	} else {
		const struct cntr_entry *entry;
		struct hfi1_pportdata *ppd;
		int i, j;

		ret = (dd->nportcntrs) * sizeof(u64);
		if (pos != 0) {
			dd_dev_err(dd, "indexing not supported");
			return 0;
		}
		ppd = (struct hfi1_pportdata *)(dd + 1 + port);
		*cntrp = ppd->cntrs;

		for (i = 0; i < PORT_CNTR_LAST; i++) {
			entry = &port_cntrs[i];
			hfi1_cdbg(CNTR, "reading %s", entry->name);
			if (entry->flags & CNTR_DISABLED) {
				/* Nothing */
				hfi1_cdbg(CNTR, "\tDisabled\n");
				continue;
			}

			if (entry->flags & CNTR_VL) {
				hfi1_cdbg(CNTR, "\tPer VL");
				for (j = 0; j < C_VL_COUNT; j++) {
					val = entry->rw_cntr(entry, ppd, j,
							     CNTR_MODE_R, 0);
					hfi1_cdbg(CNTR,
						  "\t\tRead 0x%llx for %d",
						  val, j);
					ppd->cntrs[entry->offset + j] = val;
				}
			} else {
				val = entry->rw_cntr(entry, ppd,
						     CNTR_INVALID_VL,
						     CNTR_MODE_R, 0);
				ppd->cntrs[entry->offset] = val;
				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
			}
		}
	}
	return ret;
}
static void free_cntrs(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	if (dd->synth_stats_timer.data)
		del_timer_sync(&dd->synth_stats_timer);
	dd->synth_stats_timer.data = 0;
	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		kfree(ppd->cntrs);
		kfree(ppd->scntrs);
		free_percpu(ppd->ibport_data.rvp.rc_acks);
		free_percpu(ppd->ibport_data.rvp.rc_qacks);
		free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
		ppd->cntrs = NULL;
		ppd->scntrs = NULL;
		ppd->ibport_data.rvp.rc_acks = NULL;
		ppd->ibport_data.rvp.rc_qacks = NULL;
		ppd->ibport_data.rvp.rc_delayed_comp = NULL;
	}
	kfree(dd->portcntrnames);
	dd->portcntrnames = NULL;
	kfree(dd->cntrs);
	dd->cntrs = NULL;
	kfree(dd->scntrs);
	dd->scntrs = NULL;
	kfree(dd->cntrnames);
	dd->cntrnames = NULL;
}
#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

static u64 read_dev_port_cntr(struct hfi1_devdata *dd,
			      struct cntr_entry *entry,
			      u64 *psval, void *context, int vl)
{
	u64 val;
	u64 sval = *psval;

	if (entry->flags & CNTR_DISABLED) {
		dd_dev_err(dd, "Counter %s not enabled", entry->name);
		return 0;
	}

	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);

	val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);

	/* If it's a synthetic counter there is more work we need to do */
	if (entry->flags & CNTR_SYNTH) {
		if (sval == CNTR_MAX) {
			/* No need to read already saturated */
			return CNTR_MAX;
		}

		if (entry->flags & CNTR_32BIT) {
			/* 32bit counters can wrap multiple times */
			u64 upper = sval >> 32;
			u64 lower = (sval << 32) >> 32;

			if (lower > val) { /* hw wrapped */
				if (upper == CNTR_32BIT_MAX)
					val = CNTR_MAX;
				else
					upper++;
			}

			if (val != CNTR_MAX)
				val = (upper << 32) | val;

		} else {
			/* If we rolled we are saturated */
			if ((val < sval) || (val > CNTR_MAX))
				val = CNTR_MAX;
		}
	}

	*psval = val;

	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);

	return val;
}
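
/*
 * Illustrative sketch only (not part of the driver): the wrap handling
 * above extends a 32-bit hardware value into the 64-bit software counter
 * by carrying into the saved upper half whenever the low half goes
 * backwards.  The helper name below is hypothetical.
 */
static inline u64 example_extend_32bit(u64 sval, u64 hwval)
{
	u64 upper = sval >> 32;
	u64 lower = (sval << 32) >> 32;

	if (lower > hwval) {		/* hardware wrapped since last read */
		if (upper == CNTR_32BIT_MAX)
			return CNTR_MAX;	/* saturate, as above */
		upper++;
	}
	return (upper << 32) | hwval;
}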
static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
			       struct cntr_entry *entry,
			       u64 *psval, void *context, int vl, u64 data)
{
	u64 val;

	if (entry->flags & CNTR_DISABLED) {
		dd_dev_err(dd, "Counter %s not enabled", entry->name);
		return 0;
	}

	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);

	if (entry->flags & CNTR_SYNTH) {
		*psval = data;
		if (entry->flags & CNTR_32BIT) {
			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
					     (data << 32) >> 32);
			val = data; /* return the full 64bit value */
		} else {
			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
					     data);
		}
	} else {
		val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
	}

	*psval = val;

	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);

	return val;
}
u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &dev_cntrs[index];
	sval = dd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	return read_dev_port_cntr(dd, entry, sval, dd, vl);
}
u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &dev_cntrs[index];
	sval = dd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
}
u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &port_cntrs[index];
	sval = ppd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
	    (index <= C_RCV_HDR_OVF_LAST)) {
		/* We do not want to bother for disabled contexts */
		return 0;
	}

	return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
}
u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &port_cntrs[index];
	sval = ppd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
	    (index <= C_RCV_HDR_OVF_LAST)) {
		/* We do not want to bother for disabled contexts */
		return 0;
	}

	return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
}
static void update_synth_timer(unsigned long opaque)
{
	u64 cur_tx;
	u64 cur_rx;
	u64 total_flits;
	u8 update = 0;
	int i, j, vl;
	struct hfi1_pportdata *ppd;
	struct cntr_entry *entry;

	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;

	/*
	 * Rather than keep beating on the CSRs pick a minimal set that we can
	 * check to watch for potential roll over. We can do this by looking at
	 * the number of flits sent/recv. If the total flits exceeds 32bits then
	 * we have to iterate all the counters and update.
	 */
	entry = &dev_cntrs[C_DC_RCV_FLITS];
	cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);

	entry = &dev_cntrs[C_DC_XMIT_FLITS];
	cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);

	hfi1_cdbg(
	    CNTR,
	    "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
	    dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);

	if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
		/*
		 * May not be strictly necessary to update but it won't hurt and
		 * simplifies the logic here.
		 */
		update = 1;
		hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
			  dd->unit);
	} else {
		total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
		hfi1_cdbg(CNTR,
			  "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
			  total_flits, (u64)CNTR_32BIT_MAX);
		if (total_flits >= CNTR_32BIT_MAX) {
			hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
				  dd->unit);
			update = 1;
		}
	}

	if (update) {
		hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
		for (i = 0; i < DEV_CNTR_LAST; i++) {
			entry = &dev_cntrs[i];
			if (entry->flags & CNTR_VL) {
				for (vl = 0; vl < C_VL_COUNT; vl++)
					read_dev_cntr(dd, i, vl);
			} else {
				read_dev_cntr(dd, i, CNTR_INVALID_VL);
			}
		}
		ppd = (struct hfi1_pportdata *)(dd + 1);
		for (i = 0; i < dd->num_pports; i++, ppd++) {
			for (j = 0; j < PORT_CNTR_LAST; j++) {
				entry = &port_cntrs[j];
				if (entry->flags & CNTR_VL) {
					for (vl = 0; vl < C_VL_COUNT; vl++)
						read_port_cntr(ppd, j, vl);
				} else {
					read_port_cntr(ppd, j,
						       CNTR_INVALID_VL);
				}
			}
		}

		/*
		 * We want the value in the register. The goal is to keep track
		 * of the number of "ticks" not the counter value. In other
		 * words if the register rolls we want to notice it and go ahead
		 * and force an update.
		 */
		entry = &dev_cntrs[C_DC_XMIT_FLITS];
		dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
					     CNTR_MODE_R, 0);

		entry = &dev_cntrs[C_DC_RCV_FLITS];
		dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
					     CNTR_MODE_R, 0);

		hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
			  dd->unit, dd->last_tx, dd->last_rx);

	} else {
		hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
	}

	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
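
/*
 * Illustrative sketch only (not part of the driver): the tripwire above
 * forces a full counter sweep either when a raw flit register has gone
 * backwards (it rolled) or when the combined tx+rx delta since the last
 * run could have wrapped a 32-bit synthetic counter.  Hypothetical
 * helper expressing that test:
 */
static inline int example_needs_full_update(u64 cur_tx, u64 cur_rx,
					    u64 last_tx, u64 last_rx)
{
	/* a register going backwards means it rolled - always update */
	if (cur_tx < last_tx || cur_rx < last_rx)
		return 1;
	/* otherwise update only if the delta could wrap 32 bits */
	return ((cur_tx - last_tx) + (cur_rx - last_rx)) >= CNTR_32BIT_MAX;
}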
#define C_MAX_NAME 13 /* 12 chars + one for '\0' */
static int init_cntrs(struct hfi1_devdata *dd)
{
	int i, rcv_ctxts, j;
	size_t sz;
	char *p;
	char name[C_MAX_NAME];
	struct hfi1_pportdata *ppd;
	const char *bit_type_32 = ",32";
	const int bit_type_32_sz = strlen(bit_type_32);

	/* set up the stats timer; the add_timer is done at the end */
	setup_timer(&dd->synth_stats_timer, update_synth_timer,
		    (unsigned long)dd);

	/***********************/
	/* per device counters */
	/***********************/

	/* size names and determine how many we have */
	dd->ndevcntrs = 0;
	sz = 0;

	for (i = 0; i < DEV_CNTR_LAST; i++) {
		hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name);
		if (dev_cntrs[i].flags & CNTR_DISABLED) {
			hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
			continue;
		}

		if (dev_cntrs[i].flags & CNTR_VL) {
			hfi1_dbg_early("\tProcessing VL cntr\n");
			dev_cntrs[i].offset = dd->ndevcntrs;
			for (j = 0; j < C_VL_COUNT; j++) {
				memset(name, '\0', C_MAX_NAME);
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name,
					 vl_from_idx(j));
				sz += strlen(name);
				/* Add ",32" for 32-bit counters */
				if (dev_cntrs[i].flags & CNTR_32BIT)
					sz += bit_type_32_sz;
				sz++;
				hfi1_dbg_early("\t\t%s\n", name);
				dd->ndevcntrs++;
			}
		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
			hfi1_dbg_early(
				"\tProcessing per SDE counters chip engines %u\n",
				dd->chip_sdma_engines);
			dev_cntrs[i].offset = dd->ndevcntrs;
			for (j = 0; j < dd->chip_sdma_engines; j++) {
				memset(name, '\0', C_MAX_NAME);
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name, j);
				sz += strlen(name);
				/* Add ",32" for 32-bit counters */
				if (dev_cntrs[i].flags & CNTR_32BIT)
					sz += bit_type_32_sz;
				sz++;
				hfi1_dbg_early("\t\t%s\n", name);
				dd->ndevcntrs++;
			}
		} else {
			/* +1 for newline. */
			sz += strlen(dev_cntrs[i].name) + 1;
			/* Add ",32" for 32-bit counters */
			if (dev_cntrs[i].flags & CNTR_32BIT)
				sz += bit_type_32_sz;
			dev_cntrs[i].offset = dd->ndevcntrs;
			dd->ndevcntrs++;
			hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name);
		}
	}

	/* allocate space for the counter values */
	dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
	if (!dd->cntrs)
		goto bail;

	dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
	if (!dd->scntrs)
		goto bail;

	/* allocate space for the counter names */
	dd->cntrnameslen = sz;
	dd->cntrnames = kmalloc(sz, GFP_KERNEL);
	if (!dd->cntrnames)
		goto bail;

	/* fill in the names */
	for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
		if (dev_cntrs[i].flags & CNTR_DISABLED) {
			/* Nothing */
		} else if (dev_cntrs[i].flags & CNTR_VL) {
			for (j = 0; j < C_VL_COUNT; j++) {
				memset(name, '\0', C_MAX_NAME);
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name,
					 vl_from_idx(j));
				memcpy(p, name, strlen(name));
				p += strlen(name);

				/* Counter is 32 bits */
				if (dev_cntrs[i].flags & CNTR_32BIT) {
					memcpy(p, bit_type_32, bit_type_32_sz);
					p += bit_type_32_sz;
				}

				*p++ = '\n';
			}
		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
			for (j = 0; j < dd->chip_sdma_engines; j++) {
				memset(name, '\0', C_MAX_NAME);
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name, j);
				memcpy(p, name, strlen(name));
				p += strlen(name);

				/* Counter is 32 bits */
				if (dev_cntrs[i].flags & CNTR_32BIT) {
					memcpy(p, bit_type_32, bit_type_32_sz);
					p += bit_type_32_sz;
				}

				*p++ = '\n';
			}
		} else {
			memcpy(p, dev_cntrs[i].name,
			       strlen(dev_cntrs[i].name));
			p += strlen(dev_cntrs[i].name);

			/* Counter is 32 bits */
			if (dev_cntrs[i].flags & CNTR_32BIT) {
				memcpy(p, bit_type_32, bit_type_32_sz);
				p += bit_type_32_sz;
			}

			*p++ = '\n';
		}
	}

	/*********************/
	/* per port counters */
	/*********************/

	/*
	 * Go through the counters for the overflows and disable the ones we
	 * don't need. This varies based on platform so we need to do it
	 * dynamically here.
	 */
	rcv_ctxts = dd->num_rcv_contexts;
	for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
	     i <= C_RCV_HDR_OVF_LAST; i++) {
		port_cntrs[i].flags |= CNTR_DISABLED;
	}

	/* size port counter names and determine how many we have */
	sz = 0;
	dd->nportcntrs = 0;
	for (i = 0; i < PORT_CNTR_LAST; i++) {
		hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name);
		if (port_cntrs[i].flags & CNTR_DISABLED) {
			hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
			continue;
		}

		if (port_cntrs[i].flags & CNTR_VL) {
			hfi1_dbg_early("\tProcessing VL cntr\n");
			port_cntrs[i].offset = dd->nportcntrs;
			for (j = 0; j < C_VL_COUNT; j++) {
				memset(name, '\0', C_MAX_NAME);
				snprintf(name, C_MAX_NAME, "%s%d",
					 port_cntrs[i].name,
					 vl_from_idx(j));
				sz += strlen(name);
				/* Add ",32" for 32-bit counters */
				if (port_cntrs[i].flags & CNTR_32BIT)
					sz += bit_type_32_sz;
				sz++;
				hfi1_dbg_early("\t\t%s\n", name);
				dd->nportcntrs++;
			}
		} else {
			/* +1 for newline */
			sz += strlen(port_cntrs[i].name) + 1;
			/* Add ",32" for 32-bit counters */
			if (port_cntrs[i].flags & CNTR_32BIT)
				sz += bit_type_32_sz;
			port_cntrs[i].offset = dd->nportcntrs;
			dd->nportcntrs++;
			hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name);
		}
	}

	/* allocate space for the counter names */
	dd->portcntrnameslen = sz;
	dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
	if (!dd->portcntrnames)
		goto bail;

	/* fill in port cntr names */
	for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
		if (port_cntrs[i].flags & CNTR_DISABLED)
			continue;

		if (port_cntrs[i].flags & CNTR_VL) {
			for (j = 0; j < C_VL_COUNT; j++) {
				memset(name, '\0', C_MAX_NAME);
				snprintf(name, C_MAX_NAME, "%s%d",
					 port_cntrs[i].name,
					 vl_from_idx(j));
				memcpy(p, name, strlen(name));
				p += strlen(name);

				/* Counter is 32 bits */
				if (port_cntrs[i].flags & CNTR_32BIT) {
					memcpy(p, bit_type_32, bit_type_32_sz);
					p += bit_type_32_sz;
				}

				*p++ = '\n';
			}
		} else {
			memcpy(p, port_cntrs[i].name,
			       strlen(port_cntrs[i].name));
			p += strlen(port_cntrs[i].name);

			/* Counter is 32 bits */
			if (port_cntrs[i].flags & CNTR_32BIT) {
				memcpy(p, bit_type_32, bit_type_32_sz);
				p += bit_type_32_sz;
			}

			*p++ = '\n';
		}
	}

	/* allocate per port storage for counter values */
	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
		if (!ppd->cntrs)
			goto bail;

		ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
		if (!ppd->scntrs)
			goto bail;
	}

	/* CPU counters need to be allocated and zeroed */
	if (init_cpu_counters(dd))
		goto bail;

	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
	return 0;
bail:
	free_cntrs(dd);
	return -ENOMEM;
}
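
/*
 * Illustrative sketch only (not part of the driver): the sizing passes
 * above reserve, per counter name, strlen(name) + 1 bytes for the
 * trailing newline, plus strlen(",32") when the counter is 32 bits
 * wide.  Hypothetical helper showing the per-entry arithmetic:
 */
static inline size_t example_cntr_name_size(const char *name, int is_32bit)
{
	size_t sz = strlen(name) + 1;	/* +1 for the '\n' separator */

	if (is_32bit)
		sz += strlen(",32");	/* ",32" suffix for 32-bit counters */
	return sz;
}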
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
{
	switch (chip_lstate) {
	default:
		dd_dev_err(dd,
			   "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
			   chip_lstate);
		/* fall through */
	case LSTATE_DOWN:
		return IB_PORT_DOWN;
	case LSTATE_INIT:
		return IB_PORT_INIT;
	case LSTATE_ARMED:
		return IB_PORT_ARMED;
	case LSTATE_ACTIVE:
		return IB_PORT_ACTIVE;
	}
}
u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
{
	/* look at the HFI meta-states only */
	switch (chip_pstate & 0xf0) {
	default:
		dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
			   chip_pstate);
		/* fall through */
	case PLS_DISABLED:
		return IB_PORTPHYSSTATE_DISABLED;
	case PLS_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case PLS_POLLING:
		return IB_PORTPHYSSTATE_POLLING;
	case PLS_CONFIGPHY:
		return IB_PORTPHYSSTATE_TRAINING;
	case PLS_LINKUP:
		return IB_PORTPHYSSTATE_LINKUP;
	case PLS_PHYTEST:
		return IB_PORTPHYSSTATE_PHY_TEST;
	}
}
/* return the OPA port logical state name */
const char *opa_lstate_name(u32 lstate)
{
	static const char * const port_logical_names[] = {
		"PORT_NOP",
		"PORT_DOWN",
		"PORT_INIT",
		"PORT_ARMED",
		"PORT_ACTIVE",
		"PORT_ACTIVE_DEFER",
	};
	if (lstate < ARRAY_SIZE(port_logical_names))
		return port_logical_names[lstate];
	return "unknown";
}
/* return the OPA port physical state name */
const char *opa_pstate_name(u32 pstate)
{
	static const char * const port_physical_names[] = {
		"PHYS_NOP",
		"reserved1",
		"PHYS_POLL",
		"PHYS_DISABLED",
		"PHYS_TRAINING",
		"PHYS_LINKUP",
		"PHYS_LINK_ERR_RECOVER",
		"PHYS_PHY_TEST",
		"reserved8",
		"PHYS_OFFLINE",
		"PHYS_GANGED",
		"PHYS_TEST",
	};
	if (pstate < ARRAY_SIZE(port_physical_names))
		return port_physical_names[pstate];
	return "unknown";
}
/*
 * Read the hardware link state and set the driver's cached value of it.
 * Return the (new) current value.
 */
u32 get_logical_state(struct hfi1_pportdata *ppd)
{
	u32 new_state;

	new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
	if (new_state != ppd->lstate) {
		dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
			    opa_lstate_name(new_state), new_state);
		ppd->lstate = new_state;
	}
	/*
	 * Set port status flags in the page mapped into userspace
	 * memory. Do it here to ensure a reliable state - this is
	 * the only function called by all state handling code.
	 * Always set the flags due to the fact that the cache value
	 * might have been changed explicitly outside of this
	 * function.
	 */
	if (ppd->statusp) {
		switch (ppd->lstate) {
		case IB_PORT_DOWN:
		case IB_PORT_INIT:
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
			break;
		case IB_PORT_ARMED:
			*ppd->statusp |= HFI1_STATUS_IB_CONF;
			break;
		case IB_PORT_ACTIVE:
			*ppd->statusp |= HFI1_STATUS_IB_READY;
			break;
		}
	}
	return ppd->lstate;
}
/**
 * wait_logical_linkstate - wait for an IB link state change to occur
 * @ppd: port device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for IB link state change to occur.
 * For now, take the easy polling route.
 * Returns 0 if state reached, otherwise -ETIMEDOUT.
 */
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs)
{
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(msecs);
	while (1) {
		if (get_logical_state(ppd) == state)
			return 0;
		if (time_after(jiffies, timeout))
			break;
		msleep(20);
	}
	dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);

	return -ETIMEDOUT;
}
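
/*
 * Illustrative sketch only (not part of the driver): how a caller might
 * use wait_logical_linkstate() - wait for the link to go active and fall
 * back on the timeout.  The 1000 ms budget is an example choice, not one
 * mandated by the hardware; the helper name is hypothetical.
 */
static inline int example_wait_active(struct hfi1_pportdata *ppd)
{
	int ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);

	if (ret == -ETIMEDOUT)
		dd_dev_err(ppd->dd, "link did not go active\n");
	return ret;
}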
u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
{
	static u32 remembered_state = 0xff;
	u32 pstate;
	u32 ib_pstate;

	pstate = read_physical_state(ppd->dd);
	ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
	if (remembered_state != ib_pstate) {
		dd_dev_info(ppd->dd,
			    "%s: physical state changed to %s (0x%x), phy 0x%x\n",
			    __func__, opa_pstate_name(ib_pstate), ib_pstate,
			    pstate);
		remembered_state = ib_pstate;
	}
	return ib_pstate;
}
/*
 * Read/modify/write ASIC_QSFP register bits as selected by mask
 * data: 0 or 1 in the positions depending on what needs to be written
 * dir: 0 for read, 1 for write
 * mask: select by setting
 */
u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
		  u32 mask)
{
	u64 qsfp_oe, target_oe;

	target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
	if (mask) {
		/* We are writing register bits, so lock access */
		dir &= mask;
		data &= mask;

		qsfp_oe = read_csr(dd, target_oe);
		qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
		write_csr(dd, target_oe, qsfp_oe);
	}
	/* We are exclusively reading bits here, but it is unlikely
	 * we'll get valid data when we set the direction of the pin
	 * in the same call, so read should call this function again
	 * to get valid data
	 */
	return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
}
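
/*
 * Illustrative sketch only (not part of the driver): per the comment
 * above, data sampled in the same call that changes a pin direction may
 * not be valid, so a reader calls hfi1_gpio_mod() twice - once to set
 * direction, once to sample.  The helper name is hypothetical.
 */
static inline u64 example_gpio_read(struct hfi1_devdata *dd, u32 target,
				    u32 mask)
{
	/* first call: set the selected pins to input (dir = 0) */
	hfi1_gpio_mod(dd, target, 0, 0, mask);
	/* second call: mask = 0 leaves direction alone, just samples */
	return hfi1_gpio_mod(dd, target, 0, 0, 0) & mask;
}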
#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

#define SET_STATIC_RATE_CONTROL_SMASK(r) \
(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

int hfi1_init_ctxt(struct send_context *sc)
{
	if (sc != NULL) {
		struct hfi1_devdata *dd = sc->dd;
		u64 reg;
		u8 set = (sc->type == SC_USER ?
			  HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
			  HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
		reg = read_kctxt_csr(dd, sc->hw_context,
				     SEND_CTXT_CHECK_ENABLE);
		if (set)
			CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
		else
			SET_STATIC_RATE_CONTROL_SMASK(reg);
		write_kctxt_csr(dd, sc->hw_context,
				SEND_CTXT_CHECK_ENABLE, reg);
	}
	return 0;
}
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
{
	int ret = 0;
	u64 reg;

	if (dd->icode != ICODE_RTL_SILICON) {
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(dd, "%s: tempsense not supported by HW\n",
				    __func__);
		return -EINVAL;
	}
	reg = read_csr(dd, ASIC_STS_THERM);
	temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
		      ASIC_STS_THERM_CURR_TEMP_MASK);
	temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
			ASIC_STS_THERM_LO_TEMP_MASK);
	temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
			ASIC_STS_THERM_HI_TEMP_MASK);
	temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
			  ASIC_STS_THERM_CRIT_TEMP_MASK);
	/* triggers is a 3-bit value - 1 bit per trigger. */
	temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);

	return ret;
}
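
/*
 * Illustrative sketch only (not part of the driver): every field above
 * is pulled out of ASIC_STS_THERM with the same shift-and-mask idiom.
 * Hypothetical helper making the pattern explicit:
 */
static inline u64 example_extract_field(u64 reg, int shift, u64 mask)
{
	return (reg >> shift) & mask;
}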
/* ========================================================================= */

/*
 * Enable/disable chip from delivering interrupts.
 */
void set_intr_state(struct hfi1_devdata *dd, u32 enable)
{
	int i;

	/*
	 * In HFI, the mask needs to be 1 to allow interrupts.
	 */
	if (enable) {
		/* enable all interrupts */
		for (i = 0; i < CCE_NUM_INT_CSRS; i++)
			write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
	} else {
		for (i = 0; i < CCE_NUM_INT_CSRS; i++)
			write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
	}
}
/*
 * Clear all interrupt sources on the chip.
 */
static void clear_all_interrupts(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);

	write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
	write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
	write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
	for (i = 0; i < dd->chip_sdma_engines; i++)
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);

	write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
	write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
	write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
}
/* Move to pcie.c? */
static void disable_intx(struct pci_dev *pdev)
{
	pci_intx(pdev, 0);
}

static void clean_up_interrupts(struct hfi1_devdata *dd)
{
	int i;

	/* remove irqs - must happen before disabling/turning off */
	if (dd->num_msix_entries) {
		/* MSI-X */
		struct hfi1_msix_entry *me = dd->msix_entries;

		for (i = 0; i < dd->num_msix_entries; i++, me++) {
			if (me->arg == NULL) /* => no irq, no affinity */
				continue;
			hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
			free_irq(me->msix.vector, me->arg);
		}
	} else {
		/* INTx */
		if (dd->requested_intx_irq) {
			free_irq(dd->pcidev->irq, dd);
			dd->requested_intx_irq = 0;
		}
	}

	/* turn off interrupts */
	if (dd->num_msix_entries) {
		/* MSI-X */
		pci_disable_msix(dd->pcidev);
	} else {
		/* INTx */
		disable_intx(dd->pcidev);
	}

	/* clean structures */
	kfree(dd->msix_entries);
	dd->msix_entries = NULL;
	dd->num_msix_entries = 0;
}
/*
 * Remap the interrupt source from the general handler to the given MSI-X
 * interrupt.
 */
static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
{
	u64 reg;
	int m, n;

	/* clear from the handled mask of the general interrupt */
	m = isrc / 64;
	n = isrc % 64;
	dd->gi_mask[m] &= ~((u64)1 << n);

	/* direct the chip source to the given MSI-X interrupt */
	m = isrc / 8;
	n = isrc % 8;
	reg = read_csr(dd, CCE_INT_MAP + (8*m));
	reg &= ~((u64)0xff << (8*n));
	reg |= ((u64)msix_intr & 0xff) << (8*n);
	write_csr(dd, CCE_INT_MAP + (8*m), reg);
}
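
/*
 * Illustrative sketch only (not part of the driver): each CCE_INT_MAP
 * CSR holds eight 8-bit MSI-X vector numbers, so source isrc lives in
 * CSR isrc / 8 at byte lane isrc % 8, while the 64-bit gi_mask words are
 * indexed with isrc / 64 and isrc % 64.  Example index math only:
 */
static inline void example_intr_indexes(int isrc, int *csr, int *lane,
					int *word, int *bit)
{
	*csr = isrc / 8;	/* which CCE_INT_MAP register */
	*lane = isrc % 8;	/* which byte within it */
	*word = isrc / 64;	/* which gi_mask entry */
	*bit = isrc % 64;	/* which bit within it */
}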
static void remap_sdma_interrupts(struct hfi1_devdata *dd,
				  int engine, int msix_intr)
{
	/*
	 * SDMA engine interrupt sources grouped by type, rather than
	 * engine. Per-engine interrupts are as follows:
	 *	SDMA
	 *	SDMAProgress
	 *	SDMAIdle
	 */
	remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
		   msix_intr);
	remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
		   msix_intr);
	remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
		   msix_intr);
}
static int request_intx_irq(struct hfi1_devdata *dd)
{
	int ret;

	snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
		 dd->unit);
	ret = request_irq(dd->pcidev->irq, general_interrupt,
			  IRQF_SHARED, dd->intx_name, dd);
	if (ret)
		dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
			   ret);
	else
		dd->requested_intx_irq = 1;
	return ret;
}
static int request_msix_irqs(struct hfi1_devdata *dd)
{
	int first_general, last_general;
	int first_sdma, last_sdma;
	int first_rx, last_rx;
	int i, ret = 0;

	/* calculate the ranges we are going to use */
	first_general = 0;
	first_sdma = last_general = first_general + 1;
	first_rx = last_sdma = first_sdma + dd->num_sdma;
	last_rx = first_rx + dd->n_krcv_queues;

	/*
	 * Sanity check - the code expects all SDMA chip source
	 * interrupts to be in the same CSR, starting at bit 0.  Verify
	 * that this is true by checking the bit location of the start.
	 */
	BUILD_BUG_ON(IS_SDMA_START % 64);

	for (i = 0; i < dd->num_msix_entries; i++) {
		struct hfi1_msix_entry *me = &dd->msix_entries[i];
		const char *err_info;
		irq_handler_t handler;
		irq_handler_t thread = NULL;
		void *arg;
		int idx;
		struct hfi1_ctxtdata *rcd = NULL;
		struct sdma_engine *sde = NULL;

		/* obtain the arguments to request_irq */
		if (first_general <= i && i < last_general) {
			idx = i - first_general;
			handler = general_interrupt;
			arg = dd;
			snprintf(me->name, sizeof(me->name),
				 DRIVER_NAME "_%d", dd->unit);
			err_info = "general";
			me->type = IRQ_GENERAL;
		} else if (first_sdma <= i && i < last_sdma) {
			idx = i - first_sdma;
			sde = &dd->per_sdma[idx];
			handler = sdma_interrupt;
			arg = sde;
			snprintf(me->name, sizeof(me->name),
				 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
			err_info = "sdma";
			remap_sdma_interrupts(dd, idx, i);
			me->type = IRQ_SDMA;
		} else if (first_rx <= i && i < last_rx) {
			idx = i - first_rx;
			rcd = dd->rcd[idx];
			/* no interrupt if no rcd */
			if (!rcd)
				continue;
			/*
			 * Set the interrupt register and mask for this
			 * context's interrupt.
			 */
			rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
			rcd->imask = ((u64)1) <<
					((IS_RCVAVAIL_START+idx) % 64);
			handler = receive_context_interrupt;
			thread = receive_context_thread;
			arg = rcd;
			snprintf(me->name, sizeof(me->name),
				 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
			err_info = "receive context";
			remap_intr(dd, IS_RCVAVAIL_START + idx, i);
			me->type = IRQ_RCVCTXT;
		} else {
			/* not in our expected range - complain, then
			   ignore it */
			dd_dev_err(dd,
				   "Unexpected extra MSI-X interrupt %d\n", i);
			continue;
		}
		/* no argument, no interrupt */
		if (arg == NULL)
			continue;
		/* make sure the name is terminated */
		me->name[sizeof(me->name)-1] = 0;

		ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
					   me->name, arg);
		if (ret) {
			dd_dev_err(dd,
				   "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
				   err_info, me->msix.vector, idx, ret);
			return ret;
		}
		/*
		 * assign arg after request_irq call, so it will be
		 * cleaned up
		 */
		me->arg = arg;

		ret = hfi1_get_irq_affinity(dd, me);
		if (ret)
			dd_dev_err(dd,
				   "unable to pin IRQ %d\n", ret);
	}

	return ret;
}
/*
 * Set the general handler to accept all interrupts, remap all
 * chip interrupts back to MSI-X 0.
 */
static void reset_interrupts(struct hfi1_devdata *dd)
{
	int i;

	/* all interrupts handled by the general handler */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		dd->gi_mask[i] = ~(u64)0;

	/* all chip interrupts map to MSI-X 0 */
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP + (8*i), 0);
}
static int set_up_interrupts(struct hfi1_devdata *dd)
{
	struct hfi1_msix_entry *entries;
	u32 total, request;
	int i, ret;
	int single_interrupt = 0; /* we expect to have all the interrupts */

	/*
	 * Interrupt count:
	 *	1 general, "slow path" interrupt (includes the SDMA engines
	 *		slow source, SDMACleanupDone)
	 *	N interrupts - one per used SDMA engine
	 *	M interrupt - one per kernel receive context
	 */
	total = 1 + dd->num_sdma + dd->n_krcv_queues;

	entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
	if (!entries) {
		ret = -ENOMEM;
		goto fail;
	}
	/* 1-1 MSI-X entry assignment */
	for (i = 0; i < total; i++)
		entries[i].msix.entry = i;

	/* ask for MSI-X interrupts */
	request = total;
	request_msix(dd, &request, entries);

	if (request == 0) {
		/* using INTx */
		/* dd->num_msix_entries already zero */
		kfree(entries);
		single_interrupt = 1;
		dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
	} else {
		/* using MSI-X */
		dd->num_msix_entries = request;
		dd->msix_entries = entries;

		if (request != total) {
			/* using MSI-X, with reduced interrupts */
			dd_dev_err(
				dd,
				"cannot handle reduced interrupt case, want %u, got %u\n",
				total, request);
			ret = -EINVAL;
			goto fail;
		}
		dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
	}

	/* mask all interrupts */
	set_intr_state(dd, 0);
	/* clear all pending interrupts */
	clear_all_interrupts(dd);

	/* reset general handler mask, chip MSI-X mappings */
	reset_interrupts(dd);

	if (single_interrupt)
		ret = request_intx_irq(dd);
	else
		ret = request_msix_irqs(dd);
	if (ret)
		goto fail;

	return 0;

fail:
	clean_up_interrupts(dd);
	return ret;
}
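
/*
 * Illustrative sketch only (not part of the driver): the MSI-X request
 * above is sized as 1 general interrupt, plus one per SDMA engine, plus
 * one per kernel receive context.  Example arithmetic only:
 */
static inline u32 example_msix_total(u32 num_sdma, u32 n_krcv_queues)
{
	return 1 + num_sdma + n_krcv_queues;
}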
/*
 * Set up context values in dd.  Sets:
 *
 *	num_rcv_contexts - number of contexts being used
 *	n_krcv_queues - number of kernel contexts
 *	first_user_ctxt - first non-kernel context in array of contexts
 *	freectxts  - number of free user contexts
 *	num_send_contexts - number of PIO send contexts being used
 */
static int set_up_context_variables(struct hfi1_devdata *dd)
{
	int num_kernel_contexts;
	int total_contexts;
	int ret;
	unsigned ngroups;

	/*
	 * Kernel contexts: (to be fixed later):
	 * - min of 2 or 1 context/numa
	 * - Context 0 - control context (VL15/multicast/error)
	 * - Context 1 - default context
	 */
	if (n_krcvqs)
		/*
		 * Don't count context 0 in n_krcvqs since
		 * it isn't used for normal verbs traffic.
		 *
		 * krcvqs will reflect number of kernel
		 * receive contexts above 0.
		 */
		num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
	else
		num_kernel_contexts = num_online_nodes() + 1;
	num_kernel_contexts =
		max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
	/*
	 * Every kernel receive context needs an ACK send context.
	 * One send context is allocated for each VL{0-7} and VL15.
	 */
	if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
		dd_dev_err(dd,
			   "Reducing # kernel rcv contexts to: %d, from %d\n",
			   (int)(dd->chip_send_contexts - num_vls - 1),
			   (int)num_kernel_contexts);
		num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
	}
	/*
	 * User contexts: (to be fixed later)
	 *	- default to 1 user context per CPU if num_user_contexts is
	 *	  negative
	 */
	if (num_user_contexts < 0)
		num_user_contexts = num_online_cpus();

	total_contexts = num_kernel_contexts + num_user_contexts;

	/*
	 * Adjust the counts given a global max.
	 */
	if (total_contexts > dd->chip_rcv_contexts) {
		dd_dev_err(dd,
			   "Reducing # user receive contexts to: %d, from %d\n",
			   (int)(dd->chip_rcv_contexts - num_kernel_contexts),
			   (int)num_user_contexts);
		num_user_contexts =
			dd->chip_rcv_contexts - num_kernel_contexts;
		/* recalculate */
		total_contexts = num_kernel_contexts + num_user_contexts;
	}

	/* the first N are kernel contexts, the rest are user contexts */
	dd->num_rcv_contexts = total_contexts;
	dd->n_krcv_queues = num_kernel_contexts;
	dd->first_user_ctxt = num_kernel_contexts;
	dd->num_user_contexts = num_user_contexts;
	dd->freectxts = num_user_contexts;
	dd_dev_info(dd,
		    "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
		    (int)dd->chip_rcv_contexts,
		    (int)dd->num_rcv_contexts,
		    (int)dd->n_krcv_queues,
		    (int)dd->num_rcv_contexts - dd->n_krcv_queues);

	/*
	 * Receive array allocation:
	 *   All RcvArray entries are divided into groups of 8. This
	 *   is required by the hardware and will speed up writes to
	 *   consecutive entries by using write-combining of the entire
	 *   cacheline.
	 *
	 *   The number of groups are evenly divided among all contexts.
	 *   any left over groups will be given to the first N user
	 *   contexts.
	 */
	dd->rcv_entries.group_size = RCV_INCREMENT;
	ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
	dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
	dd->rcv_entries.nctxt_extra = ngroups -
		(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
	dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
		    dd->rcv_entries.ngroups,
		    dd->rcv_entries.nctxt_extra);
	if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
	    MAX_EAGER_ENTRIES * 2) {
		dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
			dd->rcv_entries.group_size;
		dd_dev_info(dd,
			    "RcvArray group count too high, change to %u\n",
			    dd->rcv_entries.ngroups);
		dd->rcv_entries.nctxt_extra = 0;
	}
	/*
	 * PIO send contexts
	 */
	ret = init_sc_pools_and_sizes(dd);
	if (ret >= 0) {	/* success */
		dd->num_send_contexts = ret;
		dd_dev_info(
			dd,
			"send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
			dd->chip_send_contexts,
			dd->num_send_contexts,
			dd->sc_sizes[SC_KERNEL].count,
			dd->sc_sizes[SC_ACK].count,
			dd->sc_sizes[SC_USER].count);
		ret = 0;	/* success */
	}

	return ret;
}
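
/*
 * Illustrative sketch only (not part of the driver): the RcvArray split
 * above divides the entries into groups of the hardware increment, gives
 * each context an equal share of groups, and leaves the remainder for
 * the first user contexts.  Example arithmetic only:
 */
static inline void example_rcv_groups(u32 array_count, u32 group_size,
				      u32 nctxts, u32 *ngroups, u32 *extra)
{
	u32 groups = array_count / group_size;

	*ngroups = groups / nctxts;		/* per-context share */
	*extra = groups - (nctxts * *ngroups);	/* remainder to hand out */
}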
/*
 * Set the device/port partition key table. The MAD code
 * will ensure that, at least, the partial management
 * partition key is present in the table.
 */
static void set_partition_keys(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg = 0;
	int i;

	dd_dev_info(dd, "Setting partition keys\n");
	for (i = 0; i < hfi1_get_npkeys(dd); i++) {
		reg |= (ppd->pkeys[i] &
			RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
			((i % 4) *
			 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
		/* Each register holds 4 PKey values. */
		if ((i % 4) == 3) {
			write_csr(dd, RCV_PARTITION_KEY +
				  ((i - 3) * 2), reg);
			reg = 0;
		}
	}

	/* Always enable HW pkeys check when pkeys table is set */
	add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
}
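
/*
 * Illustrative sketch only (not part of the driver): RcvPartitionKey
 * packs four 16-bit pkeys per CSR, so pkey i lands in register i / 4 at
 * lane i % 4.  This example assumes the per-lane shift is 16 bits, as
 * the A/B field usage above suggests:
 */
static inline u64 example_pack_pkeys(const u16 *pkeys)
{
	u64 reg = 0;
	int i;

	for (i = 0; i < 4; i++)
		reg |= (u64)pkeys[i] << (i * 16);	/* one pkey per lane */
	return reg;
}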
/*
 * These CSRs and memories are uninitialized on reset and must be
 * written before reading to set the ECC/parity bits.
 *
 * NOTE: All user context CSRs that are not mmaped write-only
 * (e.g. the TID flows) must be initialized even if the driver never
 * reads them.
 */
static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
{
	int i, j;

	/* CceIntMap */
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP+(8*i), 0);

	/* SendCtxtCreditReturnAddr */
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);

	/* PIO Send buffers */
	/* SDMA Send buffers */
	/* These are not normally read, and (presently) have no method
	   to be read, so are not pre-initialized */

	/* RcvHdrAddr */
	/* RcvHdrTailAddr */
	/* RcvTidFlowTable */
	for (i = 0; i < dd->chip_rcv_contexts; i++) {
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
			write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
	}

	/* RcvArray */
	for (i = 0; i < dd->chip_rcv_array_count; i++)
		write_csr(dd, RCV_ARRAY + (8*i),
			  RCV_ARRAY_RT_WRITE_ENABLE_SMASK);

	/* RcvQPMapTable */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
}
/*
 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
 */
static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
			     u64 ctrl_bits)
{
	unsigned long timeout;
	u64 reg;

	/* is the condition present? */
	reg = read_csr(dd, CCE_STATUS);
	if ((reg & status_bits) == 0)
		return;

	/* clear the condition */
	write_csr(dd, CCE_CTRL, ctrl_bits);

	/* wait for the condition to clear */
	timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
	while (1) {
		reg = read_csr(dd, CCE_STATUS);
		if ((reg & status_bits) == 0)
			return;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
				   status_bits, reg & status_bits);
			return;
		}
		udelay(1);
	}
}
/* set CCE CSRs to chip reset defaults */
static void reset_cce_csrs(struct hfi1_devdata *dd)
{
	int i;

	/* CCE_REVISION read-only */
	/* CCE_REVISION2 read-only */
	/* CCE_CTRL - bits clear automatically */
	/* CCE_STATUS read-only, use CceCtrl to clear */
	clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
	clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
	clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
	for (i = 0; i < CCE_NUM_SCRATCH; i++)
		write_csr(dd, CCE_SCRATCH + (8 * i), 0);
	/* CCE_ERR_STATUS read-only */
	write_csr(dd, CCE_ERR_MASK, 0);
	write_csr(dd, CCE_ERR_CLEAR, ~0ull);
	/* CCE_ERR_FORCE leave alone */
	for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
	/* CCE_PCIE_CTRL leave alone */
	for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
		write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
		write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
			  CCE_MSIX_TABLE_UPPER_RESETCSR);
	}
	for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
		/* CCE_MSIX_PBA read-only */
		write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
		write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
	}
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP, 0);
	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
		/* CCE_INT_STATUS read-only */
		write_csr(dd, CCE_INT_MASK + (8 * i), 0);
		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
		/* CCE_INT_FORCE leave alone */
		/* CCE_INT_BLOCKED read-only */
	}
	for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
		write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
}
/* set ASIC CSRs to chip reset defaults */
static void reset_asic_csrs(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * If the HFIs are shared between separate nodes or VMs,
	 * then more will need to be done here.  One idea is a module
	 * parameter that returns early, letting the first power-on or
	 * a known first load do the reset and blocking all others.
	 */

	if (!(dd->flags & HFI1_DO_INIT_ASIC))
		return;

	if (dd->icode != ICODE_FPGA_EMULATION) {
		/* emulation does not have an SBus - leave these alone */
		/*
		 * All writes to ASIC_CFG_SBUS_REQUEST do something.
		 * Notes:
		 * o The reset is not zero if aimed at the core.  See the
		 *   SBus documentation for details.
		 * o If the SBus firmware has been updated (e.g. by the BIOS),
		 *   will the reset revert that?
		 */
		/* ASIC_CFG_SBUS_REQUEST leave alone */
		write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
	}
	/* ASIC_SBUS_RESULT read-only */
	write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
	for (i = 0; i < ASIC_NUM_SCRATCH; i++)
		write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
	write_csr(dd, ASIC_CFG_MUTEX, 0);	/* this will clear it */

	/* We might want to retain this state across FLR if we ever use it */
	write_csr(dd, ASIC_CFG_DRV_STR, 0);

	/* ASIC_CFG_THERM_POLL_EN leave alone */
	/* ASIC_STS_THERM read-only */
	/* ASIC_CFG_RESET leave alone */

	write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
	/* ASIC_PCIE_SD_HOST_STATUS read-only */
	write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
	write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
	/* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
	write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
	/* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
	/* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
	for (i = 0; i < 16; i++)
		write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);

	/* ASIC_GPIO_IN read-only */
	write_csr(dd, ASIC_GPIO_OE, 0);
	write_csr(dd, ASIC_GPIO_INVERT, 0);
	write_csr(dd, ASIC_GPIO_OUT, 0);
	write_csr(dd, ASIC_GPIO_MASK, 0);
	/* ASIC_GPIO_STATUS read-only */
	write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
	/* ASIC_GPIO_FORCE leave alone */

	/* ASIC_QSFP1_IN read-only */
	write_csr(dd, ASIC_QSFP1_OE, 0);
	write_csr(dd, ASIC_QSFP1_INVERT, 0);
	write_csr(dd, ASIC_QSFP1_OUT, 0);
	write_csr(dd, ASIC_QSFP1_MASK, 0);
	/* ASIC_QSFP1_STATUS read-only */
	write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
	/* ASIC_QSFP1_FORCE leave alone */

	/* ASIC_QSFP2_IN read-only */
	write_csr(dd, ASIC_QSFP2_OE, 0);
	write_csr(dd, ASIC_QSFP2_INVERT, 0);
	write_csr(dd, ASIC_QSFP2_OUT, 0);
	write_csr(dd, ASIC_QSFP2_MASK, 0);
	/* ASIC_QSFP2_STATUS read-only */
	write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
	/* ASIC_QSFP2_FORCE leave alone */

	write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
	/* this also writes a NOP command, clearing paging mode */
	write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
	write_csr(dd, ASIC_EEP_DATA, 0);
}
/* set MISC CSRs to chip reset defaults */
static void reset_misc_csrs(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < 32; i++) {
		write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
	}
	/* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
	   only be written 128-byte chunks */
	/* init RSA engine to clear lingering errors */
	write_csr(dd, MISC_CFG_RSA_CMD, 1);
	write_csr(dd, MISC_CFG_RSA_MU, 0);
	write_csr(dd, MISC_CFG_FW_CTRL, 0);
	/* MISC_STS_8051_DIGEST read-only */
	/* MISC_STS_SBM_DIGEST read-only */
	/* MISC_STS_PCIE_DIGEST read-only */
	/* MISC_STS_FAB_DIGEST read-only */
	/* MISC_ERR_STATUS read-only */
	write_csr(dd, MISC_ERR_MASK, 0);
	write_csr(dd, MISC_ERR_CLEAR, ~0ull);
	/* MISC_ERR_FORCE leave alone */
}
/* set TXE CSRs to chip reset defaults */
static void reset_txe_csrs(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * TXE Kernel CSRs
	 */
	write_csr(dd, SEND_CTRL, 0);
	__cm_reset(dd, 0);	/* reset CM internal state */
	/* SEND_CONTEXTS read-only */
	/* SEND_DMA_ENGINES read-only */
	/* SEND_PIO_MEM_SIZE read-only */
	/* SEND_DMA_MEM_SIZE read-only */
	write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
	pio_reset_all(dd);	/* SEND_PIO_INIT_CTXT */
	/* SEND_PIO_ERR_STATUS read-only */
	write_csr(dd, SEND_PIO_ERR_MASK, 0);
	write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
	/* SEND_PIO_ERR_FORCE leave alone */
	/* SEND_DMA_ERR_STATUS read-only */
	write_csr(dd, SEND_DMA_ERR_MASK, 0);
	write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
	/* SEND_DMA_ERR_FORCE leave alone */
	/* SEND_EGRESS_ERR_STATUS read-only */
	write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
	/* SEND_EGRESS_ERR_FORCE leave alone */
	write_csr(dd, SEND_BTH_QP, 0);
	write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
	write_csr(dd, SEND_SC2VLT0, 0);
	write_csr(dd, SEND_SC2VLT1, 0);
	write_csr(dd, SEND_SC2VLT2, 0);
	write_csr(dd, SEND_SC2VLT3, 0);
	write_csr(dd, SEND_LEN_CHECK0, 0);
	write_csr(dd, SEND_LEN_CHECK1, 0);
	/* SEND_ERR_STATUS read-only */
	write_csr(dd, SEND_ERR_MASK, 0);
	write_csr(dd, SEND_ERR_CLEAR, ~0ull);
	/* SEND_ERR_FORCE read-only */
	for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
		write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0);
	for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
		write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0);
	for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++)
		write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0);
	for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
		write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0);
	for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
		write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0);
	write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
	write_csr(dd, SEND_CM_GLOBAL_CREDIT,
		  SEND_CM_GLOBAL_CREDIT_RESETCSR);
	/* SEND_CM_CREDIT_USED_STATUS read-only */
	write_csr(dd, SEND_CM_TIMER_CTRL, 0);
	write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
	write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
	write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
	write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
	/* SEND_CM_CREDIT_USED_VL read-only */
	/* SEND_CM_CREDIT_USED_VL15 read-only */
	/* SEND_EGRESS_CTXT_STATUS read-only */
	/* SEND_EGRESS_SEND_DMA_STATUS read-only */
	write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
	/* SEND_EGRESS_ERR_INFO read-only */
	/* SEND_EGRESS_ERR_SOURCE read-only */

	/*
	 * TXE Per-Context CSRs
	 */
	for (i = 0; i < dd->chip_send_contexts; i++) {
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
	}

	/*
	 * TXE Per-SDMA CSRs
	 */
	for (i = 0; i < dd->chip_sdma_engines; i++) {
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
		/* SEND_DMA_STATUS read-only */
		write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
		write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
		/* SEND_DMA_HEAD read-only */
		write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
		/* SEND_DMA_IDLE_CNT read-only */
		write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
		write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
		/* SEND_DMA_DESC_FETCHED_CNT read-only */
		/* SEND_DMA_ENG_ERR_STATUS read-only */
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
		/* SEND_DMA_ENG_ERR_FORCE leave alone */
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
		write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
	}
}
/*
 * Expect on entry:
 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
 */
static void init_rbufs(struct hfi1_devdata *dd)
{
	u64 reg;
	int count;

	/*
	 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
	 * clear.
	 */
	count = 0;
	while (1) {
		reg = read_csr(dd, RCV_STATUS);
		if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
			    | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
			break;
		/*
		 * Give up after 1ms - maximum wait time.
		 *
		 * RBuf size is 148KiB.  Slowest possible is PCIe Gen1 x1 at
		 * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
		 *	148 KB / (66% * 250MB/s) = 920us
		 */
		if (count++ > 500) {
			dd_dev_err(dd,
				   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
				   __func__, reg);
			break;
		}
		udelay(2); /* do not busy-wait the CSR */
	}

	/* start the init - expect RcvCtrl to be 0 */
	write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);

	/*
	 * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
	 * period after the write before RcvStatus.RxRbufInitDone is valid.
	 * The delay in the first run through the loop below is sufficient and
	 * required before the first read of RcvStatus.RxRbufInitDone.
	 */
	read_csr(dd, RCV_CTRL);

	/* wait for the init to finish */
	count = 0;
	while (1) {
		/* delay is required first time through - see above */
		udelay(2); /* do not busy-wait the CSR */
		reg = read_csr(dd, RCV_STATUS);
		if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
			break;

		/* give up after 100us - slowest possible at 33MHz is 73us */
		if (count++ > 50) {
			dd_dev_err(dd,
				   "%s: RcvStatus.RxRbufInit not set, continuing\n",
				   __func__);
			break;
		}
	}
}
/* set RXE CSRs to chip reset defaults */
static void reset_rxe_csrs(struct hfi1_devdata *dd)
{
	int i, j;

	/*
	 * RXE Kernel CSRs
	 */
	write_csr(dd, RCV_CTRL, 0);
	init_rbufs(dd);
	/* RCV_STATUS read-only */
	/* RCV_CONTEXTS read-only */
	/* RCV_ARRAY_CNT read-only */
	/* RCV_BUF_SIZE read-only */
	write_csr(dd, RCV_BTH_QP, 0);
	write_csr(dd, RCV_MULTICAST, 0);
	write_csr(dd, RCV_BYPASS, 0);
	write_csr(dd, RCV_VL15, 0);
	/* this is a clear-down */
	write_csr(dd, RCV_ERR_INFO,
		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
	/* RCV_ERR_STATUS read-only */
	write_csr(dd, RCV_ERR_MASK, 0);
	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
	/* RCV_ERR_FORCE leave alone */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
	for (i = 0; i < 4; i++)
		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
		write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
		write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
		write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
	}
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);

	/*
	 * RXE Kernel and User Per-Context CSRs
	 */
	for (i = 0; i < dd->chip_rcv_contexts; i++) {
		/* kernel */
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
		/* RCV_CTXT_STATUS read-only */
		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);

		/* user */
		/* RCV_HDR_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
		/* RCV_EGR_INDEX_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
		/* RCV_EGR_OFFSET_TAIL read-only */
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
			write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
					0);
		}
	}
}
/*
 * Set sc2vl tables.
 *
 * They power on to zeros, so to avoid send context errors
 * they need to be set:
 *
 * SC 0-7 -> VL 0-7 (respectively)
 * SC 15  -> VL 15
 * otherwise
 *        -> VL 0
 */
static void init_sc2vl_tables(struct hfi1_devdata *dd)
{
	int i;
	/* init per architecture spec, constrained by hardware capability */

	/* HFI maps sent packets */
	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
		0,
		0, 0, 1, 1,
		2, 2, 3, 3,
		4, 4, 5, 5,
		6, 6, 7, 7));
	write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
		1,
		8, 0, 9, 0,
		10, 0, 11, 0,
		12, 0, 13, 0,
		14, 0, 15, 15));
	write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
		2,
		16, 0, 17, 0,
		18, 0, 19, 0,
		20, 0, 21, 0,
		22, 0, 23, 0));
	write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
		3,
		24, 0, 25, 0,
		26, 0, 27, 0,
		28, 0, 29, 0,
		30, 0, 31, 0));

	/* DC maps received packets */
	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
		15_0,
		0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
		8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
		31_16,
		16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
		24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));

	/* initialize the cached sc2vl values consistently with h/w */
	for (i = 0; i < 32; i++) {
		if (i < 8 || i == 15)
			*((u8 *)(dd->sc2vl) + i) = (u8)i;
		else
			*((u8 *)(dd->sc2vl) + i) = 0;
	}
}
/*
 * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
 * depend on the chip going through a power-on reset - a driver may be loaded
 * and unloaded many times.
 *
 * Do not write any CSR values to the chip in this routine - there may be
 * a reset following the (possible) FLR in this routine.
 *
 */
static void init_chip(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * Put the HFI CSRs in a known state.
	 * Combine this with a DC reset.
	 *
	 * Stop the device from doing anything while we do a
	 * reset.  We know there are no other active users of
	 * the device since we are now in charge.  Turn off
	 * all outbound and inbound traffic and make sure
	 * the device does not generate any interrupts.
	 */

	/* disable send contexts and SDMA engines */
	write_csr(dd, SEND_CTRL, 0);
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
	for (i = 0; i < dd->chip_sdma_engines; i++)
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
	/* disable port (turn off RXE inbound traffic) and contexts */
	write_csr(dd, RCV_CTRL, 0);
	for (i = 0; i < dd->chip_rcv_contexts; i++)
		write_csr(dd, RCV_CTXT_CTRL, 0);
	/* mask all interrupt sources */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_MASK + (8*i), 0ull);

	/*
	 * DC Reset: do a full DC reset before the register clear.
	 * A recommended length of time to hold is one CSR read,
	 * so reread the CceDcCtrl.  Then, hold the DC in reset
	 * across the clear.
	 */
	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
	(void) read_csr(dd, CCE_DC_CTRL);

	if (use_flr) {
		/*
		 * A FLR will reset the SPC core and part of the PCIe.
		 * The parts that need to be restored have already been
		 * saved.
		 */
		dd_dev_info(dd, "Resetting CSRs with FLR\n");

		/* do the FLR, the DC reset will remain */
		hfi1_pcie_flr(dd);

		/* restore command and BARs */
		restore_pci_variables(dd);

		if (is_ax(dd)) {
			dd_dev_info(dd, "Resetting CSRs with FLR\n");
			hfi1_pcie_flr(dd);
			restore_pci_variables(dd);
		}

		reset_asic_csrs(dd);
	} else {
		dd_dev_info(dd, "Resetting CSRs with writes\n");
		reset_cce_csrs(dd);
		reset_txe_csrs(dd);
		reset_rxe_csrs(dd);
		reset_asic_csrs(dd);
		reset_misc_csrs(dd);
	}
	/* clear the DC reset */
	write_csr(dd, CCE_DC_CTRL, 0);

	/* Set the LED off */
	if (is_ax(dd))
		setextled(dd, 0);
	/*
	 * Clear the QSFP reset.
	 * An FLR enforces a 0 on all out pins. The driver does not touch
	 * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
	 * anything plugged constantly in reset, if it pays attention
	 * to RESET_N.
	 * Prime examples of this are optical cables. Set all pins high.
	 * I2CCLK and I2CDAT will change per direction, and INT_N and
	 * MODPRS_N are input only and their value is ignored.
	 */
	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
}
static void init_early_variables(struct hfi1_devdata *dd)
{
	int i;

	/* assign link credit variables */
	dd->vau = CM_VAU;
	dd->link_credits = CM_GLOBAL_CREDITS;
	if (is_ax(dd))
		dd->link_credits--;
	dd->vcu = cu_to_vcu(hfi1_cu);
	/* enough room for 8 MAD packets plus header - 17K */
	dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
	if (dd->vl15_init > dd->link_credits)
		dd->vl15_init = dd->link_credits;

	write_uninitialized_csrs_and_memories(dd);

	if (HFI1_CAP_IS_KSET(PKEY_CHECK))
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_pportdata *ppd = &dd->pport[i];

			set_partition_keys(ppd);
		}
	init_sc2vl_tables(dd);
}
static void init_kdeth_qp(struct hfi1_devdata *dd)
{
	/* user changed the KDETH_QP */
	if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
		/* out of range or illegal value */
		dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
		kdeth_qp = 0;
	}
	if (kdeth_qp == 0)	/* not set, or failed range check */
		kdeth_qp = DEFAULT_KDETH_QP;

	write_csr(dd, SEND_BTH_QP,
		  (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
		  << SEND_BTH_QP_KDETH_QP_SHIFT);

	write_csr(dd, RCV_BTH_QP,
		  (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
		  << RCV_BTH_QP_KDETH_QP_SHIFT);
}
/**
 * init_qpmap_table
 * @dd - device data
 * @first_ctxt - first context
 * @last_ctxt - last context
 *
 * This routine sets the qpn mapping table that
 * is indexed by qpn[8:1].
 *
 * The routine will round robin the 256 settings
 * from first_ctxt to last_ctxt.
 *
 * The first/last looks ahead to having specialized
 * receive contexts for mgmt and bypass.  Normal
 * verbs traffic will be assumed to be on a range
 * of receive contexts.
 */
static void init_qpmap_table(struct hfi1_devdata *dd,
			     u32 first_ctxt,
			     u32 last_ctxt)
{
	u64 reg = 0;
	u64 regno = RCV_QP_MAP_TABLE;
	int i;
	u64 ctxt = first_ctxt;

	for (i = 0; i < 256;) {
		reg |= ctxt << (8 * (i % 8));
		i++;
		ctxt++;
		if (ctxt > last_ctxt)
			ctxt = first_ctxt;
		if (i % 8 == 0) {
			write_csr(dd, regno, reg);
			reg = 0;
			regno += 8;
		}
	}
	if (i % 8)
		write_csr(dd, regno, reg);

	add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
			| RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
}
/**
 * init_qos - init RX qos
 * @dd - device data
 * @first_ctxt - first context
 *
 * This routine initializes Rule 0 and the
 * RSM map table to implement qos.
 *
 * If all of the limit tests succeed,
 * qos is applied based on the array
 * interpretation of krcvqs where
 * entry 0 is VL0.
 *
 * The number of vl bits (n) and the number of qpn
 * bits (m) are computed to feed both the RSM map table
 * and the single rule.
 */
static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
{
	u8 max_by_vl = 0;
	unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
	u64 *rsmmap;
	u64 reg;
	u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */

	/* validate */
	if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
	    num_vls == 1 ||
	    krcvqsset <= 1)
		goto bail;
	for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
		if (krcvqs[i] > max_by_vl)
			max_by_vl = krcvqs[i];
	if (max_by_vl > 32)
		goto bail;
	qpns_per_vl = __roundup_pow_of_two(max_by_vl);
	/* determine bits for vl */
	n = ilog2(num_vls);
	/* determine bits for qpn */
	m = ilog2(qpns_per_vl);
	if ((m + n) > 7)
		goto bail;
	if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
		goto bail;
	rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
	if (!rsmmap)
		goto bail;
	memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
	/* init the local copy of the table */
	for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
		unsigned tctxt;

		for (qpn = 0, tctxt = ctxt;
		     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
			unsigned idx, regoff, regidx;

			/* generate index < 128 */
			idx = (qpn << n) ^ i;
			regoff = (idx % 8) * 8;
			regidx = idx / 8;
			reg = rsmmap[regidx];
			/* replace 0xff with context number */
			reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
				<< regoff);
			reg |= (u64)(tctxt++) << regoff;
			rsmmap[regidx] = reg;
			if (tctxt == ctxt + krcvqs[i])
				tctxt = ctxt;
		}
		ctxt += krcvqs[i];
	}
	/* flush cached copies to chip */
	for (i = 0; i < NUM_MAP_REGS; i++)
		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
	/* add rule 0 */
	write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
		RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
			<< RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
		2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
	write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
		LRH_BTH_MATCH_OFFSET
			<< RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
		LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
		LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
		((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
		QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
		((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
	write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
		LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
		LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
		LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
		LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
	/* Enable RSM */
	add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
	kfree(rsmmap);
	/* map everything else to first context */
	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
	dd->qos_shift = n + 1;
	return;
bail:
	dd->qos_shift = 1;
	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
}
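
/*
 * Sketch only of the RSM index computed above: with n VL bits and m QPN
 * bits, the index is (qpn[m-1:0] << n) ^ vl, so it stays below
 * 2^(m+n) <= 128 (the limit checks above enforce m + n <= 7). For
 * example, n = 3, m = 2, vl = 5, qpn = 2 gives (2 << 3) ^ 5 = 21.
 * The example_* name is illustrative, not driver API.
 */
static inline unsigned example_rsm_index(unsigned qpn, unsigned vl,
					 unsigned n, unsigned m)
{
	return ((qpn & ((1u << m) - 1)) << n) ^ vl;
}
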
static void init_rxe(struct hfi1_devdata *dd)
{
	/* enable all receive errors */
	write_csr(dd, RCV_ERR_MASK, ~0ull);
	/* setup QPN map table - start where VL15 context leaves off */
	init_qos(dd,
		dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
	/*
	 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
	 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
	 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
	 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
	 * Max_Payload_Size set to its minimum of 128.
	 *
	 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
	 * (64 bytes). Max_Payload_Size is possibly modified upward in
	 * tune_pcie_caps() which is called after this routine.
	 */
}
static void init_other(struct hfi1_devdata *dd)
{
	/* enable all CCE errors */
	write_csr(dd, CCE_ERR_MASK, ~0ull);
	/* enable *some* Misc errors */
	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
	/* enable all DC errors, except LCB */
	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
}
/*
 * Fill out the given AU table using the given CU. A CU is defined in terms
 * of AUs. The table is an encoding: given the index, how many AUs does that
 * index represent?
 *
 * NOTE: Assumes that the register layout is the same for the
 * local and remote tables.
 */
static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
			       u32 csr0to3, u32 csr4to7)
{
	write_csr(dd, csr0to3,
		0ull <<
			SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
		| 1ull <<
			SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
		| 2ull * cu <<
			SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
		| 4ull * cu <<
			SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
	write_csr(dd, csr4to7,
		8ull * cu <<
			SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
		| 16ull * cu <<
			SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
		| 32ull * cu <<
			SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
		| 64ull * cu <<
			SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
}

static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
			   SEND_CM_LOCAL_AU_TABLE4_TO7);
}

void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
			   SEND_CM_REMOTE_AU_TABLE4_TO7);
}
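
/*
 * Sketch only, assuming the doubling encoding written above holds:
 * entries 0 and 1 are fixed at 0 and 1 AU, and entry k (k >= 2)
 * represents 2^(k-1) * cu AUs. The example_* name is illustrative,
 * not driver API.
 */
static inline u64 example_au_table_entry(u32 cu, unsigned k)
{
	return k == 0 ? 0 : k == 1 ? 1 : (1ull << (k - 1)) * cu;
}
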
static void init_txe(struct hfi1_devdata *dd)
{
	int i;

	/* enable all PIO, SDMA, general, and Egress errors */
	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
	write_csr(dd, SEND_ERR_MASK, ~0ull);
	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);

	/* enable all per-context and per-SDMA engine errors */
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
	for (i = 0; i < dd->chip_sdma_engines; i++)
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);

	/* set the local CU to AU mapping */
	assign_local_cm_au_table(dd, dd->vcu);

	/*
	 * Set reasonable default for Credit Return Timer
	 * Don't set on Simulator - causes it to choke.
	 */
	if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
}
int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
{
	struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
	unsigned sctxt;
	int ret = 0;
	u64 reg;

	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
	sctxt = rcd->sc->hw_context;
	reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
		((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
		 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
	/* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
	if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
		reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
	/*
	 * Enable send-side J_KEY integrity check, unless this is A0 h/w
	 */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}

	/* Enable J_KEY check on receive context. */
	reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
		((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
		 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
	write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
done:
	return ret;
}
int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
{
	struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
	unsigned sctxt;
	int ret = 0;
	u64 reg;

	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
	sctxt = rcd->sc->hw_context;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
	/*
	 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
	 * This check would not have been enabled for A0 h/w, see
	 * set_ctxt_jkey().
	 */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}
	/* Turn off the J_KEY on the receive side */
	write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
done:
	return ret;
}
int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
{
	struct hfi1_ctxtdata *rcd;
	unsigned sctxt;
	int ret = 0;
	u64 reg;

	if (ctxt < dd->num_rcv_contexts)
		rcd = dd->rcd[ctxt];
	else {
		ret = -EINVAL;
		goto done;
	}
	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
	sctxt = rcd->sc->hw_context;
	reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
		SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
	reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
done:
	return ret;
}
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
{
	struct hfi1_ctxtdata *rcd;
	unsigned sctxt;
	int ret = 0;
	u64 reg;

	if (ctxt < dd->num_rcv_contexts)
		rcd = dd->rcd[ctxt];
	else {
		ret = -EINVAL;
		goto done;
	}
	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
	sctxt = rcd->sc->hw_context;
	reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
done:
	return ret;
}
/*
 * Start doing the clean up of the chip. Our clean up happens in multiple
 * stages and this is just the first.
 */
void hfi1_start_cleanup(struct hfi1_devdata *dd)
{
	free_cntrs(dd);
	free_rcverr(dd);
	clean_up_interrupts(dd);
}
#define HFI_BASE_GUID(dev) \
	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))

/*
 * Certain chip functions need to be initialized only once per asic
 * instead of per-device. This function finds the peer device and
 * checks whether that chip initialization needs to be done by this
 * device.
 */
static void asic_should_init(struct hfi1_devdata *dd)
{
	unsigned long flags;
	struct hfi1_devdata *tmp, *peer = NULL;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	/* Find our peer device */
	list_for_each_entry(tmp, &hfi1_dev_list, list) {
		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
		    dd->unit != tmp->unit) {
			peer = tmp;
			break;
		}
	}

	/*
	 * "Claim" the ASIC for initialization if it hasn't been
	 * "claimed" yet.
	 */
	if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
		dd->flags |= HFI1_DO_INIT_ASIC;
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
}
/*
 * Set dd->boardname. Use a generic name if a name is not returned from
 * EFI variable space.
 *
 * Return 0 on success, -ENOMEM if space could not be allocated.
 */
static int obtain_boardname(struct hfi1_devdata *dd)
{
	/* generic board description */
	const char generic[] =
		"Intel Omni-Path Host Fabric Interface Adapter 100 Series";
	unsigned long size;
	int ret;

	ret = read_hfi1_efi_var(dd, "description", &size,
				(void **)&dd->boardname);
	if (ret) {
		dd_dev_info(dd, "Board description not found\n");
		/* use generic description */
		dd->boardname = kstrdup(generic, GFP_KERNEL);
		if (!dd->boardname)
			return -ENOMEM;
	}
	return 0;
}
/**
 * Allocate and initialize the device structure for the hfi.
 * @dev: the pci_dev for hfi1_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, initializes, and returns the devdata struct for this
 * device.
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	u64 reg;
	int i, ret;
	static const char * const inames[] = { /* implementation names */
		"RTL silicon",
		"RTL VCS simulation",
		"RTL FPGA emulation",
		"Functional simulator"
	};

	dd = hfi1_alloc_devdata(pdev,
		NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd))
		goto bail;
	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		int vl;

		/* init common fields */
		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
		/* DC supports 4 link widths */
		ppd->link_width_supported =
			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_supported =
			ppd->link_width_supported;
		/* start out enabling only 4X */
		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_enabled =
			ppd->link_width_downgrade_supported;
		/* link width active is 0 when link is down */
		/* link width downgrade active is 0 when link is down */

		if (num_vls < HFI1_MIN_VLS_SUPPORTED
			|| num_vls > HFI1_MAX_VLS_SUPPORTED) {
			hfi1_early_err(&pdev->dev,
				       "Invalid num_vls %u, using %u VLs\n",
				       num_vls, HFI1_MAX_VLS_SUPPORTED);
			num_vls = HFI1_MAX_VLS_SUPPORTED;
		}
		ppd->vls_supported = num_vls;
		ppd->vls_operational = ppd->vls_supported;
		/* Set the default MTU. */
		for (vl = 0; vl < num_vls; vl++)
			dd->vld[vl].mtu = hfi1_max_mtu;
		dd->vld[15].mtu = MAX_MAD_PACKET;
		/*
		 * Set the initial values to reasonable defaults, will be set
		 * for real when link is up.
		 */
		ppd->lstate = IB_PORT_DOWN;
		ppd->overrun_threshold = 0x4;
		ppd->phy_error_threshold = 0xf;
		ppd->port_crc_mode_enabled = link_crc_mask;
		/* initialize supported LTP CRC mode */
		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* initialize enabled LTP CRC mode */
		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
		/* start in offline */
		ppd->host_link_state = HLS_DN_OFFLINE;
		init_vl_arb_caches(ppd);
	}

	dd->link_default = HLS_DN_POLL;

	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped.
	 */
	ret = hfi1_pcie_ddinit(dd, pdev, ent);
	if (ret < 0)
		goto bail_free;

	/* verify that reads actually work, save revision for reset check */
	dd->revision = read_csr(dd, CCE_REVISION);
	if (dd->revision == ~(u64)0) {
		dd_dev_err(dd, "cannot read chip CSRs\n");
		ret = -EINVAL;
		goto bail_cleanup;
	}
	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	/*
	 * Obtain the hardware ID - NOT related to unit, which is a
	 * software enumeration.
	 */
	reg = read_csr(dd, CCE_REVISION2);
	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
					& CCE_REVISION2_HFI_ID_MASK;
	/* the variable size will remove unwanted bits */
	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
		dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
		(int)dd->irev);

	/* speeds the hardware can support */
	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
	/* speeds allowed to run at */
	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
	/* give a reasonable active value, will be set on link up */
	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;

	dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
	dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
	dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
	dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
	dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);

	/* fix up link widths for emulation _p */
	ppd = dd->pport;
	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
		ppd->link_width_supported =
			ppd->link_width_enabled =
			ppd->link_width_downgrade_supported =
			ppd->link_width_downgrade_enabled =
				OPA_LINK_WIDTH_1X;
	}
	/* ensure num_vls isn't larger than number of sdma engines */
	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
			   num_vls, dd->chip_sdma_engines);
		num_vls = dd->chip_sdma_engines;
		ppd->vls_supported = dd->chip_sdma_engines;
	}

	/*
	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
	 * Limit the max if larger than the field holds. If timeout is
	 * non-zero, then the calculated field will be at least 1.
	 *
	 * Must be after icode is set up - the cclock rate depends
	 * on knowing the hardware being used.
	 */
	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
	if (dd->rcv_intr_timeout_csr >
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
		dd->rcv_intr_timeout_csr =
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
		dd->rcv_intr_timeout_csr = 1;
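
	/*
	 * Worked example of the conversion above, as a sketch only (the
	 * cclock period is an assumed illustrative value, not the real
	 * rate ns_to_cclock() uses): with the default rcv_intr_timeout
	 * of 840ns and an assumed 1.25ns cclock, ns_to_cclock() yields
	 * 672 cclocks, so the CSR reload field becomes 672 / 64 = 10;
	 * a non-zero timeout that divides down to 0 is bumped to 1 so
	 * interrupt mitigation stays armed.
	 */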
	/* needs to be done before we look for the peer device */
	read_guid(dd);

	/* should this device init the ASIC block? */
	asic_should_init(dd);

	/* obtain chip sizes, reset chip CSRs */
	init_chip(dd);

	/* read in the PCIe link speed information */
	ret = pcie_speeds(dd);
	if (ret)
		goto bail_cleanup;

	/* read in firmware */
	ret = hfi1_firmware_init(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * In general, the PCIe Gen3 transition must occur after the
	 * chip has been idled (so it won't initiate any PCIe transactions
	 * e.g. an interrupt) and before the driver changes any registers
	 * (the transition will reset the registers).
	 *
	 * In particular, place this call after:
	 * - init_chip() - the chip will not initiate any PCIe transactions
	 * - pcie_speeds() - reads the current link speed
	 * - hfi1_firmware_init() - the needed firmware is ready to be
	 *   downloaded
	 */
	ret = do_pcie_gen3_transition(dd);
	if (ret)
		goto bail_cleanup;

	/* start setting dd values and adjusting CSRs */
	init_early_variables(dd);

	parse_platform_config(dd);

	ret = obtain_boardname(dd);
	if (ret)
		goto bail_cleanup;

	snprintf(dd->boardversion, BOARD_VERS_MAX,
		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
		 dd->majrev, dd->minrev,
		 (dd->revision >> CCE_REVISION_SW_SHIFT)
			& CCE_REVISION_SW_MASK);

	ret = set_up_context_variables(dd);
	if (ret)
		goto bail_cleanup;

	/* set initial RXE CSRs */
	init_rxe(dd);
	/* set initial TXE CSRs */
	init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
	init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
	init_kdeth_qp(dd);

	ret = hfi1_dev_affinity_init(dd);
	if (ret)
		goto bail_cleanup;

	/* send contexts must be set up before receive contexts */
	ret = init_send_contexts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_create_ctxts(dd);
	if (ret)
		goto bail_cleanup;

	dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
	/*
	 * rcd[0] is guaranteed to be valid by this point. Also, all
	 * contexts are using the same value, as per the module parameter.
	 */
	dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);

	ret = init_pervl_scs(dd);
	if (ret)
		goto bail_cleanup;

	/* sdma init */
	for (i = 0; i < dd->num_pports; ++i) {
		ret = sdma_init(dd, i);
		if (ret)
			goto bail_cleanup;
	}

	/* use contexts created by hfi1_create_ctxts */
	ret = set_up_interrupts(dd);
	if (ret)
		goto bail_cleanup;

	/* set up LCB access - must be after set_up_interrupts() */
	init_lcb_access(dd);

	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
		 dd->base_guid & 0xFFFFFF);

	dd->oui1 = dd->base_guid >> 56 & 0xFF;
	dd->oui2 = dd->base_guid >> 48 & 0xFF;
	dd->oui3 = dd->base_guid >> 40 & 0xFF;

	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
	if (ret)
		goto bail_clear_intr;
	check_fabric_firmware_versions(dd);

	thermal_init(dd);

	ret = init_cntrs(dd);
	if (ret)
		goto bail_clear_intr;

	ret = init_rcverr(dd);
	if (ret)
		goto bail_free_cntrs;

	ret = eprom_init(dd);
	if (ret)
		goto bail_free_rcverr;

	goto bail;

bail_free_rcverr:
	free_rcverr(dd);
bail_free_cntrs:
	free_cntrs(dd);
bail_clear_intr:
	clean_up_interrupts(dd);
bail_cleanup:
	hfi1_pcie_ddcleanup(dd);
bail_free:
	hfi1_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
			u32 dw_len)
{
	u32 delta_cycles;
	u32 current_egress_rate = ppd->current_egress_rate;
	/* rates here are in units of 10^6 bits/sec */

	if (desired_egress_rate == -1)
		return 0; /* shouldn't happen */

	if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't help go faster, only slower */

	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
			egress_cycles(dw_len * 4, current_egress_rate);

	return (u16)delta_cycles;
}
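
/*
 * Sketch only of the cycle arithmetic above; this cycle model is an
 * assumption for illustration and the driver's egress_cycles() is the
 * authoritative version. A packet of `bytes` at `rate_mbs` * 10^6
 * bits/sec occupies (bytes * 8) / rate_mbs microseconds, which is then
 * scaled to clock cycles. The example_* name is not driver API.
 */
static inline u32 example_egress_cycles(u32 bytes, u32 rate_mbs,
					u32 cycles_per_us)
{
	return (u32)(((u64)bytes * 8 * cycles_per_us) / rate_mbs);
}
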
/**
 * create_pbc - build a pbc for transmission
 * @flags: special case flags or-ed in built pbc
 * @srate: static rate
 * @vl: vl
 * @dwlen: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature. The lone other caller
 * is for the diagnostic interface which calls this if the user does not
 * supply their own PBC.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
	       u32 dw_len)
{
	u64 pbc, delay = 0;

	if (unlikely(srate_mbs))
		delay = delay_cycles(ppd, srate_mbs, dw_len);

	pbc = flags
		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
		| (dw_len & PBC_LENGTH_DWS_MASK)
			<< PBC_LENGTH_DWS_SHIFT;

	return pbc;
}
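
/*
 * Example usage, as a sketch (the dword length and VL are illustrative
 * values, not taken from the driver, and the example_* name is not
 * driver API): build a PBC for a 24-dword packet (header + payload +
 * 2 PBC dwords) on VL0 with no special flags and no static rate
 * throttling.
 */
static inline u64 example_build_pbc(struct hfi1_pportdata *ppd)
{
	u32 dw_len = 24;	/* header dws + data dws + 2 pbc dws */

	return create_pbc(ppd, 0 /* flags */, 0 /* no srate */, 0 /* VL0 */,
			  dw_len);
}
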
#define SBUS_THERMAL		0x4f
#define SBUS_THERM_MONITOR_MODE	0x1

#define THERM_FAILURE(dev, ret, reason) \
	dd_dev_err((dev),						\
		   "Thermal sensor initialization failed: %s (%d)\n",	\
		   (reason), (ret))

/*
 * Initialize the Avago Thermal sensor.
 *
 * After initialization, enable polling of thermal sensor through
 * SBus interface. In order for this to work, the SBus Master
 * firmware has to be loaded due to the fact that the HW polling
 * logic uses SBus interrupts, which are not supported with
 * default firmware. Otherwise, no data will be returned through
 * the ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
	int ret = 0;

	if (dd->icode != ICODE_RTL_SILICON ||
	    !(dd->flags & HFI1_DO_INIT_ASIC))
		return ret;

	acquire_hw_mutex(dd);
	dd_dev_info(dd, "Initializing thermal sensor\n");
	/* Disable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
	msleep(100);
	/* Thermal Sensor Initialization */
	/*    Step 1: Reset the Thermal SBus Receiver */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				RESET_SBUS_RECEIVER, 0);
	if (ret) {
		THERM_FAILURE(dd, ret, "Bus Reset");
		goto done;
	}
	/*    Step 2: Set Reset bit in Thermal block */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x1);
	if (ret) {
		THERM_FAILURE(dd, ret, "Therm Block Reset");
		goto done;
	}
	/*    Step 3: Write clock divider value (100MHz -> 2MHz) */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
				WRITE_SBUS_RECEIVER, 0x32);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Clock Div");
		goto done;
	}
	/*    Step 4: Select temperature mode */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
				WRITE_SBUS_RECEIVER,
				SBUS_THERM_MONITOR_MODE);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Mode Sel");
		goto done;
	}
	/*    Step 5: De-assert block reset and start conversion */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x2);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Reset Deassert");
		goto done;
	}
	/*    Step 5.1: Wait for first conversion (21.5ms per spec) */
	msleep(22);

	/* Enable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
done:
	release_hw_mutex(dd);
	return ret;
}
static void handle_temp_err(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = &dd->pport[0];
	/*
	 * Thermal Critical Interrupt
	 * Put the device into forced freeze mode, take link down to
	 * offline, and put DC into reset.
	 */
	dd_dev_emerg(dd,
		     "Critical temperature reached! Forcing device into freeze mode!\n");
	dd->flags |= HFI1_FORCED_FREEZE;
	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
	/*
	 * Shut DC down as much and as quickly as possible.
	 *
	 * Step 1: Take the link down to OFFLINE. This will cause the
	 *         8051 to put the Serdes in reset. However, we don't want
	 *         to go through the entire link state machine since we
	 *         want to shutdown ASAP. Furthermore, this is not a
	 *         graceful shutdown but rather an attempt to save the
	 *         chip. Code below is almost the same as quiet_serdes()
	 *         but avoids all the extra work and the sleeps.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;
	set_physical_link_state(dd, PLS_OFFLINE |
				(OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
	/*
	 * Step 2: Shutdown LCB and 8051
	 *         After shutdown, do not restore DC_CFG_RESET value.