/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#define NUM_IB_PORTS 1
uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */
struct flag_table {
    u64 flag;   /* the flag */
    char *str;  /* description string */
    u16 extra;  /* extra information */
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
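
/*
 * A minimal sketch (not driver code) of how these tables are consumed:
 * walk the entries and report each one whose bit is set in a hardware
 * status value.  Names here are illustrative only.
 *
 *	static void example_report(const struct flag_table *tbl, size_t n,
 *				   u64 status)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < n; i++)
 *			if (status & tbl[i].flag)
 *				pr_info("%s\n", tbl[i].str);
 *	}
 */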
/* Send Error Consequences */
#define SEC_WRITE_DROPPED  0x1
#define SEC_PACKET_DROPPED 0x2
#define SEC_SC_HALTED      0x4 /* per-context only */
#define SEC_SPC_FREEZE     0x8 /* per-HFI only */

#define MIN_KERNEL_KCTXTS  2
#define FIRST_KERNEL_KCTXT 1
/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES    256
#define NUM_MAP_REGS       32
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
/* RSM fields */

/* packet type */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT       6ull
/* QPN[7..1] */
#define QPN_WIDTH      7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW           0ull
#define LRH_BTH_BIT_OFFSET   48ull
#define LRH_BTH_OFFSET(off)  ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT       LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_MASK         3ull
#define LRH_BTH_VALUE        2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW           0ull
#define LRH_SC_BIT_OFFSET   56ull
#define LRH_SC_OFFSET(off)  ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK         128ull
#define LRH_SC_VALUE        0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
    num, \
    sc0, sc0val, \
    sc1, sc1val, \
    sc2, sc2val, \
    sc3, sc3val, \
    sc4, sc4val, \
    sc5, sc5val, \
    sc6, sc6val, \
    sc7, sc7val) \
( \
    ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
    ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
    ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
    ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
    ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
    ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
    ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
    ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)
#define DC_SC_VL_VAL( \
    range, \
    e0, e0val, \
    e1, e1val, \
    e2, e2val, \
    e3, e3val, \
    e4, e4val, \
    e5, e5val, \
    e6, e6val, \
    e7, e7val, \
    e8, e8val, \
    e9, e9val, \
    e10, e10val, \
    e11, e11val, \
    e12, e12val, \
    e13, e13val, \
    e14, e14val, \
    e15, e15val) \
( \
    ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
    ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
    ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
    ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
    ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
    ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
    ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
    ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
    ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
    ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
    ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
    ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
    ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
    ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
    ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
    ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT)   \
)
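
/*
 * Hypothetical use of the builder above (the SC->VL values and the
 * SEND_SC2VLT0 register name are assumptions for illustration, not the
 * driver's power-on defaults): map SC0..SC7 of table 0 to VL0.
 *
 *	write_csr(dd, SEND_SC2VLT0,
 *		  SC2VL_VAL(0, 0, 0, 1, 0, 2, 0, 3, 0,
 *			       4, 0, 5, 0, 6, 0, 7, 0));
 */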
/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
        | CCE_STATUS_RXE_FROZE_SMASK \
        | CCE_STATUS_TXE_FROZE_SMASK \
        | CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
        | CCE_STATUS_TXE_PAUSED_SMASK \
        | CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
/*
 * CCE Error flags
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("CceCsrParityErr",
        CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
        CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
        CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
        CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
        CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
        CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/  FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
        CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/  FLAG_ENTRY0("PcicRetrySotMemCorErr",
        CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
        CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/  FLAG_ENTRY0("PcicPostDatQCorErr",
        CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/  FLAG_ENTRY0("PcicCplHdQCorErr",
        CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
        CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
        CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
        CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
        CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
        CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
        CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
        CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
        CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
        CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
        CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
        CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
        CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
        CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/  FLAG_ENTRY0("LATriggered",
        CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
        CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
        CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
        CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
        CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
        CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/  FLAG_ENTRY0("CceIntMapCorErr",
        CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/  FLAG_ENTRY0("CceIntMapUncErr",
        CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
        CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK)
};
/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};
/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
    SEC_WRITE_DROPPED,
    SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/  FLAG_ENTRY("PioWriteAddrParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/  FLAG_ENTRY("PioCsrParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/  FLAG_ENTRY("PioSbMemFifo0",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/  FLAG_ENTRY("PioSbMemFifo1",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/  FLAG_ENTRY("PioPccFifoParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/  FLAG_ENTRY("PioPecFifoParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/  FLAG_ENTRY("PioSmPktResetParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
    0,
    SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
    0,
    SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/  FLAG_ENTRY("PioPpmcPblFifo",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/  FLAG_ENTRY("PioInitSmIn",
    0,
    SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/  FLAG_ENTRY("PioHostAddrMemCor",
    0,
    SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/  FLAG_ENTRY("PioWriteDataParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/  FLAG_ENTRY("PioStateMachine",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/  FLAG_ENTRY("PioWriteQwValidParity",
    SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/  FLAG_ENTRY("PioBlockQwCountParity",
    SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/  FLAG_ENTRY("PioVlfVlLenParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/  FLAG_ENTRY("PioVlfSopParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/  FLAG_ENTRY("PioVlFifoParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/  FLAG_ENTRY("PioPpmcSopLen",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/  FLAG_ENTRY("PioPccSopHeadParity",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
    SEC_SPC_FREEZE,
    SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};
/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
    (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
    | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
        SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
        SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
        SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
        SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK)
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
        (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
        | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
        | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
    (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
    | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
    | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
        SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
        SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
        SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
        SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
        SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
        SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
        SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
        SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
        SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
        SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
        SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
        SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
        SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
        SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
        SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
        SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
        SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
        SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
        SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
        SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
        SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
        SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
        SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
        SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
        SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
        SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
        SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
        SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
        SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
        SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
        SEES(TX_READ_PIO_MEMORY_CSR_UNC))
};
/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/  FLAG_ENTRY0("Reserved", 0ull),
/* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN))
};
/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
    (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
    | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
    | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
    | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
    | SEES(TX_LAUNCH_CSR_PARITY) \
    | SEES(TX_SBRD_CTL_CSR_PARITY) \
    | SEES(TX_CONFIG_PARITY) \
    | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
    | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
    | SEES(TX_CREDIT_RETURN_PARITY))
/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};
/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/  FLAG_ENTRY("InconsistentSop",
        SEC_PACKET_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/  FLAG_ENTRY("DisallowedPacket",
        SEC_PACKET_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/  FLAG_ENTRY("WriteOverflow",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/  FLAG_ENTRY("WriteOutOfBounds",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK)
};
/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
        RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
        RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
        RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
        RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
        RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
        RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
        RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
        RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
        RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
        RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
        RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
        RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};
/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
    (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
    | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
    | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
    | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
    (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
    RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
    RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
    FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
    FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
    FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
    FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
    FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
    FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
    FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
    FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
    FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
    FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
    FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
    FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
    FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
    FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
    FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
    FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
    FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
    FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
    FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
    FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
    FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
    FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
    FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
    FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
    FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
    FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
    FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
    FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
    FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
    FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
    FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
    FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
    FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
    FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
    FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
    FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
    FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
    FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
    FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
    FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
    FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
    FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
    FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
    FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
    FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
    FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};
/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
        LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
        LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
        LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
        LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
        LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
        LCBE(REDUNDANT_FLIT_PARITY_ERR))
};
/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
    FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
    FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
    FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
    FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
    FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
    FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
    FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
    FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
    FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
                D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
    FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};
/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
    FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
    FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
    FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
    FLAG_ENTRY0("Serdes internal loopback failure",
                FAILED_SERDES_INTERNAL_LOOPBACK),
    FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
    FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
    FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
    FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
    FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
    FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
    FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
    FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
    FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT)
};
/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
    FLAG_ENTRY0("Host request done", 0x0001),
    FLAG_ENTRY0("BC SMA message", 0x0002),
    FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
    FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
    FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
    FLAG_ENTRY0("External device config request", 0x0020),
    FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
    FLAG_ENTRY0("LinkUp achieved", 0x0080),
    FLAG_ENTRY0("Link going down", 0x0100),
};
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
                               u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
                                      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
                                     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
                                  u16 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
                            u8 *tx_polarity_inversion,
                            u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
                                unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
                                          u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
                           u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
                                  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
                           unsigned int *np);
static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
/*
 * Error interrupt table entry.  This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt
 * registers.  Second tier interrupt registers have a single bit
 * representing them in the top-level CceIntStatus.
 */
struct err_reg_info {
    u32 status;     /* status CSR offset */
    u32 clear;      /* clear CSR offset */
    u32 mask;       /* mask CSR offset */
    void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
    const char *desc;
};
#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
    { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
      handler, desc }
#define DC_EE1(reg, handler, desc) \
    { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
    { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
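
/*
 * Sketch (illustrative, not the driver's actual routine) of the generic
 * "clear down" sequence an err_reg_info entry drives: read the
 * second-tier status CSR, write the bits back to the clear CSR, then
 * hand them to the entry's handler.
 *
 *	u64 reg = read_csr(dd, eri->status);
 *
 *	write_csr(dd, eri->clear, reg);
 *	if (eri->handler)
 *		eri->handler(dd, source, reg);
 */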
1073 * Table of the "misc" grouping of error interrupts. Each entry refers to
1074 * another register containing more information.
1076 static const struct err_reg_info misc_errs
[NUM_MISC_ERRS
] = {
1077 /* 0*/ EE(CCE_ERR
, handle_cce_err
, "CceErr"),
1078 /* 1*/ EE(RCV_ERR
, handle_rxe_err
, "RxeErr"),
1079 /* 2*/ EE(MISC_ERR
, handle_misc_err
, "MiscErr"),
1080 /* 3*/ { 0, 0, 0, NULL
}, /* reserved */
1081 /* 4*/ EE(SEND_PIO_ERR
, handle_pio_err
, "PioErr"),
1082 /* 5*/ EE(SEND_DMA_ERR
, handle_sdma_err
, "SDmaErr"),
1083 /* 6*/ EE(SEND_EGRESS_ERR
, handle_egress_err
, "EgressErr"),
1084 /* 7*/ EE(SEND_ERR
, handle_txe_err
, "TxeErr")
1085 /* the rest are reserved */
/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
    EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
/* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/  EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/  EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
    /* rest are reserved */
};
/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register cannot be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
/*
 * Table of the DC grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/  DC_EE1(DCC_ERR,       handle_dcc_err,        "DCC Err"),
/* 1*/  DC_EE2(DC_LCB_ERR,    handle_lcb_err,        "LCB Err"),
/* 2*/  DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/  /* dc_lbm_int - special, see is_dc_int() */
    /* the rest are reserved */
};
struct cntr_entry {
    /*
     * counter name
     */
    char *name;

    /*
     * csr to read for name (if applicable)
     */
    u64 csr;

    /*
     * offset into dd or ppd to store the counter's value
     */
    int offset;

    /*
     * flags
     */
    u8 flags;

    /*
     * accessor for stat element, context either dd or ppd
     */
    u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
                   int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
    name, csr, offset, flags, accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
          (counter * 8 + RCV_COUNTER_ARRAY32), \
          0, flags | CNTR_32BIT, \
          port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
          (counter * 8 + RCV_COUNTER_ARRAY32), \
          0, flags | CNTR_32BIT, \
          dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
          (counter * 8 + RCV_COUNTER_ARRAY64), \
          0, flags, \
          port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
          (counter * 8 + RCV_COUNTER_ARRAY64), \
          0, flags, \
          dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
          (RCV_HDR_OVFL_CNT + ctx * 0x100), \
          0, CNTR_NORMAL, port_access_u64_csr)

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
          (counter * 8 + SEND_COUNTER_ARRAY32), \
          0, flags | CNTR_32BIT, \
          port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
          (counter * 8 + SEND_COUNTER_ARRAY64), \
          0, flags, \
          port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
          counter * 8 + SEND_COUNTER_ARRAY64, \
          0, flags, \
          dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
          (counter * 8 + CCE_COUNTER_ARRAY32), \
          0, flags | CNTR_32BIT, \
          dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
          (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
          0, flags | CNTR_32BIT, \
          dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
          counter, \
          0, flags, \
          dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
          counter, \
          0, flags, \
          dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
          0, \
          0, \
          CNTR_SYNTH, \
          access_ibp_##cntr)
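
/*
 * Illustrative read of a counter through its entry (not driver code):
 * context is a dd or ppd pointer, matching what the entry's accessor
 * expects; CNTR_MODE_R reads and ignores the data argument.
 *
 *	val = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
 */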
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
    if (dd->flags & HFI1_PRESENT) {
        return readq((void __iomem *)dd->kregbase + offset);
    }
    return -1;
}
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
    if (dd->flags & HFI1_PRESENT)
        writeq(value, (void __iomem *)dd->kregbase + offset);
}

void __iomem *get_csr_addr(
    struct hfi1_devdata *dd,
    u32 offset)
{
    return (void __iomem *)dd->kregbase + offset;
}
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
                                 int mode, u64 value)
{
    u64 ret;

    if (mode == CNTR_MODE_R) {
        ret = read_csr(dd, csr);
    } else if (mode == CNTR_MODE_W) {
        write_csr(dd, csr, value);
        ret = value;
    } else {
        dd_dev_err(dd, "Invalid cntr register access mode");
        return 0;
    }

    hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
    return ret;
}
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
                              void *context, int vl, int mode, u64 data)
{
    struct hfi1_devdata *dd = context;
    u64 csr = entry->csr;

    if (entry->flags & CNTR_SDMA) {
        if (vl == CNTR_INVALID_VL)
            return 0;
        csr += 0x100 * vl;
    } else {
        if (vl != CNTR_INVALID_VL)
            return 0;
    }
    return read_write_csr(dd, csr, mode, data);
}
static u64 access_sde_err_cnt(const struct cntr_entry *entry,
                              void *context, int idx, int mode, u64 data)
{
    struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

    if (dd->per_sdma && idx < dd->num_sdma)
        return dd->per_sdma[idx].err_cnt;
    return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
                              void *context, int idx, int mode, u64 data)
{
    struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

    if (dd->per_sdma && idx < dd->num_sdma)
        return dd->per_sdma[idx].sdma_int_cnt;
    return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
                                   void *context, int idx, int mode, u64 data)
{
    struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

    if (dd->per_sdma && idx < dd->num_sdma)
        return dd->per_sdma[idx].idle_int_cnt;
    return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
                                       void *context, int idx, int mode,
                                       u64 data)
{
    struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

    if (dd->per_sdma && idx < dd->num_sdma)
        return dd->per_sdma[idx].progress_int_cnt;
    return 0;
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
                              int vl, int mode, u64 data)
{
    struct hfi1_devdata *dd = context;
    u64 val = 0;
    u64 csr = entry->csr;

    if (entry->flags & CNTR_VL) {
        if (vl == CNTR_INVALID_VL)
            return 0;
        csr += 8 * vl;
    } else {
        if (vl != CNTR_INVALID_VL)
            return 0;
    }

    val = read_write_csr(dd, csr, mode, data);
    return val;
}
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
                              int vl, int mode, u64 data)
{
    struct hfi1_devdata *dd = context;
    u32 csr = entry->csr;
    int ret = 0;

    if (vl != CNTR_INVALID_VL)
        return 0;
    if (mode == CNTR_MODE_R)
        ret = read_lcb_csr(dd, csr, &data);
    else if (mode == CNTR_MODE_W)
        ret = write_lcb_csr(dd, csr, data);

    if (ret) {
        dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
        return 0;
    }

    hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
    return data;
}
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
                               int vl, int mode, u64 data)
{
    struct hfi1_pportdata *ppd = context;

    if (vl != CNTR_INVALID_VL)
        return 0;
    return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
                               void *context, int vl, int mode, u64 data)
{
    struct hfi1_pportdata *ppd = context;
    u64 val;
    u64 csr = entry->csr;

    if (entry->flags & CNTR_VL) {
        if (vl == CNTR_INVALID_VL)
            return 0;
        csr += 8 * vl;
    } else {
        if (vl != CNTR_INVALID_VL)
            return 0;
    }
    val = read_write_csr(ppd->dd, csr, mode, data);
    return val;
}
/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
                                u64 data)
{
    u64 ret;

    if (mode == CNTR_MODE_R) {
        ret = *cntr;
    } else if (mode == CNTR_MODE_W) {
        *cntr = data;
        ret = data;
    } else {
        dd_dev_err(dd, "Invalid cntr sw access mode");
        return 0;
    }

    hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

    return ret;
}
static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
                                 int vl, int mode, u64 data)
{
    struct hfi1_pportdata *ppd = context;

    if (vl != CNTR_INVALID_VL)
        return 0;
    return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
                                 int vl, int mode, u64 data)
{
    struct hfi1_pportdata *ppd = context;

    if (vl != CNTR_INVALID_VL)
        return 0;
    return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
                                       void *context, int vl, int mode,
                                       u64 data)
{
    struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

    if (vl != CNTR_INVALID_VL)
        return 0;
    return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}

static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}
u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}

static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
		u64 __percpu *cntr,
		int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}

static u64 access_sw_pio_wait(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}
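/*
 * Illustrative sketch, not from the original source: the z_*/live counter
 * pairs above (z_int_counter/int_counter, z_rcv_limit/rcv_limit,
 * z_send_schedule/send_schedule) all follow the read_write_cpu() pattern, so
 * adding another per-CPU statistic only needs a matching accessor.  The
 * z_example/example_counter fields below are hypothetical.
 */
#if 0
static u64 access_sw_cpu_example(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_example, dd->example_counter, vl,
			      mode, data);
}
#endif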
/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[0];
}
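/*
 * Illustrative sketch, not from the original source: each accessor above
 * returns one slot of misc_err_status_cnt[], where the slot number matches
 * that error's bit position in MISC_ERR_STATUS.  Presumably the error
 * interrupt path walks the status bits and bumps the matching slot, along
 * the lines of the loop below (the function and bound names are
 * hypothetical).
 */
#if 0
static void example_count_misc_errs(struct hfi1_devdata *dd, u64 status)
{
	int i;

	for (i = 0; i < NUM_MISC_ERR_BITS; i++)	/* hypothetical bound */
		if (status & BIT_ULL(i))
			dd->misc_err_status_cnt[i]++;
}
#endif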
/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}
/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[30];
}

static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[29];
}

static u64 access_pcic_transmit_back_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[28];
}

static u64 access_pcic_transmit_front_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[27];
}

static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[26];
}

static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[25];
}

static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[24];
}

static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[23];
}

static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[22];
}

static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[21];
}

static u64 access_pcic_n_post_dat_q_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[20];
}

static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[19];
}

static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[18];
}

static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[17];
}

static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[7];
}

static u64 access_cce_cli0_async_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[6];
}

static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[5];
}

static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[4];
}

static u64 access_cce_trgt_async_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[3];
}

static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[2];
}

static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[1];
}

static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[63];
}

static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[62];
}

static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[61];
}

static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[60];
}

static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[59];
}

static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[58];
}

static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[57];
}

static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[56];
}

static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[55];
}

static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[54];
}

static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[53];
}

static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[52];
}

static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[51];
}

static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[50];
}

static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[49];
}

static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[48];
}

static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[47];
}

static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[46];
}

static u64 access_rx_hq_intr_csr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[45];
}

static u64 access_rx_lookup_csr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[44];
}

static u64 access_rx_lookup_rcv_array_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[43];
}

static u64 access_rx_lookup_rcv_array_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[42];
}

static u64 access_rx_lookup_des_part2_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[41];
}

static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[40];
}

static u64 access_rx_lookup_des_part1_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[39];
}

static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[38];
}

static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[37];
}

static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[36];
}

static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[35];
}

static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[34];
}

static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[33];
}

static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[32];
}

static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[31];
}

static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[30];
}

static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[29];
}

static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[28];
}

static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[27];
}

static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[26];
}

static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[25];
}

static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[24];
}

static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[23];
}

static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[22];
}

static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[21];
}

static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[20];
}

static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[19];
}

static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[18];
}

static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[17];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[16];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[15];
}

static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[14];
}

static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[13];
}

static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[12];
}

static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[11];
}

static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[10];
}

static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[9];
}

static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[8];
}

static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[7];
}

static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[6];
}

static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[5];
}

static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[4];
}

static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[3];
}

static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[2];
}

static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[1];
}

static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
static u64 access_pio_pec_sop_head_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[35];
}

static u64 access_pio_pcc_sop_head_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[34];
}

static u64 access_pio_last_returned_cnt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[33];
}

static u64 access_pio_current_free_cnt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[32];
}

static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[31];
}

static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[30];
}

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[0];
}
3072 * Software counters corresponding to each of the
3073 * error status bits within SendEgressErrStatus
3075 static u64
access_tx_read_pio_memory_csr_unc_err_cnt(
3076 const struct cntr_entry
*entry
,
3077 void *context
, int vl
, int mode
, u64 data
)
3079 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3081 return dd
->send_egress_err_status_cnt
[63];
3084 static u64
access_tx_read_sdma_memory_csr_err_cnt(
3085 const struct cntr_entry
*entry
,
3086 void *context
, int vl
, int mode
, u64 data
)
3088 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3090 return dd
->send_egress_err_status_cnt
[62];
3093 static u64
access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry
*entry
,
3094 void *context
, int vl
, int mode
,
3097 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3099 return dd
->send_egress_err_status_cnt
[61];
3102 static u64
access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry
*entry
,
3103 void *context
, int vl
,
3106 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3108 return dd
->send_egress_err_status_cnt
[60];
3111 static u64
access_tx_read_sdma_memory_cor_err_cnt(
3112 const struct cntr_entry
*entry
,
3113 void *context
, int vl
, int mode
, u64 data
)
3115 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3117 return dd
->send_egress_err_status_cnt
[59];
3120 static u64
access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry
*entry
,
3121 void *context
, int vl
, int mode
,
3124 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3126 return dd
->send_egress_err_status_cnt
[58];
3129 static u64
access_tx_credit_overrun_err_cnt(const struct cntr_entry
*entry
,
3130 void *context
, int vl
, int mode
,
3133 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3135 return dd
->send_egress_err_status_cnt
[57];
3138 static u64
access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry
*entry
,
3139 void *context
, int vl
, int mode
,
3142 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3144 return dd
->send_egress_err_status_cnt
[56];
3147 static u64
access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry
*entry
,
3148 void *context
, int vl
, int mode
,
3151 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3153 return dd
->send_egress_err_status_cnt
[55];
3156 static u64
access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry
*entry
,
3157 void *context
, int vl
, int mode
,
3160 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3162 return dd
->send_egress_err_status_cnt
[54];
3165 static u64
access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry
*entry
,
3166 void *context
, int vl
, int mode
,
3169 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3171 return dd
->send_egress_err_status_cnt
[53];
3174 static u64
access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry
*entry
,
3175 void *context
, int vl
, int mode
,
3178 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3180 return dd
->send_egress_err_status_cnt
[52];
3183 static u64
access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry
*entry
,
3184 void *context
, int vl
, int mode
,
3187 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3189 return dd
->send_egress_err_status_cnt
[51];
3192 static u64
access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry
*entry
,
3193 void *context
, int vl
, int mode
,
3196 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3198 return dd
->send_egress_err_status_cnt
[50];
3201 static u64
access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry
*entry
,
3202 void *context
, int vl
, int mode
,
3205 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3207 return dd
->send_egress_err_status_cnt
[49];
3210 static u64
access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry
*entry
,
3211 void *context
, int vl
, int mode
,
3214 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3216 return dd
->send_egress_err_status_cnt
[48];
3219 static u64
access_tx_credit_return_vl_err_cnt(const struct cntr_entry
*entry
,
3220 void *context
, int vl
, int mode
,
3223 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3225 return dd
->send_egress_err_status_cnt
[47];
3228 static u64
access_tx_hcrc_insertion_err_cnt(const struct cntr_entry
*entry
,
3229 void *context
, int vl
, int mode
,
3232 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3234 return dd
->send_egress_err_status_cnt
[46];
3237 static u64
access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry
*entry
,
3238 void *context
, int vl
, int mode
,
3241 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3243 return dd
->send_egress_err_status_cnt
[45];
3246 static u64
access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry
*entry
,
3247 void *context
, int vl
,
3250 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3252 return dd
->send_egress_err_status_cnt
[44];
3255 static u64
access_tx_read_sdma_memory_unc_err_cnt(
3256 const struct cntr_entry
*entry
,
3257 void *context
, int vl
, int mode
, u64 data
)
3259 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3261 return dd
->send_egress_err_status_cnt
[43];
3264 static u64
access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry
*entry
,
3265 void *context
, int vl
, int mode
,
3268 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3270 return dd
->send_egress_err_status_cnt
[42];
3273 static u64
access_tx_credit_return_partiy_err_cnt(
3274 const struct cntr_entry
*entry
,
3275 void *context
, int vl
, int mode
, u64 data
)
3277 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3279 return dd
->send_egress_err_status_cnt
[41];
3282 static u64
access_tx_launch_fifo8_unc_or_parity_err_cnt(
3283 const struct cntr_entry
*entry
,
3284 void *context
, int vl
, int mode
, u64 data
)
3286 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3288 return dd
->send_egress_err_status_cnt
[40];
3291 static u64
access_tx_launch_fifo7_unc_or_parity_err_cnt(
3292 const struct cntr_entry
*entry
,
3293 void *context
, int vl
, int mode
, u64 data
)
3295 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3297 return dd
->send_egress_err_status_cnt
[39];
3300 static u64
access_tx_launch_fifo6_unc_or_parity_err_cnt(
3301 const struct cntr_entry
*entry
,
3302 void *context
, int vl
, int mode
, u64 data
)
3304 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3306 return dd
->send_egress_err_status_cnt
[38];
3309 static u64
access_tx_launch_fifo5_unc_or_parity_err_cnt(
3310 const struct cntr_entry
*entry
,
3311 void *context
, int vl
, int mode
, u64 data
)
3313 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3315 return dd
->send_egress_err_status_cnt
[37];
3318 static u64
access_tx_launch_fifo4_unc_or_parity_err_cnt(
3319 const struct cntr_entry
*entry
,
3320 void *context
, int vl
, int mode
, u64 data
)
3322 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3324 return dd
->send_egress_err_status_cnt
[36];
3327 static u64
access_tx_launch_fifo3_unc_or_parity_err_cnt(
3328 const struct cntr_entry
*entry
,
3329 void *context
, int vl
, int mode
, u64 data
)
3331 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3333 return dd
->send_egress_err_status_cnt
[35];
3336 static u64
access_tx_launch_fifo2_unc_or_parity_err_cnt(
3337 const struct cntr_entry
*entry
,
3338 void *context
, int vl
, int mode
, u64 data
)
3340 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3342 return dd
->send_egress_err_status_cnt
[34];
3345 static u64
access_tx_launch_fifo1_unc_or_parity_err_cnt(
3346 const struct cntr_entry
*entry
,
3347 void *context
, int vl
, int mode
, u64 data
)
3349 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3351 return dd
->send_egress_err_status_cnt
[33];
3354 static u64
access_tx_launch_fifo0_unc_or_parity_err_cnt(
3355 const struct cntr_entry
*entry
,
3356 void *context
, int vl
, int mode
, u64 data
)
3358 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3360 return dd
->send_egress_err_status_cnt
[32];
3363 static u64
access_tx_sdma15_disallowed_packet_err_cnt(
3364 const struct cntr_entry
*entry
,
3365 void *context
, int vl
, int mode
, u64 data
)
3367 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3369 return dd
->send_egress_err_status_cnt
[31];
3372 static u64
access_tx_sdma14_disallowed_packet_err_cnt(
3373 const struct cntr_entry
*entry
,
3374 void *context
, int vl
, int mode
, u64 data
)
3376 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3378 return dd
->send_egress_err_status_cnt
[30];
3381 static u64
access_tx_sdma13_disallowed_packet_err_cnt(
3382 const struct cntr_entry
*entry
,
3383 void *context
, int vl
, int mode
, u64 data
)
3385 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3387 return dd
->send_egress_err_status_cnt
[29];
3390 static u64
access_tx_sdma12_disallowed_packet_err_cnt(
3391 const struct cntr_entry
*entry
,
3392 void *context
, int vl
, int mode
, u64 data
)
3394 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3396 return dd
->send_egress_err_status_cnt
[28];
3399 static u64
access_tx_sdma11_disallowed_packet_err_cnt(
3400 const struct cntr_entry
*entry
,
3401 void *context
, int vl
, int mode
, u64 data
)
3403 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3405 return dd
->send_egress_err_status_cnt
[27];
3408 static u64
access_tx_sdma10_disallowed_packet_err_cnt(
3409 const struct cntr_entry
*entry
,
3410 void *context
, int vl
, int mode
, u64 data
)
3412 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3414 return dd
->send_egress_err_status_cnt
[26];
3417 static u64
access_tx_sdma9_disallowed_packet_err_cnt(
3418 const struct cntr_entry
*entry
,
3419 void *context
, int vl
, int mode
, u64 data
)
3421 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3423 return dd
->send_egress_err_status_cnt
[25];
3426 static u64
access_tx_sdma8_disallowed_packet_err_cnt(
3427 const struct cntr_entry
*entry
,
3428 void *context
, int vl
, int mode
, u64 data
)
3430 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3432 return dd
->send_egress_err_status_cnt
[24];
3435 static u64
access_tx_sdma7_disallowed_packet_err_cnt(
3436 const struct cntr_entry
*entry
,
3437 void *context
, int vl
, int mode
, u64 data
)
3439 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3441 return dd
->send_egress_err_status_cnt
[23];
3444 static u64
access_tx_sdma6_disallowed_packet_err_cnt(
3445 const struct cntr_entry
*entry
,
3446 void *context
, int vl
, int mode
, u64 data
)
3448 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3450 return dd
->send_egress_err_status_cnt
[22];
3453 static u64
access_tx_sdma5_disallowed_packet_err_cnt(
3454 const struct cntr_entry
*entry
,
3455 void *context
, int vl
, int mode
, u64 data
)
3457 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3459 return dd
->send_egress_err_status_cnt
[21];
3462 static u64
access_tx_sdma4_disallowed_packet_err_cnt(
3463 const struct cntr_entry
*entry
,
3464 void *context
, int vl
, int mode
, u64 data
)
3466 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3468 return dd
->send_egress_err_status_cnt
[20];
3471 static u64
access_tx_sdma3_disallowed_packet_err_cnt(
3472 const struct cntr_entry
*entry
,
3473 void *context
, int vl
, int mode
, u64 data
)
3475 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3477 return dd
->send_egress_err_status_cnt
[19];
3480 static u64
access_tx_sdma2_disallowed_packet_err_cnt(
3481 const struct cntr_entry
*entry
,
3482 void *context
, int vl
, int mode
, u64 data
)
3484 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3486 return dd
->send_egress_err_status_cnt
[18];
3489 static u64
access_tx_sdma1_disallowed_packet_err_cnt(
3490 const struct cntr_entry
*entry
,
3491 void *context
, int vl
, int mode
, u64 data
)
3493 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3495 return dd
->send_egress_err_status_cnt
[17];
3498 static u64
access_tx_sdma0_disallowed_packet_err_cnt(
3499 const struct cntr_entry
*entry
,
3500 void *context
, int vl
, int mode
, u64 data
)
3502 struct hfi1_devdata
*dd
= (struct hfi1_devdata
*)context
;
3504 return dd
->send_egress_err_status_cnt
[16];
static u64 access_tx_config_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[0];
}
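
/*
 * Note on the accessors above: each one shares the cntr_entry read
 * callback signature (entry, context, vl, mode, data) but only uses the
 * context pointer, returning one fixed slot of a software shadow array.
 * The array index mirrors the bit position of the error in the
 * corresponding hardware error-status CSR (illustrative example):
 *
 *      SendEgressErrStatus bit 19 (TxSdma3DisallowedPacket)
 *              -> dd->send_egress_err_status_cnt[19]
 *
 * so the error interrupt path can bump the shadow counter for each set
 * status bit and these helpers merely report the accumulated value.
 */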

/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[0];
}

#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
                                void *context, int vl, int mode, u64 data)    \
{                                                                             \
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
        return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
                              ppd->ibport_data.rvp.cntr, vl,                  \
                              mode, data);                                    \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
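
/*
 * For reference, def_access_sw_cpu(rc_acks) above expands to roughly the
 * following (a sketch; whitespace aside):
 *
 *      static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *                      void *context, int vl, int mode, u64 data)
 *      {
 *              struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *              return read_write_cpu(ppd->dd,
 *                              &ppd->ibport_data.rvp.z_rc_acks,
 *                              ppd->ibport_data.rvp.rc_acks, vl,
 *                              mode, data);
 *      }
 */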

#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
                             void *context, int vl, int mode, u64 data)       \
{                                                                             \
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
                                                                              \
        if (vl != CNTR_INVALID_VL)                                            \
                return 0;                                                     \
                                                                              \
        return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
                             mode, data);                                     \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
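
/*
 * Each def_access_ibp_counter() instantiation above likewise generates an
 * access_ibp_<cntr>() helper (e.g. access_ibp_rnr_naks()) that returns 0
 * for any per-VL query (vl != CNTR_INVALID_VL) and otherwise hands the
 * port's rvp.n_<cntr> software counter to read_write_sw().
 */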

static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
                CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
                CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
                RCV_TID_FLOW_GEN_MISMATCH_CNT,
                CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
                CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
                RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
                CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
                CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
                CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
                CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
                CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
                CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
                CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
                CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
                CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
                CNTR_SYNTH),
[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
                CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
                CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
                CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
                DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
                DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
                CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
                DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
                CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
                CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
                CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
                CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
                CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
                CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
                CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
                CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
                CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
        DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
                         CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
                CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
                CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
                CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
                CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
        DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
                         CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
                CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
                CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
        DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
                         CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
        DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
                         CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
        DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
                         CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
        DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
                         CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
        DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
                         CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
                CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
        DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
                         CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
        DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
        DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
                         CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
        DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
        DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
        DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
        DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
        DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
                         CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
        DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
        DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
                         CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
                access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
                access_sw_cpu_rcv_limit),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
                access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
                access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
                access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
                access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
                access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
                SEND_DMA_DESC_FETCHED_CNT, 0,
                CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
                CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
                CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
                CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
                CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR",
                0, 0, CNTR_NORMAL,
                access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR",
                0, 0, CNTR_NORMAL,
                access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt",
                0, 0, CNTR_NORMAL,
                access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
                CNTR_NORMAL,
                access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
                CNTR_NORMAL,
                access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
                CNTR_NORMAL,
                access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
                CNTR_NORMAL,
                access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr",
                0, 0, CNTR_NORMAL,
                access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr",
                0, 0, CNTR_NORMAL,
                access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
                CNTR_NORMAL,
                access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
                CNTR_NORMAL,
                access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr",
                0, 0, CNTR_NORMAL,
                access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr",
                0, 0, CNTR_NORMAL,
                access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
                "CceCli1AsyncFifoDbgParityError", 0, 0,
                CNTR_NORMAL,
                access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
                "CceCli1AsyncFifoRxdmaParityError", 0, 0,
                CNTR_NORMAL,
                access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
                "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
                CNTR_NORMAL,
                access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
                "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
                CNTR_NORMAL,
                access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr",
                0, 0, CNTR_NORMAL,
                access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
                CNTR_NORMAL,
                access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr",
                0, 0, CNTR_NORMAL,
                access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
                CNTR_NORMAL,
                access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
                CNTR_NORMAL,
                access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr",
                0, 0, CNTR_NORMAL,
                access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
                CNTR_NORMAL,
                access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr",
                0, 0, CNTR_NORMAL,
                access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr",
                0, 0, CNTR_NORMAL,
                access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
                "RxRbufFlInitWrAddrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr",
                0, 0, CNTR_NORMAL,
                access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr",
                0, 0, CNTR_NORMAL,
                access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
                CNTR_NORMAL,
                access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
                CNTR_NORMAL,
                access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
                CNTR_NORMAL,
                access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
                "RxRbufCsrQNumOfPktParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
                "RxRbufCsrQTlPtrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr",
                0, 0, CNTR_NORMAL,
                access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr",
                0, 0, CNTR_NORMAL,
                access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
                0, 0, CNTR_NORMAL,
                access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr",
                0, 0, CNTR_NORMAL,
                access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
                "RxRbufCsrQHeadBufNumParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr",
                0, 0, CNTR_NORMAL,
                access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr",
                0, 0, CNTR_NORMAL,
                access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
                "RxRbufLookupDesRegUncCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr",
                0, 0, CNTR_NORMAL,
                access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
                0, 0, CNTR_NORMAL,
                access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr",
                0, 0, CNTR_NORMAL,
                access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
                CNTR_NORMAL,
                access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
                CNTR_NORMAL,
                access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
                CNTR_NORMAL,
                access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
                CNTR_NORMAL,
                access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
                CNTR_NORMAL,
                access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
                CNTR_NORMAL,
                access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr",
                0, 0, CNTR_NORMAL,
                access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
                CNTR_NORMAL,
                access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
                CNTR_NORMAL,
                access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr",
                0, 0, CNTR_NORMAL,
                access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
                CNTR_NORMAL,
                access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
                CNTR_NORMAL,
                access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
                CNTR_NORMAL,
                access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
                CNTR_NORMAL,
                access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
                "PioSbrdctrlCrrelFifoParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
                CNTR_NORMAL,
                access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
                CNTR_NORMAL,
                access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
                CNTR_NORMAL,
                access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr",
                0, 0, CNTR_NORMAL,
                access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr",
                0, 0, CNTR_NORMAL,
                access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr",
                0, 0, CNTR_NORMAL,
                access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr",
                0, 0, CNTR_NORMAL,
                access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
                CNTR_NORMAL,
                access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
                CNTR_NORMAL,
                access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
                CNTR_NORMAL,
                access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
                CNTR_NORMAL,
                access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
                CNTR_NORMAL,
                access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
                CNTR_NORMAL,
                access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
                CNTR_NORMAL,
                access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_VL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
                CNTR_NORMAL,
                access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
                "TxSbrdCtlStateMachineParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
                CNTR_NORMAL,
                access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
                CNTR_NORMAL,
                access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
                CNTR_NORMAL,
                access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
                CNTR_NORMAL,
                access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
                CNTR_NORMAL,
                access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
                "EgressFifoUnderrunOrParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_egress_fifi_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
                CNTR_NORMAL,
                access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
                CNTR_NORMAL,
                access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
                CNTR_NORMAL,
                access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
                CNTR_NORMAL,
                access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
                0, 0, CNTR_NORMAL,
                access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
                CNTR_NORMAL,
                access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
                CNTR_NORMAL,
                access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
                0, 0, CNTR_NORMAL,
                access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
                0, 0, CNTR_NORMAL,
                access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
                CNTR_NORMAL,
                access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr",
                0, 0, CNTR_NORMAL,
                access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_wrong_dw_err_cnt),
};
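
/*
 * Both counter tables use C99 designated initializers keyed by the C_*
 * enums, so each entry lands at its index regardless of listing order.
 * A CNTR_ELEM() packs (sketch of the fields; see the macro definition
 * for the authoritative layout) a display name, a CSR offset, a shift,
 * CNTR_* mode flags, and the accessor invoked with the device or port
 * structure as the opaque context argument.
 */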

static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
                CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
                CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
                CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
                CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
                CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
                CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
                CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
                access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
                access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
                access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
                access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
                CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
                access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
                access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
                access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
                access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
                access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
                access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
5089 /* ======================================================================== */
/* return true if this is chip revision A */
int is_ax(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0;
}
/* return true if this is chip revision B */
int is_bx(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xF0) == 0x10;
}
/*
 * Append string s to buffer buf.  Arguments curp and len are the current
 * position and remaining length, respectively.
 *
 * return 0 on success, 1 on out of room
 */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
{
	char *p = *curp;
	int len = *lenp;
	int result = 0; /* success */
	char c;

	/* add a comma, if not first in the buffer */
	if (p != buf) {
		if (len == 0) {
			result = 1; /* out of room */
			goto done;
		}
		*p++ = ',';
		len--;
	}

	/* copy the string */
	while ((c = *s++) != 0) {
		if (len == 0) {
			result = 1; /* out of room */
			goto done;
		}
		*p++ = c;
		len--;
	}

done:
	/* write return values */
	*curp = p;
	*lenp = len;

	return result;
}
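
/*
 * Usage note (editorial): flag_string() below is the driver of
 * append_str() - it threads the same position/length pair through
 * successive calls, so each decoded flag name lands comma-separated in
 * a single pass and the caller learns about overflow from the return
 * value rather than from a silently truncated buffer.
 */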
/*
 * Using the given flag table, print a comma separated string into
 * the buffer.  End in '*' if the buffer is too short.
 */
static char *flag_string(char *buf, int buf_len, u64 flags,
			 struct flag_table *table, int table_size)
{
	char extra[32];
	char *p = buf;
	int len = buf_len;
	int no_room = 0;
	int i;

	/* make sure there is at least 2 so we can form "*" */
	if (len < 2)
		return "";

	len--;	/* leave room for a nul */
	for (i = 0; i < table_size; i++) {
		if (flags & table[i].flag) {
			no_room = append_str(buf, &p, &len, table[i].str);
			if (no_room)
				break;
			flags &= ~table[i].flag;
		}
	}

	/* any undocumented bits left? */
	if (!no_room && flags) {
		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
		no_room = append_str(buf, &p, &len, extra);
	}

	/* add * if ran out of room */
	if (no_room) {
		/* may need to back up to add space for a '*' */
		if (len == 0)
			--p;
		*p++ = '*';
	}

	/* add final nul - space already allocated above */
	*p = 0;
	return buf;
}
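
/*
 * Illustrative example (hypothetical table, not from this driver):
 * given struct flag_table t[] = { { 0x1ull, "ParityErr" },
 * { 0x2ull, "OverflowErr" } }, the call
 * flag_string(buf, sizeof(buf), 0x7, t, 2) produces
 * "ParityErr,OverflowErr,bits 0x4" - the undocumented bit 0x4 is
 * reported through the "bits 0x%llx" catch-all above.
 */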
/* first 8 CCE error interrupt source names */
static const char * const cce_misc_names[] = {
	"CceErrInt",	/* 0 */
	"RxeErrInt",	/* 1 */
	"MiscErrInt",	/* 2 */
	"Reserved3",	/* 3 */
	"PioErrInt",	/* 4 */
	"SDmaErrInt",	/* 5 */
	"EgressErrInt",	/* 6 */
	"TxeErrInt"	/* 7 */
};
/*
 * Return the miscellaneous error interrupt name.
 */
static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(cce_misc_names))
		strncpy(buf, cce_misc_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u",
			 source + IS_GENERAL_ERR_START);

	return buf;
}
/*
 * Return the SDMA engine error interrupt name.
 */
static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
	return buf;
}

/*
 * Return the send context error interrupt name.
 */
static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
	return buf;
}
static const char * const various_names[] = {
	"PbcInt",
	"GpioAssertInt",
	"Qsfp1Int",
	"Qsfp2Int",
	"TCritInt"
};

/*
 * Return the various interrupt name.
 */
static char *is_various_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(various_names))
		strncpy(buf, various_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
	return buf;
}
/*
 * Return the DC interrupt name.
 */
static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
{
	static const char * const dc_int_names[] = {
		"common",
		"lcb",
		"8051",
		"lbm"	/* local block merge */
	};

	if (source < ARRAY_SIZE(dc_int_names))
		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
	else
		snprintf(buf, bsize, "DCInt%u", source);
	return buf;
}
static const char * const sdma_int_names[] = {
	"SDmaInt",
	"SdmaIdleInt",
	"SdmaProgressInt",
};

/*
 * Return the SDMA engine interrupt name.
 */
static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
{
	/* what interrupt */
	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

	if (likely(what < 3))
		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
	else
		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
	return buf;
}
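
/*
 * Worked example: with TXE_NUM_SDMA_ENGINES == 16, source 17 decodes
 * to what = 17 / 16 = 1 and which = 17 % 16 = 1, producing
 * "SdmaIdleInt1" - the idle interrupt of SDMA engine 1.
 */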
/*
 * Return the receive available interrupt name.
 */
static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvAvailInt%u", source);
	return buf;
}

/*
 * Return the receive urgent interrupt name.
 */
static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvUrgentInt%u", source);
	return buf;
}

/*
 * Return the send credit interrupt name.
 */
static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCreditInt%u", source);
	return buf;
}

/*
 * Return the reserved interrupt name.
 */
static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
	return buf;
}
static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   cce_err_status_flags,
			   ARRAY_SIZE(cce_err_status_flags));
}

static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   rxe_err_status_flags,
			   ARRAY_SIZE(rxe_err_status_flags));
}

static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, misc_err_status_flags,
			   ARRAY_SIZE(misc_err_status_flags));
}

static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   pio_err_status_flags,
			   ARRAY_SIZE(pio_err_status_flags));
}

static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sdma_err_status_flags,
			   ARRAY_SIZE(sdma_err_status_flags));
}

static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_status_flags,
			   ARRAY_SIZE(egress_err_status_flags));
}

static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_info_flags,
			   ARRAY_SIZE(egress_err_info_flags));
}

static char *send_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   send_err_status_flags,
			   ARRAY_SIZE(send_err_status_flags));
}
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	/*
	 * For most of these errors, there is nothing that can be done except
	 * report or record it.
	 */
	dd_dev_info(dd, "CCE Error: %s\n",
		    cce_err_status_string(buf, sizeof(buf), reg));

	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
		/* this error requires a manual drop into SPC freeze mode */
		/* then a fix up */
		start_freeze_handling(dd->pport, FREEZE_SELF);
	}

	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i)) {
			incr_cntr64(&dd->cce_err_status_cnt[i]);
			/* maintain a counter over all cce_err_status errors */
			incr_cntr64(&dd->sw_cce_err_status_aggregate);
		}
	}
}
/*
 * Check counters for receive errors that do not have an interrupt
 * associated with them.
 */
#define RCVERR_CHECK_TIME 10
static void update_rcverr_timer(unsigned long opaque)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
	struct hfi1_pportdata *ppd = dd->pport;
	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);

	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
		set_link_down_reason(
			ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
			OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
		queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
	}
	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;

	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}
static int init_rcverr(struct hfi1_devdata *dd)
{
	setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
	/* Assume the hardware counter has been reset */
	dd->rcv_ovfl_cnt = 0;
	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

static void free_rcverr(struct hfi1_devdata *dd)
{
	if (dd->rcverr_timer.data)
		del_timer_sync(&dd->rcverr_timer);
	dd->rcverr_timer.data = 0;
}
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Receive Error: %s\n",
		    rxe_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_RXE_FREEZE_ERR) {
		int flags = 0;

		/*
		 * Freeze mode recovery is disabled for the errors
		 * in RXE_FREEZE_ABORT_MASK
		 */
		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
			flags = FREEZE_ABORT;

		start_freeze_handling(dd->pport, flags);
	}

	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->rcv_err_status_cnt[i]);
	}
}
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Misc Error: %s\n",
		    misc_err_status_string(buf, sizeof(buf), reg));
	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->misc_err_status_cnt[i]);
	}
}
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "PIO Error: %s\n",
		    pio_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_PIO_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
	}
}

static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "SDMA Error: %s\n",
		    sdma_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_SDMA_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
	}
}
static inline void __count_port_discards(struct hfi1_pportdata *ppd)
{
	incr_cntr64(&ppd->port_xmit_discards);
}

static void count_port_inactive(struct hfi1_devdata *dd)
{
	__count_port_discards(dd->pport);
}
/*
 * We have had a "disallowed packet" error during egress. Determine the
 * integrity check which failed, and update relevant error counter, etc.
 *
 * Note that the SEND_EGRESS_ERR_INFO register has only a single
 * bit of state per integrity check, and so we can miss the reason for an
 * egress error if more than one packet fails the same integrity check
 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
 */
static void handle_send_egress_err_info(struct hfi1_devdata *dd,
					int vl)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
	char buf[96];

	/* clear down all observed info as quickly as possible after read */
	write_csr(dd, SEND_EGRESS_ERR_INFO, info);

	dd_dev_info(dd,
		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
		    info, egress_err_info_string(buf, sizeof(buf), info), src);

	/* Eventually add other counters for each bit */
	if (info & PORT_DISCARD_EGRESS_ERRS) {
		int weight, i;

		/*
		 * Count all applicable bits as individual errors and
		 * attribute them to the packet that triggered this handler.
		 * This may not be completely accurate due to limitations
		 * on the available hardware error information.  There is
		 * a single information register and any number of error
		 * packets may have occurred and contributed to it before
		 * this routine is called.  This means that:
		 * a) If multiple packets with the same error occur before
		 *    this routine is called, earlier packets are missed.
		 *    There is only a single bit for each error type.
		 * b) Errors may not be attributed to the correct VL.
		 *    The driver is attributing all bits in the info register
		 *    to the packet that triggered this call, but bits
		 *    could be an accumulation of different packets with
		 *    different VLs.
		 * c) A single error packet may have multiple counts attached
		 *    to it.  There is no way for the driver to know if
		 *    multiple bits set in the info register are due to a
		 *    single packet or multiple packets.  The driver assumes
		 *    multiple packets.
		 */
		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
		for (i = 0; i < weight; i++) {
			__count_port_discards(ppd);
			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
			else if (vl == 15)
				incr_cntr64(&ppd->port_xmit_discards_vl
					    [C_VL_15]);
		}
	}
}
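
/*
 * Worked example of the attribution above: if three of the
 * PORT_DISCARD_EGRESS_ERRS bits are set in info, hweight64() returns 3
 * and the loop charges three discards to the port (and, when the VL is
 * known, three to that VL's counter) - even though those bits may have
 * come from one packet or from three different ones.
 */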
/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register. Does it represent a 'port inactive' error?
 */
static inline int port_inactive_err(u64 posn)
{
	return (posn >= SEES(TX_LINKDOWN) &&
		posn <= SEES(TX_INCORRECT_LINK_STATE));
}

/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register. Does it represent a 'disallowed packet' error?
 */
static inline int disallowed_pkt_err(int posn)
{
	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
}

/*
 * Input value is a bit position of one of the SDMA engine disallowed
 * packet errors.  Return which engine.  Use of this must be guarded by
 * disallowed_pkt_err().
 */
static inline int disallowed_pkt_engine(int posn)
{
	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
}
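
/*
 * Worked example: the disallowed-packet status bits are contiguous, so
 * a bit position of SEES(TX_SDMA3_DISALLOWED_PACKET) maps to engine
 * SEES(TX_SDMA3_DISALLOWED_PACKET) - SEES(TX_SDMA0_DISALLOWED_PACKET),
 * i.e. engine 3.
 */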
/*
 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
 * be done.
 */
static int engine_to_vl(struct hfi1_devdata *dd, int engine)
{
	struct sdma_vl_map *m;
	int vl;

	/* range check */
	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
		return -1;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	vl = m->engine_to_vl[engine];
	rcu_read_unlock();

	return vl;
}
/*
 * Translate the send context (software index) into a VL.  Return -1 if the
 * translation cannot be done.
 */
static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
{
	struct send_context_info *sci;
	struct send_context *sc;
	int i;

	sci = &dd->send_contexts[sw_index];

	/* there is no information for user (PSM) and ack contexts */
	if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
		return -1;

	sc = sci->sc;
	if (!sc)
		return -1;
	if (dd->vld[15].sc == sc)
		return 15;
	for (i = 0; i < num_vls; i++)
		if (dd->vld[i].sc == sc)
			return i;

	return -1;
}
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 reg_copy = reg, handled = 0;
	char buf[96];
	int i = 0;

	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);
	else if (is_ax(dd) &&
		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
		start_freeze_handling(dd->pport, 0);

	while (reg_copy) {
		int posn = fls64(reg_copy);
		/* fls64() returns a 1-based offset, we want it zero based */
		int shift = posn - 1;
		u64 mask = 1ULL << shift;

		if (port_inactive_err(shift)) {
			count_port_inactive(dd);
			handled |= mask;
		} else if (disallowed_pkt_err(shift)) {
			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));

			handle_send_egress_err_info(dd, vl);
			handled |= mask;
		}
		reg_copy &= ~mask;
	}

	reg &= ~handled;

	if (reg)
		dd_dev_info(dd, "Egress Error: %s\n",
			    egress_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
	}
}
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Send Error: %s\n",
		    send_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_err_status_cnt[i]);
	}
}
/*
 * The maximum number of times the error clear down will loop before
 * blocking a repeating error.  This value is arbitrary.
 */
#define MAX_CLEAR_COUNT 20

/*
 * Clear and handle an error register.  All error interrupts are funneled
 * through here to have a central location to correctly handle single-
 * or multi-shot errors.
 *
 * For non per-context registers, call this routine with a context value
 * of 0 so the per-context offset is zero.
 *
 * If the handler loops too many times, assume that something is wrong
 * and can't be fixed, so mask the error bits.
 */
static void interrupt_clear_down(struct hfi1_devdata *dd,
				 u32 context,
				 const struct err_reg_info *eri)
{
	u64 reg;
	u32 count;

	/* read in a loop until no more errors are seen */
	count = 0;
	while (1) {
		reg = read_kctxt_csr(dd, context, eri->status);
		if (reg == 0)
			break;
		write_kctxt_csr(dd, context, eri->clear, reg);
		if (likely(eri->handler))
			eri->handler(dd, context, reg);
		count++;
		if (count > MAX_CLEAR_COUNT) {
			u64 mask;

			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
				   eri->desc, reg);
			/*
			 * Read-modify-write so any other masked bits
			 * stay masked.
			 */
			mask = read_kctxt_csr(dd, context, eri->mask);
			mask &= ~reg;
			write_kctxt_csr(dd, context, eri->mask, mask);
			break;
		}
	}
}
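
/*
 * Worked example of the masking step: if bit 5 keeps reasserting, then
 * after MAX_CLEAR_COUNT (20) passes reg == 0x20, so the read-modify-write
 * above clears only bit 5 in the enable mask while any bits that were
 * masked earlier stay masked.
 */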
/*
 * CCE block "misc" interrupt.  Source is < 16.
 */
static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &misc_errs[source];

	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
	} else {
		dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
			   source);
	}
}
static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sc_err_status_flags,
			   ARRAY_SIZE(sc_err_status_flags));
}
/*
 * Send context error interrupt.  Source (hw_context) is < 160.
 *
 * All send context errors cause the send context to halt.  The normal
 * clear-down mechanism cannot be used because we cannot clear the
 * error bits until several other long-running items are done first.
 * This is OK because with the context halted, nothing else is going
 * to happen on it anyway.
 */
static void is_sendctxt_err_int(struct hfi1_devdata *dd,
				unsigned int hw_context)
{
	struct send_context_info *sci;
	struct send_context *sc;
	char flags[96];
	u64 status;
	u32 sw_index;
	int i = 0;

	sw_index = dd->hw_to_sw[hw_context];
	if (sw_index >= dd->num_send_contexts) {
		dd_dev_err(dd,
			   "out of range sw index %u for send context %u\n",
			   sw_index, hw_context);
		return;
	}
	sci = &dd->send_contexts[sw_index];
	sc = sci->sc;
	if (!sc) {
		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
			   sw_index, hw_context);
		return;
	}

	/* tell the software that a halt has begun */
	sc_stop(sc, SCF_HALTED);

	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);

	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
		    send_context_err_status_string(flags, sizeof(flags),
						   status));

	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));

	/*
	 * Automatically restart halted kernel contexts out of interrupt
	 * context.  User contexts must ask the driver to restart the context.
	 */
	if (sc->type != SC_USER)
		queue_work(dd->pport->hfi1_wq, &sc->halt_work);

	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over all
	 * 160 contexts.
	 */
	for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
	}
}
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int source, u64 status)
{
	struct sdma_engine *sde;
	int i = 0;

	sde = &dd->per_sdma[source];
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
		   sde->this_idx, source, (unsigned long long)status);
#endif
	sde->err_cnt++;
	sdma_engine_error(sde, status);

	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over
	 * all 16 DMA engines.
	 */
	for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
	}
}
/*
 * CCE block SDMA error interrupt.  Source is < 16.
 */
static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
{
#ifdef CONFIG_SDMA_VERBOSITY
	struct sdma_engine *sde = &dd->per_sdma[source];

	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
		   source);
	sdma_dumpstate(sde);
#endif
	interrupt_clear_down(dd, source, &sdma_eng_err);
}
/*
 * CCE block "various" interrupt.  Source is < 8.
 */
static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &various_err[source];

	/*
	 * TCritInt cannot go through interrupt_clear_down()
	 * because it is not a second tier interrupt. The handler
	 * should be called directly.
	 */
	if (source == TCRIT_INT_SOURCE)
		handle_temp_err(dd);
	else if (eri->handler)
		interrupt_clear_down(dd, 0, eri);
	else
		dd_dev_info(dd,
			    "%s: Unimplemented/reserved interrupt %d\n",
			    __func__, source);
}
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
{
	/* src_ctx is always zero */
	struct hfi1_pportdata *ppd = dd->pport;
	unsigned long flags;
	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);

	if (reg & QSFP_HFI0_MODPRST_N) {
		if (!qsfp_mod_present(ppd)) {
			dd_dev_info(dd, "%s: QSFP module removed\n",
				    __func__);

			ppd->driver_link_ready = 0;
			/*
			 * Cable removed, reset all our information about the
			 * cache and cable capabilities
			 */

			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			/*
			 * We don't set cache_refresh_required here as we expect
			 * an interrupt when a cable is inserted
			 */
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.reset_needed = 0;
			ppd->qsfp_info.limiting_active = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
			/* Invert the ModPresent pin now to detect plug-in */
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			if ((ppd->offline_disabled_reason >
			  HFI1_ODR_MASK(
			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
			  (ppd->offline_disabled_reason ==
			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
				ppd->offline_disabled_reason =
				HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);

			if (ppd->host_link_state == HLS_DN_POLL) {
				/*
				 * The link is still in POLL. This means
				 * that the normal link down processing
				 * will not happen. We have to do it here
				 * before turning the DC off.
				 */
				queue_work(ppd->hfi1_wq, &ppd->link_down_work);
			}
		} else {
			dd_dev_info(dd, "%s: QSFP module inserted\n",
				    __func__);

			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.cache_refresh_required = 1;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);

			/*
			 * Stop inversion of ModPresent pin to detect
			 * removal of the cable
			 */
			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			ppd->offline_disabled_reason =
				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
		}
	}

	if (reg & QSFP_HFI0_INT_N) {
		dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
			    __func__);
		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
		ppd->qsfp_info.check_interrupt_flags = 1;
		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
	}

	/* Schedule the QSFP work only if there is a cable attached. */
	if (qsfp_mod_present(ppd))
		queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
}
static int request_host_lcb_access(struct hfi1_devdata *dd)
{
	int ret;

	ret = do_8051_command(dd, HCMD_MISC,
			      (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			   __func__, ret);
	}
	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}

static int request_8051_lcb_access(struct hfi1_devdata *dd)
{
	int ret;

	ret = do_8051_command(dd, HCMD_MISC,
			      (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			   __func__, ret);
	}
	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}
/*
 * Set the LCB selector - allow host access.  The DCC selector always
 * points to the host.
 */
static inline void set_host_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
		  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
}

/*
 * Clear the LCB selector - allow 8051 access.  The DCC selector always
 * points to the host.
 */
static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
}
/*
 * Acquire LCB access from the 8051.  If the host already has access,
 * just increment a counter.  Otherwise, inform the 8051 that the
 * host is taking access.
 *
 * Returns:
 *	0 on success
 *	-EBUSY if the 8051 has control and cannot be disturbed
 *	-errno if unable to acquire access from the 8051
 */
int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	struct hfi1_pportdata *ppd = dd->pport;
	int ret = 0;

	/*
	 * Use the host link state lock so the operation of this routine
	 * { link state check, selector change, count increment } can occur
	 * as a unit against a link state change.  Otherwise there is a
	 * race between the state change and the count increment.
	 */
	if (sleep_ok) {
		mutex_lock(&ppd->hls_lock);
	} else {
		while (!mutex_trylock(&ppd->hls_lock))
			udelay(1);
	}

	/* this access is valid only when the link is up */
	if (ppd->host_link_state & HLS_DOWN) {
		dd_dev_info(dd, "%s: link state %s not up\n",
			    __func__, link_state_name(ppd->host_link_state));
		ret = -EBUSY;
		goto done;
	}

	if (dd->lcb_access_count == 0) {
		ret = request_host_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				   "%s: unable to acquire LCB access, err %d\n",
				   __func__, ret);
			goto done;
		}
		set_host_lcb_access(dd);
	}
	dd->lcb_access_count++;
done:
	mutex_unlock(&ppd->hls_lock);
	return ret;
}
/*
 * Release LCB access by decrementing the use count.  If the count is moving
 * from 1 to 0, inform 8051 that it has control back.
 *
 * Returns:
 *	0 on success
 *	-errno if unable to release access to the 8051
 */
int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	int ret = 0;

	/*
	 * Use the host link state lock because the acquire needed it.
	 * Here, we only need to keep { selector change, count decrement }
	 * as a unit.
	 */
	if (sleep_ok) {
		mutex_lock(&dd->pport->hls_lock);
	} else {
		while (!mutex_trylock(&dd->pport->hls_lock))
			udelay(1);
	}

	if (dd->lcb_access_count == 0) {
		dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
			   __func__);
		goto done;
	}

	if (dd->lcb_access_count == 1) {
		set_8051_lcb_access(dd);
		ret = request_8051_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				   "%s: unable to release LCB access, err %d\n",
				   __func__, ret);
			/* restore host access if the grant didn't work */
			set_host_lcb_access(dd);
			goto done;
		}
	}
	dd->lcb_access_count--;
done:
	mutex_unlock(&dd->pport->hls_lock);
	return ret;
}
/*
 * Initialize LCB access variables and state.  Called during driver load,
 * after most of the initialization is finished.
 *
 * The DC default is LCB access on for the host.  The driver defaults to
 * leaving access to the 8051.  Assign access now - this constrains the call
 * to this routine to be after all LCB set-up is done.  In particular, after
 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
 */
static void init_lcb_access(struct hfi1_devdata *dd)
{
	dd->lcb_access_count = 0;
}
/*
 * Write a response back to an 8051 request.
 */
static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
{
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
		  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
		  (u64)return_code <<
		  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
		  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
}
/*
 * Handle host requests from the 8051.
 */
static void handle_8051_request(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	u16 data = 0;
	u8 type;

	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
	if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
		return;	/* no request */

	/* zero out COMPLETED so the response is seen */
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);

	/* extract request details */
	type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
		& DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
	data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
		& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;

	switch (type) {
	case HREQ_LOAD_CONFIG:
	case HREQ_SAVE_CONFIG:
	case HREQ_READ_CONFIG:
	case HREQ_SET_TX_EQ_ABS:
	case HREQ_SET_TX_EQ_REL:
		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
			    type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	case HREQ_CONFIG_DONE:
		hreq_response(dd, HREQ_SUCCESS, 0);
		break;
	case HREQ_INTERFACE_TEST:
		hreq_response(dd, HREQ_SUCCESS, data);
		break;
	default:
		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	}
}
static void write_global_credit(struct hfi1_devdata *dd,
				u8 vau, u16 total, u16 shared)
{
	write_csr(dd, SEND_CM_GLOBAL_CREDIT,
		  ((u64)total <<
		   SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
		  ((u64)shared <<
		   SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
		  ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
}
/*
 * Set up initial VL15 credits of the remote.  Assumes the rest of
 * the CM credit registers are zero from a previous global or credit reset.
 */
void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
{
	/* leave shared count at zero for both global and VL15 */
	write_global_credit(dd, vau, vl15buf, 0);

	/*
	 * We may need some credits for another VL when sending packets
	 * with the snoop interface. Dividing it down the middle for VL15
	 * and VL0 should suffice.
	 */
	if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
		write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
			  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
		write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
			  << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
	} else {
		write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
			  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
	}
}
/*
 * Zero all credit details from the previous connection and
 * reset the CM manager's internal counters.
 */
void reset_link_credits(struct hfi1_devdata *dd)
{
	int i;

	/* remove all previous VL credit limits */
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
	write_global_credit(dd, 0, 0, 0);
	/* reset the CM block */
	pio_send_control(dd, PSC_CM_RESET);
}
/* convert a vCU to a CU */
static u32 vcu_to_cu(u8 vcu)
{
	return 1 << vcu;
}

/* convert a CU to a vCU */
static u8 cu_to_vcu(u32 cu)
{
	return ilog2(cu);
}

/* convert a vAU to an AU */
static u32 vau_to_au(u8 vau)
{
	return 8 * (1 << vau);
}
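
/*
 * Worked example: the v* encodings are powers of two.  A vAU of 2 is an
 * allocation unit of 8 * (1 << 2) = 32 bytes; a vCU of 3 is a credit
 * unit of 1 << 3 = 8 credits, and cu_to_vcu(8) recovers 3 via ilog2().
 */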
static void set_linkup_defaults(struct hfi1_pportdata *ppd)
{
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;
}
/*
 * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
 */
static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
{
	u64 reg;

	/* clear lcb run: LCB_CFG_RUN.EN = 0 */
	write_csr(dd, DC_LCB_CFG_RUN, 0);
	/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
		  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
	/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
	reg = read_csr(dd, DCC_CFG_RESET);
	write_csr(dd, DCC_CFG_RESET, reg |
		  (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
		  (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
	(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
	if (!abort) {
		udelay(1);    /* must hold for the longer of 16cclks or 20ns */
		write_csr(dd, DCC_CFG_RESET, reg);
		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	}
}
/*
 * This routine should be called after the link has been transitioned to
 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
 * reset).
 *
 * The expectation is that the caller of this routine would have taken
 * care of properly transitioning the link into the correct state.
 */
static void dc_shutdown(struct hfi1_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->dc8051_lock, flags);
	if (dd->dc_shutdown) {
		spin_unlock_irqrestore(&dd->dc8051_lock, flags);
		return;
	}
	dd->dc_shutdown = 1;
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
	/* Shutdown the LCB */
	lcb_shutdown(dd, 1);
	/*
	 * Going to OFFLINE would have caused the 8051 to put the
	 * SerDes into reset already. Just need to shut down the 8051,
	 * itself.
	 */
	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
}
/*
 * Calling this after the DC has been brought out of reset should not
 * do any damage.
 */
static void dc_start(struct hfi1_devdata *dd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dd->dc8051_lock, flags);
	if (!dd->dc_shutdown)
		goto done;
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
	/* Take the 8051 out of reset */
	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
	/* Wait until 8051 is ready */
	ret = wait_fm_ready(dd, TIMEOUT_8051_START);
	if (ret) {
		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
			   __func__);
	}
	/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
	write_csr(dd, DCC_CFG_RESET, 0x10);
	/* lcb_shutdown() with abort=1 does not restore these */
	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	spin_lock_irqsave(&dd->dc8051_lock, flags);
	dd->dc_shutdown = 0;
done:
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
}
/*
 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
 */
static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
{
	u64 rx_radr, tx_radr;
	u32 version;

	if (dd->icode != ICODE_FPGA_EMULATION)
		return;

	/*
	 * These LCB defaults on emulator _s are good, nothing to do here:
	 *	LCB_CFG_TX_FIFOS_RADR
	 *	LCB_CFG_RX_FIFOS_RADR
	 *	LCB_CFG_LN_DCLK
	 *	LCB_CFG_IGNORE_LOST_RCLK
	 */
	if (is_emulator_s(dd))
		return;
	/* else this is _p */

	version = emulator_rev(dd);
	if (!is_ax(dd))
		version = 0x2d;	/* all B0 use 0x2d or higher settings */

	if (version <= 0x12) {
		/* release 0x12 and below */

		/*
		 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
		 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
		 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
		 */
		rx_radr =
		      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		/*
		 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
		 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
		 */
		tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version <= 0x18) {
		/* release 0x13 up to 0x18 */
		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x19) {
		/* release 0x19 */
		/* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
		rx_radr =
		      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x1a) {
		/* release 0x1a */
		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
	} else {
		/* release 0x1b and higher */
		/* LCB_CFG_RX_FIFOS_RADR = 0x877 */
		rx_radr =
		      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	}

	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
	/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
}
/*
 * Handle a SMA idle message
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_sma_message(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  sma_message_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 msg;
	int ret;

	/*
	 * msg is bytes 1-4 of the 40-bit idle message - the command code
	 * is stripped off
	 */
	ret = read_idle_sma(dd, &msg);
	if (ret)
		return;
	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
	/*
	 * React to the SMA message.  Byte[1] (0 for us) is the command.
	 */
	switch (msg & 0xff) {
	case SMA_IDLE_ARM:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
		 *
		 * Only expected in INIT or ARMED, discard otherwise.
		 */
		if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
			ppd->neighbor_normal = 1;
		break;
	case SMA_IDLE_ACTIVE:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
		 *
		 * Can activate the node.  Discard otherwise.
		 */
		if (ppd->host_link_state == HLS_UP_ARMED &&
		    ppd->is_active_optimize_enabled) {
			ppd->neighbor_normal = 1;
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (ret)
				dd_dev_err(
					dd,
					"%s: received Active SMA idle message, couldn't set link to Active\n",
					__func__);
		}
		break;
	default:
		dd_dev_err(dd,
			   "%s: received unexpected SMA idle message 0x%llx\n",
			   __func__, msg);
		break;
	}
}
static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
{
	u64 rcvctrl;
	unsigned long flags;

	spin_lock_irqsave(&dd->rcvctrl_lock, flags);
	rcvctrl = read_csr(dd, RCV_CTRL);
	rcvctrl |= add;
	rcvctrl &= ~clear;
	write_csr(dd, RCV_CTRL, rcvctrl);
	spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
}

static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
{
	adjust_rcvctrl(dd, add, 0);
}

static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
{
	adjust_rcvctrl(dd, 0, clear);
}
/*
 * Called from all interrupt handlers to start handling an SPC freeze.
 */
void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct send_context *sc;
	int i;

	if (flags & FREEZE_SELF)
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);

	/* enter frozen mode */
	dd->flags |= HFI1_FROZEN;

	/* notify all SDMA engines that they are going into a freeze */
	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));

	/* do halt pre-handling on all enabled send contexts */
	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (sc && (sc->flags & SCF_ENABLED))
			sc_stop(sc, SCF_FROZEN | SCF_HALTED);
	}

	/* Send contexts are frozen. Notify user space */
	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);

	if (flags & FREEZE_ABORT) {
		dd_dev_err(dd,
			   "Aborted freeze recovery. Please REBOOT system\n");
		return;
	}
	/* queue non-interrupt handler */
	queue_work(ppd->hfi1_wq, &ppd->freeze_work);
}
/*
 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
 * depending on the "freeze" parameter.
 *
 * No need to return an error if it times out, our only option
 * is to proceed anyway.
 */
static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
	while (1) {
		reg = read_csr(dd, CCE_STATUS);
		if (freeze) {
			/* waiting until all indicators are set */
			if ((reg & ALL_FROZE) == ALL_FROZE)
				return;	/* all done */
		} else {
			/* waiting until all indicators are clear */
			if ((reg & ALL_FROZE) == 0)
				return;	/* all done */
		}

		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
				   freeze ? "" : "un", reg & ALL_FROZE,
				   freeze ? ALL_FROZE : 0ull);
			return;
		}
		usleep_range(80, 120);
	}
}
/*
 * Do all freeze handling for the RXE block.
 */
static void rxe_freeze(struct hfi1_devdata *dd)
{
	int i;

	/* disable port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/* disable all receive contexts */
	for (i = 0; i < dd->num_rcv_contexts; i++)
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
}
/*
 * Unfreeze handling for the RXE block - kernel contexts only.
 * This will also enable the port.  User contexts will do unfreeze
 * handling on a per-context basis as they call into the driver.
 */
static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
{
	u32 rcvmask;
	int i;

	/* enable all kernel contexts */
	for (i = 0; i < dd->n_krcv_queues; i++) {
		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
		/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
		rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		hfi1_rcvctrl(dd, rcvmask, i);
	}

	/* enable port */
	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}
/*
 * Non-interrupt SPC freeze handling.
 *
 * This is a work-queue function outside of the triggering interrupt.
 */
void handle_freeze(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  freeze_work);
	struct hfi1_devdata *dd = ppd->dd;

	/* wait for freeze indicators on all affected blocks */
	wait_for_freeze_status(dd, 1);

	/* SPC is now frozen */

	/* do send PIO freeze steps */
	pio_freeze(dd);

	/* do send DMA freeze steps */
	sdma_freeze(dd);

	/* do send egress freeze steps - nothing to do */

	/* do receive freeze steps */
	rxe_freeze(dd);

	/*
	 * Unfreeze the hardware - clear the freeze, wait for each
	 * block's frozen bit to clear, then clear the frozen flag.
	 */
	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
	wait_for_freeze_status(dd, 0);

	if (is_ax(dd)) {
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
		wait_for_freeze_status(dd, 1);
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
		wait_for_freeze_status(dd, 0);
	}

	/* do send PIO unfreeze steps for kernel contexts */
	pio_kernel_unfreeze(dd);

	/* do send DMA unfreeze steps */
	sdma_unfreeze(dd);

	/* do send egress unfreeze steps - nothing to do */

	/* do receive unfreeze steps for kernel contexts */
	rxe_kernel_unfreeze(dd);

	/*
	 * The unfreeze procedure touches global device registers when
	 * it disables and re-enables RXE. Mark the device unfrozen
	 * after all that is done so other parts of the driver waiting
	 * for the device to unfreeze don't do things out of order.
	 *
	 * The above implies that the meaning of HFI1_FROZEN flag is
	 * "Device has gone into freeze mode and freeze mode handling
	 * is still in progress."
	 *
	 * The flag will be removed when freeze mode processing has
	 * completed.
	 */
	dd->flags &= ~HFI1_FROZEN;
	wake_up(&dd->event_queue);

	/* no longer frozen */
}
/*
 * Handle a link up interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_up(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_up_work);

	set_link_state(ppd, HLS_UP_INIT);

	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
	read_ltp_rtt(ppd->dd);
	/*
	 * OPA specifies that certain counters are cleared on a transition
	 * to link up, so do that.
	 */
	clear_linkup_counters(ppd->dd);
	/*
	 * And (re)set link up default values.
	 */
	set_linkup_defaults(ppd);

	/* enforce link speed enabled */
	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
		/* oops - current speed is not enabled, bounce */
		dd_dev_err(ppd->dd,
			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
			   ppd->link_speed_active, ppd->link_speed_enabled);
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
				     OPA_LINKDOWN_REASON_SPEED_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	}
}
/*
 * Several pieces of LNI information were cached for SMA in ppd.
 * Reset these on link down
 */
static void reset_neighbor_info(struct hfi1_pportdata *ppd)
{
	ppd->neighbor_guid = 0;
	ppd->neighbor_port_number = 0;
	ppd->neighbor_type = 0;
	ppd->neighbor_fm_security = 0;
}
static const char * const link_down_reason_strs[] = {
	[OPA_LINKDOWN_REASON_NONE] = "None",
	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
	[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
	[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
	[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
	[OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
	[OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
	[OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
	[OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
	[OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
	[OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
	[OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
	[OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
	[OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
	[OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
	[OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
	[OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
	[OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
	[OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
	[OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
	[OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
	[OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
	[OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
	[OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
	[OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
	[OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
	[OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
	[OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
	[OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
	[OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
	[OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
	[OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
	[OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
		"Excessive buffer overrun",
	[OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
	[OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
	[OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
	[OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
	[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
	[OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
	[OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
	[OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
		"Local media not installed",
	[OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
	[OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
	[OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
		"End to end not installed",
	[OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
	[OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
	[OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
	[OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
	[OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
	[OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
};
/* return the neighbor link down reason string */
static const char *link_down_reason_str(u8 reason)
{
	const char *str = NULL;

	if (reason < ARRAY_SIZE(link_down_reason_strs))
		str = link_down_reason_strs[reason];
	if (!str)
		str = "(invalid)";

	return str;
}
/*
 * Handle a link down interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_down(struct work_struct *work)
{
	u8 lcl_reason, neigh_reason = 0;
	u8 link_down_reason;
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_down_work);
	int was_up;
	static const char ldr_str[] = "Link down reason: ";

	if ((ppd->host_link_state &
	     (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
	    ppd->port_type == PORT_TYPE_FIXED)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);

	/* Go offline first, then deal with reading/writing through 8051 */
	was_up = !!(ppd->host_link_state & HLS_UP);
	set_link_state(ppd, HLS_DN_OFFLINE);

	if (was_up) {
		lcl_reason = 0;
		/* link down reason is only valid if the link was up */
		read_link_down_reason(ppd->dd, &link_down_reason);
		switch (link_down_reason) {
		case LDR_LINK_TRANSFER_ACTIVE_LOW:
			/* the link went down, no idle message reason */
			dd_dev_info(ppd->dd, "%sUnexpected link down\n",
				    ldr_str);
			break;
		case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
			/*
			 * The neighbor reason is only valid if an idle message
			 * was received for it.
			 */
			read_planned_down_reason_code(ppd->dd, &neigh_reason);
			dd_dev_info(ppd->dd,
				    "%sNeighbor link down message %d, %s\n",
				    ldr_str, neigh_reason,
				    link_down_reason_str(neigh_reason));
			break;
		case LDR_RECEIVED_HOST_OFFLINE_REQ:
			dd_dev_info(ppd->dd,
				    "%sHost requested link to go offline\n",
				    ldr_str);
			break;
		default:
			dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
				    ldr_str, link_down_reason);
			break;
		}

		/*
		 * If no reason, assume peer-initiated but missed
		 * LinkGoingDown idle flits.
		 */
		if (neigh_reason == 0)
			lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
	} else {
		/* went down while polling or going up */
		lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
	}

	set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);

	/* inform the SMA when the link transitions from up to down */
	if (was_up && ppd->local_link_down_reason.sma == 0 &&
	    ppd->neigh_link_down_reason.sma == 0) {
		ppd->local_link_down_reason.sma =
			ppd->local_link_down_reason.latest;
		ppd->neigh_link_down_reason.sma =
			ppd->neigh_link_down_reason.latest;
	}

	reset_neighbor_info(ppd);
	if (ppd->mgmt_allowed)
		remove_full_mgmt_pkey(ppd);

	/* disable the port */
	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/*
	 * If there is no cable attached, turn the DC off. Otherwise,
	 * start the link bring up.
	 */
	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
		dc_shutdown(ppd->dd);
	else
		start_link(ppd);
}
void handle_link_bounce(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_bounce_work);

	/*
	 * Only do something if the link is currently up.
	 */
	if (ppd->host_link_state & HLS_UP) {
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	} else {
		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
			    __func__, link_state_name(ppd->host_link_state));
	}
}
/*
 * Mask conversion: Capability exchange to Port LTP.  The capability
 * exchange has an implicit 16b CRC that is mandatory.
 */
static int cap_to_port_ltp(int cap)
{
	int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */

	if (cap & CAP_CRC_14B)
		port_ltp |= PORT_LTP_CRC_MODE_14;
	if (cap & CAP_CRC_48B)
		port_ltp |= PORT_LTP_CRC_MODE_48;
	if (cap & CAP_CRC_12B_16B_PER_LANE)
		port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;

	return port_ltp;
}
/*
 * Convert an OPA Port LTP mask to capability mask
 */
int port_ltp_to_cap(int port_ltp)
{
	int cap_mask = 0;

	if (port_ltp & PORT_LTP_CRC_MODE_14)
		cap_mask |= CAP_CRC_14B;
	if (port_ltp & PORT_LTP_CRC_MODE_48)
		cap_mask |= CAP_CRC_48B;
	if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
		cap_mask |= CAP_CRC_12B_16B_PER_LANE;

	return cap_mask;
}
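
/*
 * Worked example: cap_to_port_ltp(CAP_CRC_14B) returns
 * PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14, and feeding that back
 * through port_ltp_to_cap() yields CAP_CRC_14B again - the mandatory
 * 16b mode has no capability bit, so it drops out of the round trip.
 */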
/*
 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
 */
static int lcb_to_port_ltp(int lcb_crc)
{
	int port_ltp = 0;

	if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
		port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
	else if (lcb_crc == LCB_CRC_48B)
		port_ltp = PORT_LTP_CRC_MODE_48;
	else if (lcb_crc == LCB_CRC_14B)
		port_ltp = PORT_LTP_CRC_MODE_14;
	else
		port_ltp = PORT_LTP_CRC_MODE_16;

	return port_ltp;
}
/*
 * Our neighbor has indicated that we are allowed to act as a fabric
 * manager, so place the full management partition key in the second
 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
 * that we should already have the limited management partition key in
 * array element 1, and also that the port is not yet up when
 * add_full_mgmt_pkey() is invoked.
 */
static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
	if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
		dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
			    __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
	ppd->pkeys[2] = FULL_MGMT_P_KEY;
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
}

static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
	ppd->pkeys[2] = 0;
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
}
/*
 * Convert the given link width to the OPA link width bitmask.
 */
static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
{
	switch (width) {
	case 0:
		/*
		 * Simulator and quick linkup do not set the width.
		 * Just set it to 4x without complaint.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
			return OPA_LINK_WIDTH_4X;
		return 0; /* no lanes up */
	case 1: return OPA_LINK_WIDTH_1X;
	case 2: return OPA_LINK_WIDTH_2X;
	case 3: return OPA_LINK_WIDTH_3X;
	default:
		dd_dev_info(dd, "%s: invalid width %d, using 4\n",
			    __func__, width);
		/* fall through */
	case 4: return OPA_LINK_WIDTH_4X;
	}
}
/*
 * Do a population count on the bottom nibble.
 */
static const u8 bit_counts[16] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
};

static inline u8 nibble_to_count(u8 nibble)
{
	return bit_counts[nibble & 0xf];
}
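
/*
 * Worked example: nibble_to_count(0xb) looks up bit_counts[0xb] and
 * returns 3, since 0xb = 0b1011 has three bits set (lanes 0, 1 and 3).
 */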
/*
 * Read the active lane information from the 8051 registers and return
 * their widths.
 *
 * Active lane information is found in these 8051 registers:
 *	enable_lane_tx
 *	enable_lane_rx
 */
static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
			    u16 *rx_width)
{
	u16 tx, rx;
	u8 enable_lane_rx;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	u8 max_rate;

	/* read the active lanes */
	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			 &rx_polarity_inversion, &max_rate);
	read_local_lni(dd, &enable_lane_rx);

	/* convert to counts */
	tx = nibble_to_count(enable_lane_tx);
	rx = nibble_to_count(enable_lane_rx);

	/*
	 * Set link_speed_active here, overriding what was set in
	 * handle_verify_cap().  The ASIC 8051 firmware does not correctly
	 * set the max_rate field in handle_verify_cap until v0.19.
	 */
	if ((dd->icode == ICODE_RTL_SILICON) &&
	    (dd->dc8051_ver < dc8051_ver(0, 19))) {
		/* max_rate: 0 = 12.5G, 1 = 25G */
		switch (max_rate) {
		case 0:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		default:
			dd_dev_err(dd,
				   "%s: unexpected max rate %d, using 25Gb\n",
				   __func__, (int)max_rate);
			/* fall through */
		case 1:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	}

	dd_dev_info(dd,
		    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
		    enable_lane_tx, tx, enable_lane_rx, rx);
	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);
}
/*
 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
 * Valid after the end of VerifyCap and during LinkUp. Does not change
 * after link up. I.e. look elsewhere for downgrade information.
 *
 * Bits are:
 *	+ bits [7:4] contain the number of active transmitters
 *	+ bits [3:0] contain the number of active receivers
 * These are numbers 1 through 4 and can be different values if the
 * link is asymmetric.
 *
 * verify_cap_local_fm_link_width[0] retains its original value.
 */
static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
			      u16 *rx_width)
{
	u16 widths, tx, rx;
	u8 misc_bits, local_flags;
	u16 active_tx, active_rx;

	read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
	tx = widths >> 12;
	rx = (widths >> 8) & 0xf;

	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);

	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
}
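/*
 * Illustrative worked example (values for illustration only): for a
 * packed widths field of 0x4404 read by get_linkup_widths(), the lane
 * counts decode as
 *	tx = widths >> 12        = 0x4	(4 active transmitters)
 *	rx = (widths >> 8) & 0xf = 0x4	(4 active receivers)
 */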
/*
 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
 * hardware information when the link first comes up.
 *
 * The link width is not available until after VerifyCap.AllFramesReceived
 * (the trigger for handle_verify_cap), so this is outside that routine
 * and should be called when the 8051 signals linkup.
 */
void get_linkup_link_widths(struct hfi1_pportdata *ppd)
{
	u16 tx_width, rx_width;

	/* get end-of-LNI link widths */
	get_linkup_widths(ppd->dd, &tx_width, &rx_width);

	/* use tx_width as the link is supposed to be symmetric on link up */
	ppd->link_width_active = tx_width;
	/* link width downgrade active (LWD.A) starts out matching LW.A */
	ppd->link_width_downgrade_tx_active = ppd->link_width_active;
	ppd->link_width_downgrade_rx_active = ppd->link_width_active;
	/* per OPA spec, on link up LWD.E resets to LWD.S */
	ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
	/* cache the active egress rate (units [10^6 bits/sec]) */
	ppd->current_egress_rate = active_egress_rate(ppd);
}
/*
 * Handle a verify capabilities interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_verify_cap(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_vc_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	u8 power_management;
	u8 continuous;
	u8 vcu;
	u8 vau;
	u8 z;
	u16 vl15buf;
	u16 link_widths;
	u16 crc_mask;
	u16 crc_val;
	u16 device_id;
	u16 active_tx, active_rx;
	u8 partner_supported_crc;
	u8 remote_tx_rate;
	u8 device_rev;

	set_link_state(ppd, HLS_VERIFY_CAP);

	lcb_shutdown(dd, 0);
	adjust_lcb_for_fpga_serdes(dd);

	/*
	 * These are now valid:
	 *	remote VerifyCap fields in the general LNI config
	 *	CSR DC8051_STS_REMOTE_GUID
	 *	CSR DC8051_STS_REMOTE_NODE_TYPE
	 *	CSR DC8051_STS_REMOTE_FM_SECURITY
	 *	CSR DC8051_STS_REMOTE_PORT_NO
	 */

	read_vc_remote_phy(dd, &power_management, &continuous);
	read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
			      &partner_supported_crc);
	read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
	read_remote_device_id(dd, &device_id, &device_rev);
	/*
	 * And the 'MgmtAllowed' information, which is exchanged during
	 * LNI, is also available at this point.
	 */
	read_mgmt_allowed(dd, &ppd->mgmt_allowed);
	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
	dd_dev_info(dd,
		    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
		    (int)power_management, (int)continuous);
	dd_dev_info(dd,
		    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
		    (int)vau, (int)z, (int)vcu, (int)vl15buf,
		    (int)partner_supported_crc);
	dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
		    (u32)remote_tx_rate, (u32)link_widths);
	dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
		    (u32)device_id, (u32)device_rev);
	/*
	 * The peer vAU value just read is the peer receiver value. HFI does
	 * not support a transmit vAU of 0 (AU == 8). We advertised that
	 * with Z=1 in the fabric capabilities sent to the peer. The peer
	 * will see our Z=1, and, if it advertised a vAU of 0, will move its
	 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
	 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
	 * subject to the Z value exception.
	 */
	if (vau == 0)
		vau = 1;
	set_up_vl15(dd, vau, vl15buf);

	/* set up the LCB CRC mode */
	crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;

	/* order is important: use the lowest bit in common */
	if (crc_mask & CAP_CRC_14B)
		crc_val = LCB_CRC_14B;
	else if (crc_mask & CAP_CRC_48B)
		crc_val = LCB_CRC_48B;
	else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
		crc_val = LCB_CRC_12B_16B_PER_LANE;
	else
		crc_val = LCB_CRC_16B;

	dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
	write_csr(dd, DC_LCB_CFG_CRC_MODE,
		  (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);

	/* set (14b only) or clear sideband credit */
	reg = read_csr(dd, SEND_CM_CTRL);
	if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
		write_csr(dd, SEND_CM_CTRL,
			  reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
	} else {
		write_csr(dd, SEND_CM_CTRL,
			  reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
	}

	ppd->link_speed_active = 0;	/* invalid value */
	if (dd->dc8051_ver < dc8051_ver(0, 20)) {
		/* remote_tx_rate: 0 = 12.5G, 1 = 25G */
		switch (remote_tx_rate) {
		case 0:
			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		case 1:
			ppd->link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	} else {
		/* actual rate is highest bit of the ANDed rates */
		u8 rate = remote_tx_rate & ppd->local_tx_rate;

		if (rate & 2)
			ppd->link_speed_active = OPA_LINK_SPEED_25G;
		else if (rate & 1)
			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
	}
	if (ppd->link_speed_active == 0) {
		dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
			   __func__, (int)remote_tx_rate);
		ppd->link_speed_active = OPA_LINK_SPEED_25G;
	}

	/*
	 * Cache the values of the supported, enabled, and active
	 * LTP CRC modes to return in 'portinfo' queries. But the bit
	 * flags that are returned in the portinfo query differ from
	 * what's in the link_crc_mask, crc_sizes, and crc_val
	 * variables. Convert these here.
	 */
	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* supported crc modes */
	ppd->port_ltp_crc_mode |=
		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
		/* enabled crc modes */
	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
		/* active crc mode */

	/* set up the remote credit return table */
	assign_remote_cm_au_table(dd, vcu);

	/*
	 * The LCB is reset on entry to handle_verify_cap(), so this must
	 * be applied on every link up.
	 *
	 * Adjust LCB error kill enable to kill the link if
	 * these RBUF errors are seen:
	 *	REPLAY_BUF_MBE_SMASK
	 *	FLIT_INPUT_BUF_MBE_SMASK
	 */
	if (is_ax(dd)) {	/* fixed in B0 */
		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
		reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
			| DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
		write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
	}

	/* pull LCB fifos out of reset - all fifo clocks must be stable */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);

	/* give 8051 access to the LCB CSRs */
	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
	set_8051_lcb_access(dd);

	ppd->neighbor_guid =
		read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
	ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
				    DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
	ppd->neighbor_type =
		read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
		DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
	ppd->neighbor_fm_security =
		read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
		DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
	dd_dev_info(dd,
		    "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
		    ppd->neighbor_guid, ppd->neighbor_type,
		    ppd->mgmt_allowed, ppd->neighbor_fm_security);
	if (ppd->mgmt_allowed)
		add_full_mgmt_pkey(ppd);

	/* tell the 8051 to go to LinkUp */
	set_link_state(ppd, HLS_GOING_UP);
}
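/*
 * Illustrative worked example of the CRC negotiation above (values for
 * illustration only): if port_crc_mode_enabled is CAP_CRC_14B | CAP_CRC_48B
 * and the partner advertises CAP_CRC_48B | CAP_CRC_12B_16B_PER_LANE, then
 * crc_mask is CAP_CRC_48B and the "lowest bit in common" checks select
 * crc_val = LCB_CRC_48B.
 */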
/*
 * Apply the link width downgrade enabled policy against the current active
 * link widths.
 *
 * Called when the enabled policy changes or the active link widths change.
 */
void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
{
	int do_bounce = 0;
	int tries;
	u16 lwde;
	u16 tx, rx;

	/* use the hls lock to avoid a race with actual link up */
	tries = 0;
retry:
	mutex_lock(&ppd->hls_lock);
	/* only apply if the link is up */
	if (ppd->host_link_state & HLS_DOWN) {
		/* still going up..wait and retry */
		if (ppd->host_link_state & HLS_GOING_UP) {
			if (++tries < 1000) {
				mutex_unlock(&ppd->hls_lock);
				usleep_range(100, 120); /* arbitrary */
				goto retry;
			}
			dd_dev_err(ppd->dd,
				   "%s: giving up waiting for link state change\n",
				   __func__);
		}
		goto done;
	}

	lwde = ppd->link_width_downgrade_enabled;

	if (refresh_widths) {
		get_link_widths(ppd->dd, &tx, &rx);
		ppd->link_width_downgrade_tx_active = tx;
		ppd->link_width_downgrade_rx_active = rx;
	}

	if (ppd->link_width_downgrade_tx_active == 0 ||
	    ppd->link_width_downgrade_rx_active == 0) {
		/* the 8051 reported a dead link as a downgrade */
		dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
	} else if (lwde == 0) {
		/* downgrade is disabled */

		/* bounce if not at starting active width */
		if ((ppd->link_width_active !=
		     ppd->link_width_downgrade_tx_active) ||
		    (ppd->link_width_active !=
		     ppd->link_width_downgrade_rx_active)) {
			dd_dev_err(ppd->dd,
				   "Link downgrade is disabled and link has downgraded, downing link\n");
			dd_dev_err(ppd->dd,
				   "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
				   ppd->link_width_active,
				   ppd->link_width_downgrade_tx_active,
				   ppd->link_width_downgrade_rx_active);
			do_bounce = 1;
		}
	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
		   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
		/* Tx or Rx is outside the enabled policy */
		dd_dev_err(ppd->dd,
			   "Link is outside of downgrade allowed, downing link\n");
		dd_dev_err(ppd->dd,
			   "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
			   lwde, ppd->link_width_downgrade_tx_active,
			   ppd->link_width_downgrade_rx_active);
		do_bounce = 1;
	}

done:
	mutex_unlock(&ppd->hls_lock);

	if (do_bounce) {
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
				     OPA_LINKDOWN_REASON_WIDTH_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	}
}
/*
 * Handle a link downgrade interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_downgrade(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_downgrade_work);

	dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
	apply_link_downgrade_policy(ppd, 1);
}
static char *dcc_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dcc_err_flags,
			   ARRAY_SIZE(dcc_err_flags));
}

static char *lcb_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, lcb_err_flags,
			   ARRAY_SIZE(lcb_err_flags));
}

static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_err_flags,
			   ARRAY_SIZE(dc8051_err_flags));
}

static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
			   ARRAY_SIZE(dc8051_info_err_flags));
}

static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
			   ARRAY_SIZE(dc8051_info_host_msg_flags));
}
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 info, err, host_msg;
	int queue_link_down = 0;
	char buf[96];

	/* look at the flags */
	if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
		/* 8051 information set by firmware */
		/* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
		info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
		err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
		host_msg = (info >>
			DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;

		/*
		 * Handle error flags.
		 */
		if (err & FAILED_LNI) {
			/*
			 * LNI error indications are cleared by the 8051
			 * only when starting polling.  Only pay attention
			 * to them when in the states that occur during
			 * LNI.
			 */
			if (ppd->host_link_state
			    & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
				queue_link_down = 1;
				dd_dev_info(dd, "Link error: %s\n",
					    dc8051_info_err_string(buf,
								   sizeof(buf),
								   err &
								   FAILED_LNI));
			}
			err &= ~(u64)FAILED_LNI;
		}
		/* unknown frames can happen during LNI, just count */
		if (err & UNKNOWN_FRAME) {
			ppd->unknown_frame_count++;
			err &= ~(u64)UNKNOWN_FRAME;
		}
		if (err) {
			/* report remaining errors, but do not do anything */
			dd_dev_err(dd, "8051 info error: %s\n",
				   dc8051_info_err_string(buf, sizeof(buf),
							  err));
		}

		/*
		 * Handle host message flags.
		 */
		if (host_msg & HOST_REQ_DONE) {
			/*
			 * Presently, the driver does a busy wait for
			 * host requests to complete.  This is only an
			 * informational message.
			 * NOTE: The 8051 clears the host message
			 * information *on the next 8051 command*.
			 * Therefore, when linkup is achieved,
			 * this flag will still be set.
			 */
			host_msg &= ~(u64)HOST_REQ_DONE;
		}
		if (host_msg & BC_SMA_MSG) {
			queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
			host_msg &= ~(u64)BC_SMA_MSG;
		}
		if (host_msg & LINKUP_ACHIEVED) {
			dd_dev_info(dd, "8051: Link up\n");
			queue_work(ppd->hfi1_wq, &ppd->link_up_work);
			host_msg &= ~(u64)LINKUP_ACHIEVED;
		}
		if (host_msg & EXT_DEVICE_CFG_REQ) {
			handle_8051_request(ppd);
			host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
		}
		if (host_msg & VERIFY_CAP_FRAME) {
			queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
			host_msg &= ~(u64)VERIFY_CAP_FRAME;
		}
		if (host_msg & LINK_GOING_DOWN) {
			const char *extra = "";
			/* no downgrade action needed if going down */
			if (host_msg & LINK_WIDTH_DOWNGRADED) {
				host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
				extra = " (ignoring downgrade)";
			}
			dd_dev_info(dd, "8051: Link down%s\n", extra);
			queue_link_down = 1;
			host_msg &= ~(u64)LINK_GOING_DOWN;
		}
		if (host_msg & LINK_WIDTH_DOWNGRADED) {
			queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
			host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
		}
		if (host_msg) {
			/* report remaining messages, but do not do anything */
			dd_dev_info(dd, "8051 info host message: %s\n",
				    dc8051_info_host_msg_string(buf,
								sizeof(buf),
								host_msg));
		}

		reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
	}
	if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
		/*
		 * Lost the 8051 heartbeat.  If this happens, we
		 * receive constant interrupts about it.  Disable
		 * the interrupt after the first.
		 */
		dd_dev_err(dd, "Lost 8051 heartbeat\n");
		write_csr(dd, DC_DC8051_ERR_EN,
			  read_csr(dd, DC_DC8051_ERR_EN) &
			  ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);

		reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
	}
	if (reg) {
		/* report the error, but do not do anything */
		dd_dev_err(dd, "8051 error: %s\n",
			   dc8051_err_string(buf, sizeof(buf), reg));
	}

	if (queue_link_down) {
		/*
		 * If the link is already going down or disabled, do not
		 * queue another.
		 */
		if ((ppd->host_link_state &
		    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
		    ppd->link_enabled == 0) {
			dd_dev_info(dd, "%s: not queuing link down\n",
				    __func__);
		} else {
			queue_work(ppd->hfi1_wq, &ppd->link_down_work);
		}
	}
}
static const char * const fm_config_txt[] = {
[0] =
	"BadHeadDist: Distance violation between two head flits",
[1] =
	"BadTailDist: Distance violation between two tail flits",
[2] =
	"BadCtrlDist: Distance violation between two credit control flits",
[3] =
	"BadCrdAck: Credits return for unsupported VL",
[4] =
	"UnsupportedVLMarker: Received VL Marker",
[5] =
	"BadPreempt: Exceeded the preemption nesting level",
[6] =
	"BadControlFlit: Received unsupported control flit",
/* no 7 */
[8] =
	"UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
};

static const char * const port_rcv_txt[] = {
[1] =
	"BadPktLen: Illegal PktLen",
[2] =
	"PktLenTooLong: Packet longer than PktLen",
[3] =
	"PktLenTooShort: Packet shorter than PktLen",
[4] =
	"BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
[5] =
	"BadDLID: Illegal DLID (0, doesn't match HFI)",
[6] =
	"BadL2: Illegal L2 opcode",
[7] =
	"BadSC: Unsupported SC",
[9] =
	"BadRC: Illegal RC",
[11] =
	"PreemptError: Preempting with same VL",
[12] =
	"PreemptVL15: Preempting a VL15 packet",
};
#define OPA_LDR_FMCONFIG_OFFSET 16
#define OPA_LDR_PORTRCV_OFFSET 0
static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 info, hdr0, hdr1;
	const char *extra;
	char buf[96];
	struct hfi1_pportdata *ppd = dd->pport;
	u8 lcl_reason = 0;
	int do_bounce = 0;

	if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
		if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
			info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
			dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
		}
		reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
		struct hfi1_pportdata *ppd = dd->pport;
		/* this counter saturates at (2^32) - 1 */
		if (ppd->link_downed < (u32)UINT_MAX)
			ppd->link_downed++;
		reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
		u8 reason_valid = 1;

		info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
		if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
			dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
		}
		switch (info) {
		case 0:
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
			extra = fm_config_txt[info];
			break;
		case 8:
			extra = fm_config_txt[info];
			if (ppd->port_error_action &
			    OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
				do_bounce = 1;
				/*
				 * lcl_reason cannot be derived from info
				 * for this error
				 */
				lcl_reason =
				  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
			}
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}

		if (reason_valid && !do_bounce) {
			do_bounce = ppd->port_error_action &
					(1 << (OPA_LDR_FMCONFIG_OFFSET + info));
			lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
		}

		/* just report this */
		dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
		reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
		u8 reason_valid = 1;

		info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
		hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
		hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
		if (!(dd->err_info_rcvport.status_and_code &
		      OPA_EI_STATUS_SMASK)) {
			dd->err_info_rcvport.status_and_code =
				info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_rcvport.status_and_code |=
				OPA_EI_STATUS_SMASK;
			/*
			 * save first 2 flits in the packet that caused
			 * the error
			 */
			dd->err_info_rcvport.packet_flit1 = hdr0;
			dd->err_info_rcvport.packet_flit2 = hdr1;
		}
		switch (info) {
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
		case 9:
		case 11:
		case 12:
			extra = port_rcv_txt[info];
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}

		if (reason_valid && !do_bounce) {
			do_bounce = ppd->port_error_action &
					(1 << (OPA_LDR_PORTRCV_OFFSET + info));
			lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
		}

		/* just report this */
		dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
		dd_dev_info(dd, "           hdr0 0x%llx, hdr1 0x%llx\n",
			    hdr0, hdr1);

		reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
		/* informative only */
		dd_dev_info(dd, "8051 access to LCB blocked\n");
		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
	}
	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
		/* informative only */
		dd_dev_info(dd, "host access to LCB blocked\n");
		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
	}

	/* report any remaining errors */
	if (reg)
		dd_dev_info(dd, "DCC Error: %s\n",
			    dcc_err_string(buf, sizeof(buf), reg));

	if (lcl_reason == 0)
		lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;

	if (do_bounce) {
		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
		set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
		queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
	}
}
static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];

	dd_dev_info(dd, "LCB Error: %s\n",
		    lcb_err_string(buf, sizeof(buf), reg));
}
/*
 * CCE block DC interrupt.  Source is < 8.
 */
static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &dc_errs[source];

	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
	} else if (source == 3 /* dc_lbm_int */) {
		/*
		 * This indicates that a parity error has occurred on the
		 * address/control lines presented to the LBM.  The error
		 * is a single pulse, there is no associated error flag,
		 * and it is non-maskable.  This is because if a parity
		 * error occurs on the request the request is dropped.
		 * This should never occur, but it is nice to know if it
		 * ever does.
		 */
		dd_dev_err(dd, "Parity error in DC LBM block\n");
	} else {
		dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
	}
}
/*
 * TX block send credit interrupt.  Source is < 160.
 */
static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
{
	sc_group_release_update(dd, source);
}
/*
 * TX block SDMA interrupt.  Source is < 48.
 *
 * SDMA interrupts are grouped by type:
 *
 *	 0 -  N-1 = SDma
 *	 N - 2N-1 = SDmaProgress
 *	2N - 3N-1 = SDmaIdle
 */
static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
{
	/* what interrupt */
	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
		   slashstrip(__FILE__), __LINE__, __func__);
	sdma_dumpstate(&dd->per_sdma[which]);
#endif

	if (likely(what < 3 && which < dd->num_sdma)) {
		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
	} else {
		/* should not happen */
		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
	}
}
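/*
 * Illustrative worked example of the decode above (values for
 * illustration only): with TXE_NUM_SDMA_ENGINES == 16, a source of 35
 * decodes as
 *	what  = 35 / 16 = 2	(the SDmaIdle group)
 *	which = 35 % 16 = 3	(SDMA engine 3)
 */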
/*
 * RX block receive available interrupt.  Source is < 160.
 */
static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
{
	struct hfi1_ctxtdata *rcd;
	char *err_detail;

	if (likely(source < dd->num_rcv_contexts)) {
		rcd = dd->rcd[source];
		if (rcd) {
			if (source < dd->first_user_ctxt)
				rcd->do_interrupt(rcd, 0);
			else
				handle_user_interrupt(rcd);
			return;	/* OK */
		}
		/* received an interrupt, but no rcd */
		err_detail = "dataless";
	} else {
		/* received an interrupt, but are not using that context */
		err_detail = "out of range";
	}
	dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
		   err_detail, source);
}
/*
 * RX block receive urgent interrupt.  Source is < 160.
 */
static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
{
	struct hfi1_ctxtdata *rcd;
	char *err_detail;

	if (likely(source < dd->num_rcv_contexts)) {
		rcd = dd->rcd[source];
		if (rcd) {
			/* only pay attention to user urgent interrupts */
			if (source >= dd->first_user_ctxt)
				handle_user_interrupt(rcd);
			return;	/* OK */
		}
		/* received an interrupt, but no rcd */
		err_detail = "dataless";
	} else {
		/* received an interrupt, but are not using that context */
		err_detail = "out of range";
	}
	dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
		   err_detail, source);
}
/*
 * Reserved range interrupt.  Should not be called in normal operation.
 */
static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
{
	char name[64];

	dd_dev_err(dd, "unexpected %s interrupt\n",
		   is_reserved_name(name, sizeof(name), source));
}
static const struct is_table is_table[] = {
/*
 * start		     end
 *				name func		interrupt func
 */
{ IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
				is_misc_err_name,	is_misc_err_int },
{ IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
				is_sdma_eng_err_name,	is_sdma_eng_err_int },
{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
				is_sendctxt_err_name,	is_sendctxt_err_int },
{ IS_SDMA_START,	     IS_SDMA_END,
				is_sdma_eng_name,	is_sdma_eng_int },
{ IS_VARIOUS_START,	     IS_VARIOUS_END,
				is_various_name,	is_various_int },
{ IS_DC_START,	     IS_DC_END,
				is_dc_name,		is_dc_int },
{ IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
				is_rcv_avail_name,	is_rcv_avail_int },
{ IS_RCVURGENT_START,    IS_RCVURGENT_END,
				is_rcv_urgent_name,	is_rcv_urgent_int },
{ IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
				is_send_credit_name,	is_send_credit_int},
{ IS_RESERVED_START,     IS_RESERVED_END,
				is_reserved_name,	is_reserved_int},
};
/*
 * Interrupt source interrupt - called when the given source has an interrupt.
 * Source is a bit index into an array of 64-bit integers.
 */
static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
{
	const struct is_table *entry;

	/* avoids a double compare by walking the table in-order */
	for (entry = &is_table[0]; entry->is_name; entry++) {
		if (source < entry->end) {
			trace_hfi1_interrupt(dd, entry, source);
			entry->is_int(dd, source - entry->start);
			return;
		}
	}
	/* fell off the end */
	dd_dev_err(dd, "invalid interrupt source %u\n", source);
}
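/*
 * Illustrative worked example of the in-order table walk above (the
 * range values here are hypothetical, for illustration only): if
 * IS_SDMA_START were 16, a source of 20 would fail the end check of each
 * earlier entry, match the IS_SDMA range, and be handed to
 * is_sdma_eng_int() as relative source 20 - 16 = 4.
 */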
/*
 * General interrupt handler.  This is able to correctly handle
 * all interrupts in case INTx is used.
 */
static irqreturn_t general_interrupt(int irq, void *data)
{
	struct hfi1_devdata *dd = data;
	u64 regs[CCE_NUM_INT_CSRS];
	u32 bit;
	int i;

	this_cpu_inc(*dd->int_counter);

	/* phase 1: scan and clear all handled interrupts */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
		if (dd->gi_mask[i] == 0) {
			regs[i] = 0;	/* used later */
			continue;
		}
		regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
				dd->gi_mask[i];
		/* only clear if anything is set */
		if (regs[i])
			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
	}

	/* phase 2: call the appropriate handler */
	for_each_set_bit(bit, (unsigned long *)&regs[0],
			 CCE_NUM_INT_CSRS * 64) {
		is_interrupt(dd, bit);
	}

	return IRQ_HANDLED;
}
static irqreturn_t sdma_interrupt(int irq, void *data)
{
	struct sdma_engine *sde = data;
	struct hfi1_devdata *dd = sde->dd;
	u64 status;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	sdma_dumpstate(sde);
#endif

	this_cpu_inc(*dd->int_counter);

	/* This read_csr is really bad in the hot path */
	status = read_csr(dd,
			  CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
			  & sde->imask;
	if (likely(status)) {
		/* clear the interrupt(s) */
		write_csr(dd,
			  CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
			  status);

		/* handle the interrupt(s) */
		sdma_engine_interrupt(sde, status);
	} else {
		dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
			   sde->this_idx);
	}

	return IRQ_HANDLED;
}
/*
 * Clear the receive interrupt.  Use a read of the interrupt clear CSR
 * to ensure that the write completed.  This does NOT guarantee that
 * queued DMA writes to memory from the chip are pushed.
 */
static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);

	mmiowb();	/* make sure everything before is written */
	write_csr(dd, addr, rcd->imask);
	/* force the above write on the chip and get a value back */
	(void)read_csr(dd, addr);
}
/* force the receive interrupt */
void force_recv_intr(struct hfi1_ctxtdata *rcd)
{
	write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
}
/*
 * Return non-zero if a packet is present.
 *
 * This routine is called when rechecking for packets after the RcvAvail
 * interrupt has been cleared down.  First, do a quick check of memory for
 * a packet present.  If not found, use an expensive CSR read of the context
 * tail to determine the actual tail.  The CSR read is necessary because there
 * is no method to push pending DMAs to memory other than an interrupt and we
 * are trying to determine if we need to force an interrupt.
 */
static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
{
	u32 tail;
	int present;

	if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
		present = (rcd->seq_cnt ==
				rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
	else /* is RDMA rtail */
		present = (rcd->head != get_rcvhdrtail(rcd));

	if (present)
		return 1;

	/* fall back to a CSR read, correct independent of DMA_RTAIL */
	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
	return rcd->head != tail;
}
/*
 * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
 * This routine will try to handle packets immediately (latency), but if
 * it finds too many, it will invoke the thread handler (bandwidth).  The
 * chip receive interrupt is *not* cleared down until this or the thread (if
 * invoked) is finished.  The intent is to avoid extra interrupts while we
 * are processing packets anyway.
 */
static irqreturn_t receive_context_interrupt(int irq, void *data)
{
	struct hfi1_ctxtdata *rcd = data;
	struct hfi1_devdata *dd = rcd->dd;
	int disposition;
	int present;

	trace_hfi1_receive_interrupt(dd, rcd->ctxt);
	this_cpu_inc(*dd->int_counter);
	aspm_ctx_disable(rcd);

	/* receive interrupt remains blocked while processing packets */
	disposition = rcd->do_interrupt(rcd, 0);

	/*
	 * Too many packets were seen while processing packets in this
	 * IRQ handler.  Invoke the handler thread.  The receive interrupt
	 * remains blocked.
	 */
	if (disposition == RCV_PKT_LIMIT)
		return IRQ_WAKE_THREAD;

	/*
	 * The packet processor detected no more packets.  Clear the receive
	 * interrupt and recheck for a packet that may have arrived
	 * after the previous check and interrupt clear.  If a packet arrived,
	 * force another interrupt.
	 */
	clear_recv_intr(rcd);
	present = check_packet_present(rcd);
	if (present)
		force_recv_intr(rcd);

	return IRQ_HANDLED;
}
/*
 * Receive packet thread handler.  This expects to be invoked with the
 * receive interrupt still blocked.
 */
static irqreturn_t receive_context_thread(int irq, void *data)
{
	struct hfi1_ctxtdata *rcd = data;
	int present;

	/* receive interrupt is still blocked from the IRQ handler */
	(void)rcd->do_interrupt(rcd, 1);

	/*
	 * The packet processor will only return if it detected no more
	 * packets.  Hold IRQs here so we can safely clear the interrupt and
	 * recheck for a packet that may have arrived after the previous
	 * check and the interrupt clear.  If a packet arrived, force another
	 * interrupt.
	 */
	local_irq_disable();
	clear_recv_intr(rcd);
	present = check_packet_present(rcd);
	if (present)
		force_recv_intr(rcd);
	local_irq_enable();

	return IRQ_HANDLED;
}
/* ========================================================================= */

u32 read_physical_state(struct hfi1_devdata *dd)
{
	u64 reg;

	reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
	return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
				& DC_DC8051_STS_CUR_STATE_PORT_MASK;
}

u32 read_logical_state(struct hfi1_devdata *dd)
{
	u64 reg;

	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
	return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
				& DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
}
static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
{
	u64 reg;

	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
	/* clear current state, set new state */
	reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
	reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
	write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
}
/*
 * Use the 8051 to read an LCB CSR.
 */
static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
{
	u32 regno;
	int ret;

	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		if (acquire_lcb_access(dd, 0) == 0) {
			*data = read_csr(dd, addr);
			release_lcb_access(dd, 0);
			return 0;
		}
		return -EBUSY;
	}

	/* register is an index of LCB registers: (offset - base) / 8 */
	regno = (addr - DC_LCB_CFG_RUN) >> 3;
	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
	if (ret != HCMD_SUCCESS)
		return -EBUSY;
	return 0;
}
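/*
 * Illustrative worked example (addresses for illustration only): since
 * LCB CSRs are 8 bytes apart, an addr of DC_LCB_CFG_RUN + 0x18 yields
 * regno = 0x18 >> 3 = 3, i.e. the fourth LCB register in the block.
 */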
/*
 * Read an LCB CSR.  Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return read_lcb_via_8051(dd, addr, data);
	/* if going up or down, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
		return -EBUSY;
	/* otherwise, host has access */
	*data = read_csr(dd, addr);
	return 0;
}
/*
 * Use the 8051 to write an LCB CSR.
 */
static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	u32 regno;
	int ret;

	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
	    (dd->dc8051_ver < dc8051_ver(0, 20))) {
		if (acquire_lcb_access(dd, 0) == 0) {
			write_csr(dd, addr, data);
			release_lcb_access(dd, 0);
			return 0;
		}
		return -EBUSY;
	}

	/* register is an index of LCB registers: (offset - base) / 8 */
	regno = (addr - DC_LCB_CFG_RUN) >> 3;
	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
	if (ret != HCMD_SUCCESS)
		return -EBUSY;
	return 0;
}
/*
 * Write an LCB CSR.  Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return write_lcb_via_8051(dd, addr, data);
	/* if going up or down, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
		return -EBUSY;
	/* otherwise, host has access */
	write_csr(dd, addr, data);
	return 0;
}
/*
 * Returns:
 *	< 0 = Linux error, not able to get access
 *	> 0 = 8051 command RETURN_CODE
 */
static int do_8051_command(
	struct hfi1_devdata *dd,
	u32 type,
	u64 in_data,
	u64 *out_data)
{
	u64 reg, completed;
	int return_code;
	unsigned long flags;
	unsigned long timeout;

	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);

	/*
	 * Alternative to holding the lock for a long time:
	 * - keep busy wait - have other users bounce off
	 */
	spin_lock_irqsave(&dd->dc8051_lock, flags);

	/* We can't send any commands to the 8051 if it's in reset */
	if (dd->dc_shutdown) {
		return_code = -ENODEV;
		goto fail;
	}

	/*
	 * If an 8051 host command timed out previously, then the 8051 is
	 * stuck.
	 *
	 * On first timeout, attempt to reset and restart the entire DC
	 * block (including 8051). (Is this too big of a hammer?)
	 *
	 * If the 8051 times out a second time, the reset did not bring it
	 * back to healthy life. In that case, fail any subsequent commands.
	 */
	if (dd->dc8051_timed_out) {
		if (dd->dc8051_timed_out > 1) {
			dd_dev_err(dd,
				   "Previous 8051 host command timed out, skipping command %u\n",
				   type);
			return_code = -ENXIO;
			goto fail;
		}
		spin_unlock_irqrestore(&dd->dc8051_lock, flags);
		dc_shutdown(dd);
		dc_start(dd);
		spin_lock_irqsave(&dd->dc8051_lock, flags);
	}

	/*
	 * If there is no timeout, then the 8051 command interface is
	 * waiting for a command.
	 */

	/*
	 * When writing an LCB CSR, out_data contains the full value to
	 * be written, while in_data contains the relative LCB
	 * address in 7:0. Do the work here, rather than the caller,
	 * of distributing the write data to where it needs to go:
	 *
	 * Write data
	 *   39:00 -> in_data[47:8]
	 *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
	 *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
	 */
	if (type == HCMD_WRITE_LCB_CSR) {
		in_data |= ((*out_data) & 0xffffffffffull) << 8;
		reg = ((((*out_data) >> 40) & 0xff) <<
				DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
		      | ((((*out_data) >> 48) & 0xffff) <<
				DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
	}

	/*
	 * Do two writes: the first to stabilize the type and req_data, the
	 * second to activate.
	 */
	reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
			<< DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
		| (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
			<< DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);

	/* wait for completion, alternate: interrupt */
	timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
	while (1) {
		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
		completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
		if (completed)
			break;
		if (time_after(jiffies, timeout)) {
			dd->dc8051_timed_out++;
			dd_dev_err(dd, "8051 host command %u timeout\n", type);
			if (out_data)
				*out_data = 0;
			return_code = -ETIMEDOUT;
			goto fail;
		}
		udelay(2);
	}

	if (out_data) {
		*out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
		if (type == HCMD_READ_LCB_CSR) {
			/* top 16 bits are in a different register */
			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
				& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
				<< (48
				    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
		}
	}
	return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
	dd->dc8051_timed_out = 0;
	/*
	 * Clear command for next user.
	 */
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);

fail:
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);

	return return_code;
}
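/*
 * Illustrative worked example of the HCMD_WRITE_LCB_CSR packing above
 * (values for illustration only): for *out_data == 0x0123456789abcdefull,
 * bits 39:0 (0x6789abcdef) are shifted into in_data[47:8], bits 47:40
 * (0x45) land in DC8051_CFG_EXT_DEV_0.RETURN_CODE, and bits 63:48
 * (0x0123) land in DC8051_CFG_EXT_DEV_0.RSP_DATA.
 */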
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
{
	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}
int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
		     u8 lane_id, u32 config_data)
{
	u64 data;
	int ret;

	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "load 8051 config: field id %d, lane %d, err %d\n",
			   (int)field_id, (int)lane_id, ret);
	}
	return ret;
}
/*
 * Read the 8051 firmware "registers".  Use the RAM directly.  Always
 * set the result, even on error.
 * Return 0 on success, -errno on failure
 */
int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
		     u32 *result)
{
	u64 big_data;
	u32 addr;
	int ret;

	/* address start depends on the lane_id */
	if (lane_id < 4)
		addr = (4 * NUM_GENERAL_FIELDS)
			+ (lane_id * 4 * NUM_LANE_FIELDS);
	else
		addr = 0;
	addr += field_id * 4;

	/* read is in 8-byte chunks, hardware will truncate the address down */
	ret = read_8051_data(dd, addr, 8, &big_data);

	if (ret == 0) {
		/* extract the 4 bytes we want */
		if (addr & 0x4)
			*result = (u32)(big_data >> 32);
		else
			*result = (u32)big_data;
	} else {
		*result = 0;
		dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
			   __func__, lane_id, field_id);
	}

	return ret;
}
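/*
 * Illustrative worked example (values for illustration only): for
 * lane_id 1 and field_id 2, the byte address works out to
 *	addr = 4 * NUM_GENERAL_FIELDS + 1 * 4 * NUM_LANE_FIELDS + 2 * 4
 * The 8-byte read then returns two adjacent 32-bit fields, and addr & 0x4
 * selects the upper word when the requested field sits in the high half.
 */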
static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
			      u8 continuous)
{
	u32 frame;

	frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
		| power_management << POWER_MANAGEMENT_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
				GENERAL_CONFIG, frame);
}
static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
				 u16 vl15buf, u8 crc_sizes)
{
	u32 frame;

	frame = (u32)vau << VAU_SHIFT
		| (u32)z << Z_SHIFT
		| (u32)vcu << VCU_SHIFT
		| (u32)vl15buf << VL15BUF_SHIFT
		| (u32)crc_sizes << CRC_SIZES_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
				GENERAL_CONFIG, frame);
}
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
			 &frame);
	*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
	*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}
static int write_vc_local_link_width(struct hfi1_devdata *dd,
				     u8 misc_bits,
				     u8 flag_bits,
				     u16 link_widths)
{
	u32 frame;

	frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
		| (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
		| (u32)link_widths << LINK_WIDTH_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
				frame);
}
static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
				 u8 device_rev)
{
	u32 frame;

	frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
		| ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
	return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
}
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev)
{
	u32 frame;

	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
	*device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
	*device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
			& REMOTE_DEVICE_REV_MASK;
}
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
{
	u32 frame;

	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
	*ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
	*ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
}
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
	*power_management = (frame >> POWER_MANAGEMENT_SHIFT)
				& POWER_MANAGEMENT_MASK;
	*continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
				& CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
}
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
	*vau = (frame >> VAU_SHIFT) & VAU_MASK;
	*z = (frame >> Z_SHIFT) & Z_MASK;
	*vcu = (frame >> VCU_SHIFT) & VCU_MASK;
	*vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
	*crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
}
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate,
				      u16 *link_widths)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
			 &frame);
	*remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
				& REMOTE_TX_RATE_MASK;
	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
{
	u32 frame;

	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
	*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
}
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
{
	u32 frame;

	read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
	*mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
}
static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
{
	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
}

static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
{
	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
}
void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
{
	u32 frame;
	int ret;

	*link_quality = 0;
	if (dd->pport->host_link_state & HLS_UP) {
		ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
				       &frame);
		if (ret == 0)
			*link_quality = (frame >> LINK_QUALITY_SHIFT)
						& LINK_QUALITY_MASK;
	}
}
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
{
	u32 frame;

	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
	*pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
}
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
{
	u32 frame;

	read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
	*ldr = (frame & 0xff);
}
static int read_tx_settings(struct hfi1_devdata *dd,
			    u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion,
			    u8 *max_rate)
{
	u32 frame;
	int ret;

	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
	*enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
				& ENABLE_LANE_TX_MASK;
	*tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
				& TX_POLARITY_INVERSION_MASK;
	*rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
				& RX_POLARITY_INVERSION_MASK;
	*max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
	return ret;
}
static int write_tx_settings(struct hfi1_devdata *dd,
			     u8 enable_lane_tx,
			     u8 tx_polarity_inversion,
			     u8 rx_polarity_inversion,
			     u8 max_rate)
{
	u32 frame;

	/* no need to mask, all variable sizes match field widths */
	frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
		| tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
		| rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
		| max_rate << MAX_RATE_SHIFT;
	return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
}
static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
{
	u32 frame, version, prod_id;
	int ret, lane;

	/* 4 lanes */
	for (lane = 0; lane < 4; lane++) {
		ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
		if (ret) {
			dd_dev_err(dd,
				   "Unable to read lane %d firmware details\n",
				   lane);
			continue;
		}
		version = (frame >> SPICO_ROM_VERSION_SHIFT)
					& SPICO_ROM_VERSION_MASK;
		prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
					& SPICO_ROM_PROD_ID_MASK;
		dd_dev_info(dd,
			    "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
			    lane, version, prod_id);
	}
}
/*
 * Read an idle LCB message.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
{
	int ret;

	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "read idle message: type %d, err %d\n",
			   (u32)type, ret);
		return -EINVAL;
	}
	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
	/* return only the payload as we already know the type */
	*data_out >>= IDLE_PAYLOAD_SHIFT;
	return 0;
}

/*
 * Read an idle SMA message.  To be done in response to a notification from
 * the 8051.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
{
	return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
				 data);
}
/*
 * Send an idle LCB message.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int send_idle_message(struct hfi1_devdata *dd, u64 data)
{
	int ret;

	dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
	ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
			   data, ret);
		return -EINVAL;
	}
	return 0;
}

/*
 * Send an idle SMA message.
 *
 * Returns 0 on success, -EINVAL on error
 */
int send_idle_sma(struct hfi1_devdata *dd, u64 message)
{
	u64 data;

	data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
		((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
	return send_idle_message(dd, data);
}
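/*
 * Illustrative worked example (values for illustration only): for
 * message == 0x2, send_idle_sma() builds
 *	data = (0x2 << IDLE_PAYLOAD_SHIFT) | (IDLE_SMA << IDLE_MSG_TYPE_SHIFT)
 * so the payload and the SMA message type travel in one idle LCB message.
 */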
/*
 * Initialize the LCB then do a quick link up.  This may or may not be
 * in loopback.
 *
 * return 0 on success, -errno on error
 */
static int do_quick_linkup(struct hfi1_devdata *dd)
{
	u64 reg;
	unsigned long timeout;
	int ret;

	lcb_shutdown(dd, 0);

	if (loopback) {
		/* LCB_CFG_LOOPBACK.VAL = 2 */
		/* LCB_CFG_LANE_WIDTH.VAL = 0 */
		write_csr(dd, DC_LCB_CFG_LOOPBACK,
			  IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
	}

	/* start the LCBs */
	/* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);

	/* simulator only loopback steps */
	if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		/* LCB_CFG_RUN.EN = 1 */
		write_csr(dd, DC_LCB_CFG_RUN,
			  1ull << DC_LCB_CFG_RUN_EN_SHIFT);

		/* watch LCB_STS_LINK_TRANSFER_ACTIVE */
		timeout = jiffies + msecs_to_jiffies(10);
		while (1) {
			reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
			if (reg)
				break;
			if (time_after(jiffies, timeout)) {
				dd_dev_err(dd,
					   "timeout waiting for LINK_TRANSFER_ACTIVE\n");
				return -ETIMEDOUT;
			}
			udelay(2);
		}

		write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
			  1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
	}

	if (!loopback) {
		/*
		 * When doing quick linkup and not in loopback, both
		 * sides must be done with LCB set-up before either
		 * starts the quick linkup.  Put a delay here so that
		 * both sides can be started and have a chance to be
		 * done with LCB set up before resuming.
		 */
		dd_dev_err(dd,
			   "Pausing for peer to be finished with LCB set up\n");
		msleep(5000);
		dd_dev_err(dd, "Continuing with quick linkup\n");
	}

	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
	set_8051_lcb_access(dd);

	/*
	 * State "quick" LinkUp request sets the physical link state to
	 * LinkUp without a verify capability sequence.
	 * This state is in simulator v37 and later.
	 */
	ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "%s: set physical link state to quick LinkUp failed with return %d\n",
			   __func__, ret);

		set_host_lcb_access(dd);
		write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */

		if (ret >= 0)
			ret = -EINVAL;
		return ret;
	}

	return 0; /* success */
}
/*
 * Set the SerDes to internal loopback mode.
 * Returns 0 on success, -errno on error.
 */
static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
{
	int ret;

	ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
	if (ret == HCMD_SUCCESS)
		return 0;
	dd_dev_err(dd,
		   "Set physical link state to SerDes Loopback failed with return %d\n",
		   ret);
	if (ret >= 0)
		ret = -EINVAL;
	return ret;
}
/*
 * Do all special steps to set up loopback.
 */
static int init_loopback(struct hfi1_devdata *dd)
{
	dd_dev_info(dd, "Entering loopback mode\n");

	/* all loopbacks should disable self GUID check */
	write_csr(dd, DC_DC8051_CFG_MODE,
		  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));

	/*
	 * The simulator has only one loopback option - LCB.  Switch
	 * to that option, which includes quick link up.
	 *
	 * Accept all valid loopback values.
	 */
	if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
	    (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
	     loopback == LOOPBACK_CABLE)) {
		loopback = LOOPBACK_LCB;
		quick_linkup = 1;
		return 0;
	}

	/* handle serdes loopback */
	if (loopback == LOOPBACK_SERDES) {
		/* internal serdes loopback needs quick linkup on RTL */
		if (dd->icode == ICODE_RTL_SILICON)
			quick_linkup = 1;
		return set_serdes_loopback_mode(dd);
	}

	/* LCB loopback - handled at poll time */
	if (loopback == LOOPBACK_LCB) {
		quick_linkup = 1; /* LCB is always quick linkup */

		/* not supported in emulation due to emulation RTL changes */
		if (dd->icode == ICODE_FPGA_EMULATION) {
			dd_dev_err(dd,
				   "LCB loopback not supported in emulation\n");
			return -EINVAL;
		}
		return 0;
	}

	/* external cable loopback requires no extra steps */
	if (loopback == LOOPBACK_CABLE)
		return 0;

	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
	return -EINVAL;
}
/*
 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
 * used in the Verify Capability link width attribute.
 */
static u16 opa_to_vc_link_widths(u16 opa_widths)
{
	int i;
	u16 result = 0;

	static const struct link_bits {
		u16 from;
		u16 to;
	} opa_link_xlate[] = {
		{ OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
		{ OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
		{ OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
		{ OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
	};

	for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
		if (opa_widths & opa_link_xlate[i].from)
			result |= opa_link_xlate[i].to;
	}
	return result;
}
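/*
 * Illustrative worked example (values for illustration only): an FM
 * enabled-width mask of OPA_LINK_WIDTH_2X | OPA_LINK_WIDTH_4X translates
 * to Verify Capability link width bits (1 << 1) | (1 << 3) = 0xa.
 */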
/*
 * Set link attributes before moving to polling.
 */
static int set_local_link_attributes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	int ret;

	/* reset our fabric serdes to clear any lingering problems */
	fabric_serdes_reset(dd);

	/* set the local tx rate - need to read-modify-write */
	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			       &rx_polarity_inversion, &ppd->local_tx_rate);
	if (ret)
		goto set_local_link_attributes_fail;

	if (dd->dc8051_ver < dc8051_ver(0, 20)) {
		/* set the tx rate to the fastest enabled */
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
			ppd->local_tx_rate = 1;
		else
			ppd->local_tx_rate = 0;
	} else {
		/* set the tx rate to all enabled */
		ppd->local_tx_rate = 0;
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
			ppd->local_tx_rate |= 2;
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
			ppd->local_tx_rate |= 1;
	}

	enable_lane_tx = 0xF; /* enable all four lanes */
	ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
				rx_polarity_inversion, ppd->local_tx_rate);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/*
	 * DC supports continuous updates.
	 */
	ret = write_vc_local_phy(dd,
				 0 /* no power management */,
				 1 /* continuous updates */);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* z=1 in the next call: AU of 0 is not supported by the hardware */
	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
				    ppd->port_crc_mode_enabled);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	ret = write_vc_local_link_width(dd, 0, 0,
					opa_to_vc_link_widths(
						ppd->link_width_enabled));
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* let peer know who we are */
	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
	if (ret == HCMD_SUCCESS)
		return 0;

set_local_link_attributes_fail:
	dd_dev_err(dd,
		   "Failed to set local link attributes, return 0x%x\n",
		   ret);
	return ret;
}
/*
 * Call this to start the link.
 * Do not do anything if the link is disabled.
 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
 */
int start_link(struct hfi1_pportdata *ppd)
{
	if (!ppd->link_enabled) {
		dd_dev_info(ppd->dd,
			    "%s: stopping link start because link is disabled\n",
			    __func__);
		return 0;
	}
	if (!ppd->driver_link_ready) {
		dd_dev_info(ppd->dd,
			    "%s: stopping link start because driver is not ready\n",
			    __func__);
		return 0;
	}

	return set_link_state(ppd, HLS_DN_POLL);
}
static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;
	unsigned long timeout;

	/*
	 * Check for QSFP interrupt for t_init (SFF 8679)
	 */
	timeout = jiffies + msecs_to_jiffies(2000);
	while (1) {
		mask = read_csr(dd, dd->hfi1_id ?
				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
		if (!(mask & QSFP_HFI0_INT_N)) {
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
				  ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
			break;
		}
		if (time_after(jiffies, timeout)) {
			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
				    __func__);
			break;
		}
		udelay(2);
	}
}
static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;

	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
	if (enable)
		mask |= (u64)QSFP_HFI0_INT_N;
	else
		mask &= ~(u64)QSFP_HFI0_INT_N;
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
}
void reset_qsfp(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask, qsfp_mask;

	/* Disable INT_N from triggering QSFP interrupts */
	set_qsfp_int_n(ppd, 0);

	/* Reset the QSFP */
	mask = (u64)QSFP_HFI0_RESET_N;

	qsfp_mask = read_csr(dd,
			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
	qsfp_mask &= ~mask;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	udelay(10);

	qsfp_mask |= mask;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	wait_for_qsfp_init(ppd);

	/*
	 * Allow INT_N to trigger the QSFP interrupt to watch
	 * for alarms and warnings
	 */
	set_qsfp_int_n(ppd, 1);
}
static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
					u8 *qsfp_interrupt_status)
{
	struct hfi1_devdata *dd = ppd->dd;

	if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
	    (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
		dd_dev_info(dd, "%s: QSFP cable on fire\n",
			    __func__);

	if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
	    (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
		dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
			    __func__);

	/*
	 * The remaining alarms/warnings don't matter if the link is down.
	 */
	if (ppd->host_link_state & HLS_DOWN)
		return 0;

	if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
	    (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
		dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
			    __func__);

	if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
	    (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
		dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
			    __func__);

	/* Byte 2 is vendor specific */

	if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
			    __func__);

	if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
			    __func__);

	if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
			    __func__);

	if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
			    __func__);

	if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
	    (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
			    __func__);

	if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
	    (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
			    __func__);

	if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
	    (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
			    __func__);

	if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
	    (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
			    __func__);

	if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
			    __func__);

	if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
			    __func__);

	if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
			    __func__);

	if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
			    __func__);

	/* Bytes 9-10 and 11-12 are reserved */
	/* Bytes 13-15 are vendor specific */

	return 0;
}
/* This routine will only be scheduled if the QSFP module present is asserted */
void qsfp_event(struct work_struct *work)
{
	struct qsfp_data *qd;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;

	qd = container_of(work, struct qsfp_data, qsfp_work);
	ppd = qd->ppd;
	dd = ppd->dd;

	/* Sanity check */
	if (!qsfp_mod_present(ppd))
		return;

	/*
	 * Turn DC back on after cable has been re-inserted. Up until
	 * now, the DC has been in reset to save power.
	 */
	dc_start(dd);

	if (qd->cache_refresh_required) {
		set_qsfp_int_n(ppd, 0);

		wait_for_qsfp_init(ppd);

		/*
		 * Allow INT_N to trigger the QSFP interrupt to watch
		 * for alarms and warnings
		 */
		set_qsfp_int_n(ppd, 1);

		tune_serdes(ppd);

		start_link(ppd);
	}

	if (qd->check_interrupt_flags) {
		u8 qsfp_interrupt_status[16] = {0,};

		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
				  &qsfp_interrupt_status[0], 16) != 16) {
			dd_dev_info(dd,
				    "%s: Failed to read status of QSFP module\n",
				    __func__);
		} else {
			unsigned long flags;

			handle_qsfp_error_conditions(
					ppd, qsfp_interrupt_status);
			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.check_interrupt_flags = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
		}
	}
}
static void init_qsfp_int(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 qsfp_mask, cce_int_mask;
	const int qsfp1_int_smask = QSFP1_INT % 64;
	const int qsfp2_int_smask = QSFP2_INT % 64;

	/*
	 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
	 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
	 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
	 * the index of the appropriate CSR in the CCEIntMask CSR array
	 */
	cce_int_mask = read_csr(dd, CCE_INT_MASK +
				(8 * (QSFP1_INT / 64)));
	if (dd->hfi1_id) {
		cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
		write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
			  cce_int_mask);
	} else {
		cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
		write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
			  cce_int_mask);
	}

	qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
	/* Clear current status to avoid spurious interrupts */
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
		  qsfp_mask);
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
		  qsfp_mask);

	set_qsfp_int_n(ppd, 0);

	/* Handle active low nature of INT_N and MODPRST_N pins */
	if (qsfp_mod_present(ppd))
		qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
		  qsfp_mask);
}
/*
 * Do a one-time initialize of the LCB block.
 */
static void init_lcb(struct hfi1_devdata *dd)
{
	/* simulator does not correctly handle LCB cclk loopback, skip */
	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
		return;

	/* the DC has been reset earlier in the driver load */

	/* set LCB for cclk loopback on the port */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
}
int bringup_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 guid;
	int ret;

	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);

	guid = ppd->guid;
	if (!guid) {
		guid = dd->base_guid + ppd->port - 1;
		ppd->guid = guid;
	}

	/* Set linkinit_reason on power up per OPA spec */
	ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;

	/* one-time init of the LCB */
	init_lcb(dd);

	if (loopback) {
		ret = init_loopback(dd);
		if (ret < 0)
			return ret;
	}

	if (ppd->port_type == PORT_TYPE_QSFP) {
		set_qsfp_int_n(ppd, 0);
		wait_for_qsfp_init(ppd);
		set_qsfp_int_n(ppd, 1);
	}

	/*
	 * Tune the SerDes to a ballpark setting for
	 * optimal signal and bit error rate
	 * Needs to be done before starting the link
	 */
	tune_serdes(ppd);

	return start_link(ppd);
}
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * Shut down the link and keep it down.  First turn off that the
	 * driver wants to allow the link to be up (driver_link_ready).
	 * Then make sure the link is not automatically restarted
	 * (link_enabled).  Cancel any pending restart.  And finally
	 * go offline.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;

	ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
			     OPA_LINKDOWN_REASON_SMA_DISABLED);
	set_link_state(ppd, HLS_DN_OFFLINE);

	/* disable the port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}
static inline int init_cpu_counters(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->ibport_data.rvp.rc_acks = NULL;
		ppd->ibport_data.rvp.rc_qacks = NULL;
		ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
		ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
		ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
		if (!ppd->ibport_data.rvp.rc_acks ||
		    !ppd->ibport_data.rvp.rc_delayed_comp ||
		    !ppd->ibport_data.rvp.rc_qacks)
			return -ENOMEM;
	}

	return 0;
}
static const char * const pt_names[] = {
	"expected",
	"eager",
	"invalid"
};

static const char *pt_name(u32 type)
{
	return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
}
/*
 * Write an entry into the receive array.
 * @index is the index into the receive array.
 */
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
		  u32 type, unsigned long pa, u16 order)
{
	u64 reg;
	void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
			      (dd->kregbase + RCV_ARRAY));

	if (!(dd->flags & HFI1_PRESENT))
		goto done;

	if (type == PT_INVALID) {
		pa = 0;
	} else if (type > PT_INVALID) {
		dd_dev_err(dd,
			   "unexpected receive array type %u for index %u, not handled\n",
			   type, index);
		goto done;
	}

	hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
		  pt_name(type), index, pa, (unsigned long)order);

#define RT_ADDR_SHIFT 12	/* 4KB kernel address boundary */
	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
					<< RCV_ARRAY_RT_ADDR_SHIFT;
	writeq(reg, base + (index * 8));

	if (type == PT_EAGER)
		/*
		 * Eager entries are written one-by-one so we have to push them
		 * after we write the entry.
		 */
		flush_wc();
done:
	return;
}
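/*
 * Illustrative sketch, not part of the driver: composing an RcvArray
 * entry value the way hfi1_put_tid() does above. The shift/mask names
 * are the chip CSR fields used in that routine; the sample address and
 * buffer-size order are hypothetical values chosen for the example.
 */
static inline u64 example_rcvarray_entry(void)
{
	unsigned long pa = 0x12345000;	/* hypothetical 4KB-aligned buffer */
	u16 order = 2;			/* hypothetical buffer size code */

	/* write-enable bit | size field | 4KB-granular address field */
	return RCV_ARRAY_RT_WRITE_ENABLE_SMASK
		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
					<< RCV_ARRAY_RT_ADDR_SHIFT;
}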
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 i;

	/* this could be optimized */
	for (i = rcd->eager_base; i < rcd->eager_base +
		     rcd->egrbufs.alloced; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);

	for (i = rcd->expected_base;
			i < rcd->expected_base + rcd->expected_count; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
}
int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
			struct hfi1_ctxt_info *kinfo)
{
	kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
		HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
	return 0;
}
struct hfi1_message_header *hfi1_get_msgheader(
	struct hfi1_devdata *dd, __le32 *rhf_addr)
{
	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));

	return (struct hfi1_message_header *)
		(rhf_addr - dd->rhf_offset + offset);
}
static const char * const ib_cfg_name_strings[] = {
	"HFI1_IB_CFG_LIDLMC",
	"HFI1_IB_CFG_LWID_DG_ENB",
	"HFI1_IB_CFG_LWID_ENB",
	"HFI1_IB_CFG_LWID",
	"HFI1_IB_CFG_SPD_ENB",
	"HFI1_IB_CFG_SPD",
	"HFI1_IB_CFG_RXPOL_ENB",
	"HFI1_IB_CFG_LREV_ENB",
	"HFI1_IB_CFG_LINKLATENCY",
	"HFI1_IB_CFG_HRTBT",
	"HFI1_IB_CFG_OP_VLS",
	"HFI1_IB_CFG_VL_HIGH_CAP",
	"HFI1_IB_CFG_VL_LOW_CAP",
	"HFI1_IB_CFG_OVERRUN_THRESH",
	"HFI1_IB_CFG_PHYERR_THRESH",
	"HFI1_IB_CFG_LINKDEFAULT",
	"HFI1_IB_CFG_PKEYS",
	"HFI1_IB_CFG_MTU",
	"HFI1_IB_CFG_LSTATE",
	"HFI1_IB_CFG_VL_HIGH_LIMIT",
	"HFI1_IB_CFG_PMA_TICKS",
	"HFI1_IB_CFG_PORT"
};

static const char *ib_cfg_name(int which)
{
	if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
		return "invalid";
	return ib_cfg_name_strings[which];
}
int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
{
	struct hfi1_devdata *dd = ppd->dd;
	int val = 0;

	switch (which) {
	case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
		val = ppd->link_width_enabled;
		break;
	case HFI1_IB_CFG_LWID: /* currently active Link-width */
		val = ppd->link_width_active;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
		val = ppd->link_speed_enabled;
		break;
	case HFI1_IB_CFG_SPD: /* current Link speed */
		val = ppd->link_speed_active;
		break;

	case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
	case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
	case HFI1_IB_CFG_LINKLATENCY:
		goto unimplemented;

	case HFI1_IB_CFG_OP_VLS:
		val = ppd->vls_operational;
		break;
	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
		val = VL_ARB_LOW_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		val = ppd->overrun_threshold;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		val = ppd->phy_error_threshold;
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		val = dd->link_default;
		break;

	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
	case HFI1_IB_CFG_PMA_TICKS:
	default:
unimplemented:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(
				dd,
				"%s: which %s: not implemented\n",
				__func__,
				ib_cfg_name(which));
		break;
	}

	return val;
}
/*
 * The largest MAD packet size.
 */
#define MAX_MAD_PACKET 2048

/*
 * Return the maximum header bytes that can go on the _wire_
 * for this device. This count includes the ICRC which is
 * not part of the packet held in memory but it is appended
 * by the HW.
 * This is dependent on the device's receive header entry size.
 * HFI allows this to be set per-receive context, but the
 * driver presently enforces a global value.
 */
u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
{
	/*
	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
	 * the Receive Header Entry Size minus the PBC (or RHF) size
	 * plus one DW for the ICRC appended by HW.
	 *
	 * dd->rcd[0].rcvhdrqentsize is in DW.
	 * We use rcd[0] as all context will have the same value. Also,
	 * the first kernel context would have been allocated by now so
	 * we are guaranteed a valid value.
	 */
	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
}
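/*
 * Worked example, illustrative only: the same arithmetic as
 * lrh_max_header_bytes(), parameterized on a hypothetical header queue
 * entry size. With rcvhdrqentsize_dw = 32 DWs, the maximum wire header
 * is (32 - 2 + 1) << 2 = 124 bytes, i.e. 31 DWs including the ICRC.
 */
static inline u32 example_lrh_max_header_bytes(u32 rcvhdrqentsize_dw)
{
	/* drop the PBC/RHF DWs, add one DW for the HW-appended ICRC */
	return (rcvhdrqentsize_dw - 2 + 1) << 2;
}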
/*
 * Set Send Length
 * @ppd - per port data
 *
 * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
 * registers compare against LRH.PktLen, so use the max bytes included
 * in the LRH.
 *
 * This routine changes all VL values except VL15, which it maintains at
 * the same value.
 */
static void set_send_length(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
	u32 maxvlmtu = dd->vld[15].mtu;
	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
	int i;
	u32 thres;

	for (i = 0; i < ppd->vls_supported; i++) {
		if (dd->vld[i].mtu > maxvlmtu)
			maxvlmtu = dd->vld[i].mtu;
		if (i <= 3)
			len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
				 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
				((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
		else
			len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
				 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
				((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
	}
	write_csr(dd, SEND_LEN_CHECK0, len1);
	write_csr(dd, SEND_LEN_CHECK1, len2);
	/* adjust kernel credit return thresholds based on new MTUs */
	/* all kernel receive contexts have the same hdrqentsize */
	for (i = 0; i < ppd->vls_supported; i++) {
		thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
			    sc_mtu_to_threshold(dd->vld[i].sc,
						dd->vld[i].mtu,
						dd->rcd[0]->rcvhdrqentsize));
		sc_set_cr_threshold(dd->vld[i].sc, thres);
	}
	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
		    sc_mtu_to_threshold(dd->vld[15].sc,
					dd->vld[15].mtu,
					dd->rcd[0]->rcvhdrqentsize));
	sc_set_cr_threshold(dd->vld[15].sc, thres);

	/* Adjust maximum MTU for the port in DC */
	dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
		(ilog2(maxvlmtu >> 8) + 1);
	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
	len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
	len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
		DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
}
static void set_lidlmc(struct hfi1_pportdata *ppd)
{
	int i;
	u64 sreg = 0;
	struct hfi1_devdata *dd = ppd->dd;
	u32 mask = ~((1U << ppd->lmc) - 1);
	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);

	if (dd->hfi1_snoop.mode_flag)
		dd_dev_info(dd, "Set lid/lmc while snooping");

	c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
		| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
	c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
			<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
	      ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
			<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);

	/*
	 * Iterate over all the send contexts and set their SLID check
	 */
	sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
			SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
	       (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
			SEND_CTXT_CHECK_SLID_VALUE_SHIFT);

	for (i = 0; i < dd->chip_send_contexts; i++) {
		hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
	}

	/* Now we have to do the same thing for the sdma engines */
	sdma_update_lmc(dd, mask, ppd->lid);
}
static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
{
	unsigned long timeout;
	u32 curr_state;

	timeout = jiffies + msecs_to_jiffies(msecs);
	while (1) {
		curr_state = read_physical_state(dd);
		if (curr_state == state)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
				   state, curr_state);
			return -ETIMEDOUT;
		}
		usleep_range(1950, 2050); /* sleep 2ms-ish */
	}

	return 0;
}
/*
 * Helper for set_link_state().  Do not call except from that routine.
 * Expects ppd->hls_mutex to be held.
 *
 * @rem_reason value to be sent to the neighbor
 *
 * LinkDownReasons only set if transition succeeds.
 */
static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 pstate, previous_state;
	u32 last_local_state;
	u32 last_remote_state;
	int ret;
	int do_transition;
	int do_wait;

	previous_state = ppd->host_link_state;
	ppd->host_link_state = HLS_GOING_OFFLINE;
	pstate = read_physical_state(dd);
	if (pstate == PLS_OFFLINE) {
		do_transition = 0;	/* in right state */
		do_wait = 0;		/* ...no need to wait */
	} else if ((pstate & 0xff) == PLS_OFFLINE) {
		do_transition = 0;	/* in an offline transient state */
		do_wait = 1;		/* ...wait for it to settle */
	} else {
		do_transition = 1;	/* need to move to offline */
		do_wait = 1;		/* ...will need to wait */
	}

	if (do_transition) {
		ret = set_physical_link_state(dd,
					      (rem_reason << 8) | PLS_OFFLINE);

		if (ret != HCMD_SUCCESS) {
			dd_dev_err(dd,
				   "Failed to transition to Offline link state, return %d\n",
				   ret);
			return -EINVAL;
		}
		if (ppd->offline_disabled_reason ==
				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
			ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
	}

	if (do_wait) {
		/* it can take a while for the link to go down */
		ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
		if (ret < 0)
			return ret;
	}

	/* make sure the logical state is also down */
	wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);

	/*
	 * Now in charge of LCB - must be after the physical state is
	 * offline.quiet and before host_link_state is changed.
	 */
	set_host_lcb_access(dd);
	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */

	if (ppd->port_type == PORT_TYPE_QSFP &&
	    ppd->qsfp_info.limiting_active &&
	    qsfp_mod_present(ppd)) {
		ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
		if (ret == 0) {
			set_qsfp_tx(ppd, 0);
			release_chip_resource(dd, qsfp_resource(dd));
		} else {
			/* not fatal, but should warn */
			dd_dev_err(dd,
				   "Unable to acquire lock to turn off QSFP TX\n");
		}
	}

	/*
	 * The LNI has a mandatory wait time after the physical state
	 * moves to Offline.Quiet.  The wait time may be different
	 * depending on how the link went down.  The 8051 firmware
	 * will observe the needed wait time and only move to ready
	 * when that is completed.  The largest of the quiet timeouts
	 * is 6s, so wait that long and then at least 0.5s more for
	 * other transitions, and another 0.5s for a buffer.
	 */
	ret = wait_fm_ready(dd, 7000);
	if (ret) {
		dd_dev_err(dd,
			   "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
		/* state is really offline, so make it so */
		ppd->host_link_state = HLS_DN_OFFLINE;
		return ret;
	}

	/*
	 * The state is now offline and the 8051 is ready to accept host
	 * requests.
	 *	- change our state
	 *	- notify others if we were previously in a linkup state
	 */
	ppd->host_link_state = HLS_DN_OFFLINE;
	if (previous_state & HLS_UP) {
		/* went down while link was up */
		handle_linkup_change(dd, 0);
	} else if (previous_state
			& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
		/* went down while attempting link up */
		/* byte 1 of last_*_state is the failure reason */
		read_last_local_state(dd, &last_local_state);
		read_last_remote_state(dd, &last_remote_state);
		dd_dev_err(dd,
			   "LNI failure last states: local 0x%08x, remote 0x%08x\n",
			   last_local_state, last_remote_state);
	}

	/* the active link width (downgrade) is 0 on link down */
	ppd->link_width_active = 0;
	ppd->link_width_downgrade_tx_active = 0;
	ppd->link_width_downgrade_rx_active = 0;
	ppd->current_egress_rate = 0;
	return 0;
}
/* return the link state name */
static const char *link_state_name(u32 state)
{
	const char *name;
	int n = ilog2(state);
	static const char * const names[] = {
		[__HLS_UP_INIT_BP]	 = "INIT",
		[__HLS_UP_ARMED_BP]	 = "ARMED",
		[__HLS_UP_ACTIVE_BP]	 = "ACTIVE",
		[__HLS_DN_DOWNDEF_BP]	 = "DOWNDEF",
		[__HLS_DN_POLL_BP]	 = "POLL",
		[__HLS_DN_DISABLE_BP]	 = "DISABLE",
		[__HLS_DN_OFFLINE_BP]	 = "OFFLINE",
		[__HLS_VERIFY_CAP_BP]	 = "VERIFY_CAP",
		[__HLS_GOING_UP_BP]	 = "GOING_UP",
		[__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
		[__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
	};

	name = n < ARRAY_SIZE(names) ? names[n] : NULL;
	return name ? name : "unknown";
}
/* return the link state reason name */
static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
{
	if (state == HLS_UP_INIT) {
		switch (ppd->linkinit_reason) {
		case OPA_LINKINIT_REASON_LINKUP:
			return "(LINKUP)";
		case OPA_LINKINIT_REASON_FLAPPING:
			return "(FLAPPING)";
		case OPA_LINKINIT_OUTSIDE_POLICY:
			return "(OUTSIDE_POLICY)";
		case OPA_LINKINIT_QUARANTINED:
			return "(QUARANTINED)";
		case OPA_LINKINIT_INSUFIC_CAPABILITY:
			return "(INSUFIC_CAPABILITY)";
		default:
			break;
		}
	}
	return "";
}
/*
 * driver_physical_state - convert the driver's notion of a port's
 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
 * Return -1 (converted to a u32) to indicate error.
 */
u32 driver_physical_state(struct hfi1_pportdata *ppd)
{
	switch (ppd->host_link_state) {
	case HLS_UP_INIT:
	case HLS_UP_ARMED:
	case HLS_UP_ACTIVE:
		return IB_PORTPHYSSTATE_LINKUP;
	case HLS_DN_POLL:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_DN_DISABLE:
		return IB_PORTPHYSSTATE_DISABLED;
	case HLS_DN_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_VERIFY_CAP:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_GOING_UP:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_GOING_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_LINK_COOLDOWN:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_DN_DOWNDEF:
	default:
		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
			   ppd->host_link_state);
		return -1;
	}
}
/*
 * driver_logical_state - convert the driver's notion of a port's
 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
 * (converted to a u32) to indicate error.
 */
u32 driver_logical_state(struct hfi1_pportdata *ppd)
{
	if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
		return IB_PORT_DOWN;

	switch (ppd->host_link_state & HLS_UP) {
	case HLS_UP_INIT:
		return IB_PORT_INIT;
	case HLS_UP_ARMED:
		return IB_PORT_ARMED;
	case HLS_UP_ACTIVE:
		return IB_PORT_ACTIVE;
	default:
		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
			   ppd->host_link_state);
		return -1;
	}
}
void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
			  u8 neigh_reason, u8 rem_reason)
{
	if (ppd->local_link_down_reason.latest == 0 &&
	    ppd->neigh_link_down_reason.latest == 0) {
		ppd->local_link_down_reason.latest = lcl_reason;
		ppd->neigh_link_down_reason.latest = neigh_reason;
		ppd->remote_link_down_reason = rem_reason;
	}
}
/*
 * Change the physical and/or logical link state.
 *
 * Do not call this routine while inside an interrupt.  It contains
 * calls to routines that can take multiple seconds to finish.
 *
 * Returns 0 on success, -errno on failure.
 */
int set_link_state(struct hfi1_pportdata *ppd, u32 state)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct ib_event event = {.device = NULL};
	int ret1, ret = 0;
	int orig_new_state, poll_bounce;

	mutex_lock(&ppd->hls_lock);

	orig_new_state = state;
	if (state == HLS_DN_DOWNDEF)
		state = dd->link_default;

	/* interpret poll -> poll as a link bounce */
	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
		      state == HLS_DN_POLL;

	dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
		    link_state_name(ppd->host_link_state),
		    link_state_name(orig_new_state),
		    poll_bounce ? "(bounce) " : "",
		    link_state_reason_name(ppd, state));

	/*
	 * If we're going to a (HLS_*) link state that implies the logical
	 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
	 * reset is_sm_config_started to 0.
	 */
	if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
		ppd->is_sm_config_started = 0;

	/*
	 * Do nothing if the states match.  Let a poll to poll link bounce
	 * go through.
	 */
	if (ppd->host_link_state == state && !poll_bounce)
		goto done;

	switch (state) {
	case HLS_UP_INIT:
		if (ppd->host_link_state == HLS_DN_POLL &&
		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
			/*
			 * Quick link up jumps from polling to here.
			 *
			 * Whether in normal or loopback mode, the
			 * simulator jumps from polling to link up.
			 * Accept that here.
			 */
			/* OK */
		} else if (ppd->host_link_state != HLS_GOING_UP) {
			goto unexpected;
		}

		ppd->host_link_state = HLS_UP_INIT;
		ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
		if (ret) {
			/* logical state didn't change, stay at going_up */
			ppd->host_link_state = HLS_GOING_UP;
			dd_dev_err(dd,
				   "%s: logical state did not change to INIT\n",
				   __func__);
		} else {
			/* clear old transient LINKINIT_REASON code */
			if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
				ppd->linkinit_reason =
					OPA_LINKINIT_REASON_LINKUP;

			/* enable the port */
			add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

			handle_linkup_change(dd, 1);
		}
		break;
	case HLS_UP_ARMED:
		if (ppd->host_link_state != HLS_UP_INIT)
			goto unexpected;

		ppd->host_link_state = HLS_UP_ARMED;
		set_logical_state(dd, LSTATE_ARMED);
		ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
		if (ret) {
			/* logical state didn't change, stay at init */
			ppd->host_link_state = HLS_UP_INIT;
			dd_dev_err(dd,
				   "%s: logical state did not change to ARMED\n",
				   __func__);
		}
		/*
		 * The simulator does not currently implement SMA messages,
		 * so neighbor_normal is not set.  Set it here when we first
		 * move to Armed.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
			ppd->neighbor_normal = 1;
		break;
	case HLS_UP_ACTIVE:
		if (ppd->host_link_state != HLS_UP_ARMED)
			goto unexpected;

		ppd->host_link_state = HLS_UP_ACTIVE;
		set_logical_state(dd, LSTATE_ACTIVE);
		ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
		if (ret) {
			/* logical state didn't change, stay at armed */
			ppd->host_link_state = HLS_UP_ARMED;
			dd_dev_err(dd,
				   "%s: logical state did not change to ACTIVE\n",
				   __func__);
		} else {
			/* tell all engines to go running */
			sdma_all_running(dd);

			/* Signal the IB layer that the port has gone active */
			event.device = &dd->verbs_dev.rdi.ibdev;
			event.element.port_num = ppd->port;
			event.event = IB_EVENT_PORT_ACTIVE;
		}
		break;
	case HLS_DN_POLL:
		if ((ppd->host_link_state == HLS_DN_DISABLE ||
		     ppd->host_link_state == HLS_DN_OFFLINE) &&
		    dd->dc_shutdown)
			dc_start(dd);
		/* Hand LED control to the DC */
		write_csr(dd, DCC_CFG_LED_CNTRL, 0);

		if (ppd->host_link_state != HLS_DN_OFFLINE) {
			u8 tmp = ppd->link_enabled;

			ret = goto_offline(ppd, ppd->remote_link_down_reason);
			if (ret) {
				ppd->link_enabled = tmp;
				break;
			}
			ppd->remote_link_down_reason = 0;

			if (ppd->driver_link_ready)
				ppd->link_enabled = 1;
		}

		set_all_slowpath(ppd->dd);
		ret = set_local_link_attributes(ppd);
		if (ret)
			break;

		ppd->port_error_action = 0;
		ppd->host_link_state = HLS_DN_POLL;

		if (quick_linkup) {
			/* quick linkup does not go into polling */
			ret = do_quick_linkup(dd);
		} else {
			ret1 = set_physical_link_state(dd, PLS_POLLING);
			if (ret1 != HCMD_SUCCESS) {
				dd_dev_err(dd,
					   "Failed to transition to Polling link state, return 0x%x\n",
					   ret1);
				ret = -EINVAL;
			}
		}
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
		/*
		 * If an error occurred above, go back to offline.  The
		 * caller may reschedule another attempt.
		 */
		if (ret)
			goto_offline(ppd, 0);
		break;
	case HLS_DN_DISABLE:
		/* link is disabled */
		ppd->link_enabled = 0;

		/* allow any state to transition to disabled */

		/* must transition to offline first */
		if (ppd->host_link_state != HLS_DN_OFFLINE) {
			ret = goto_offline(ppd, ppd->remote_link_down_reason);
			if (ret)
				break;
			ppd->remote_link_down_reason = 0;
		}

		ret1 = set_physical_link_state(dd, PLS_DISABLED);
		if (ret1 != HCMD_SUCCESS) {
			dd_dev_err(dd,
				   "Failed to transition to Disabled link state, return 0x%x\n",
				   ret1);
			ret = -EINVAL;
			break;
		}
		ppd->host_link_state = HLS_DN_DISABLE;
		break;
	case HLS_DN_OFFLINE:
		if (ppd->host_link_state == HLS_DN_DISABLE)
			dc_start(dd);

		/* allow any state to transition to offline */
		ret = goto_offline(ppd, ppd->remote_link_down_reason);
		if (!ret)
			ppd->remote_link_down_reason = 0;
		break;
	case HLS_VERIFY_CAP:
		if (ppd->host_link_state != HLS_DN_POLL)
			goto unexpected;
		ppd->host_link_state = HLS_VERIFY_CAP;
		break;
	case HLS_GOING_UP:
		if (ppd->host_link_state != HLS_VERIFY_CAP)
			goto unexpected;

		ret1 = set_physical_link_state(dd, PLS_LINKUP);
		if (ret1 != HCMD_SUCCESS) {
			dd_dev_err(dd,
				   "Failed to transition to link up state, return 0x%x\n",
				   ret1);
			ret = -EINVAL;
			break;
		}
		ppd->host_link_state = HLS_GOING_UP;
		break;

	case HLS_GOING_OFFLINE:		/* transient within goto_offline() */
	case HLS_LINK_COOLDOWN:		/* transient within goto_offline() */
		/* not supported */
		dd_dev_info(dd, "%s: state 0x%x: not supported\n",
			    __func__, state);
		ret = -EINVAL;
		break;
	default:
		goto unexpected;
	}
	goto done;

unexpected:
	dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
		   __func__, link_state_name(ppd->host_link_state),
		   link_state_name(state));
	ret = -EINVAL;

done:
	mutex_unlock(&ppd->hls_lock);

	if (event.device)
		ib_dispatch_event(&event);

	return ret;
}
int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
{
	u64 reg;
	int ret = 0;

	switch (which) {
	case HFI1_IB_CFG_LIDLMC:
		set_lidlmc(ppd);
		break;
	case HFI1_IB_CFG_VL_HIGH_LIMIT:
		/*
		 * The VL Arbitrator high limit is sent in units of 4k
		 * bytes, while HFI stores it in units of 64 bytes.
		 */
		val *= 4096 / 64;
		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
			<< SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
		write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* HFI only supports POLL as the default link down state */
		if (val != HLS_DN_POLL)
			ret = -EINVAL;
		break;
	case HFI1_IB_CFG_OP_VLS:
		if (ppd->vls_operational != val)
			ppd->vls_operational = val;
		break;
	/*
	 * For link width, link width downgrade, and speed enable, always AND
	 * the setting with what is actually supported.  This has two benefits.
	 * First, enabled can't have unsupported values, no matter what the
	 * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
	 * "fill in with your supported value" have all the bits in the
	 * field set, so simply ANDing with supported has the desired result.
	 */
	case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
		ppd->link_width_enabled = val & ppd->link_width_supported;
		break;
	case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
		ppd->link_width_downgrade_enabled =
				val & ppd->link_width_downgrade_supported;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
		ppd->link_speed_enabled = val & ppd->link_speed_supported;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->overrun_threshold = val;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->phy_error_threshold = val;
		break;

	case HFI1_IB_CFG_MTU:
		set_send_length(ppd);
		break;

	case HFI1_IB_CFG_PKEYS:
		if (HFI1_CAP_IS_KSET(PKEY_CHECK))
			set_partition_keys(ppd);
		break;

	default:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(ppd->dd,
				    "%s: which %s, val 0x%x: not implemented\n",
				    __func__, ib_cfg_name(which), val);
		break;
	}
	return ret;
}
/* begin functions related to vl arbitration table caching */
static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
{
	int i;

	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
			VL_ARB_LOW_PRIO_TABLE_SIZE);
	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
			VL_ARB_HIGH_PRIO_TABLE_SIZE);

	/*
	 * Note that we always return values directly from the
	 * 'vl_arb_cache' (and do no CSR reads) in response to a
	 * 'Get(VLArbTable)'. This is obviously correct after a
	 * 'Set(VLArbTable)', since the cache will then be up to
	 * date. But it's also correct prior to any 'Set(VLArbTable)'
	 * since then both the cache, and the relevant h/w registers
	 * will be zeroed.
	 */

	for (i = 0; i < MAX_PRIO_TABLE; i++)
		spin_lock_init(&ppd->vl_arb_cache[i].lock);
}
/*
 * vl_arb_lock_cache
 *
 * All other vl_arb_* functions should be called only after locking
 * the cache.
 */
static inline struct vl_arb_cache *
vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
{
	if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
		return NULL;
	spin_lock(&ppd->vl_arb_cache[idx].lock);
	return &ppd->vl_arb_cache[idx];
}

static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
{
	spin_unlock(&ppd->vl_arb_cache[idx].lock);
}
static void vl_arb_get_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

static void vl_arb_set_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

static int vl_arb_match_cache(struct vl_arb_cache *cache,
			      struct ib_vl_weight_elem *vl)
{
	return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

/* end functions related to vl arbitration table caching */
static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
			  u32 size, struct ib_vl_weight_elem *vl)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	unsigned int i, is_up = 0;
	int drain, ret = 0;

	mutex_lock(&ppd->hls_lock);

	if (ppd->host_link_state & HLS_UP)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * Before adjusting VL arbitration weights, empty per-VL
		 * FIFOs, otherwise a packet whose VL weight is being
		 * set to 0 could get stuck in a FIFO with no chance to
		 * egress.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(
			dd,
			"%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
			__func__);
		goto err;
	}

	for (i = 0; i < size; i++, vl++) {
		/*
		 * NOTE: The low priority shift and mask are used here, but
		 * they are the same for both the low and high registers.
		 */
		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
				<< SEND_LOW_PRIORITY_LIST_VL_SHIFT)
		      | (((u64)vl->weight
				& SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
				<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
		write_csr(dd, target + (i * 8), reg);
	}
	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}
/*
 * Read one credit merge VL register.
 */
static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
			   struct vl_limit *vll)
{
	u64 reg = read_csr(dd, csr);

	vll->dedicated = cpu_to_be16(
		(reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
		& SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
	vll->shared = cpu_to_be16(
		(reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
		& SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
}
/*
 * Read the current credit merge limits.
 */
static int get_buffer_control(struct hfi1_devdata *dd,
			      struct buffer_control *bc, u16 *overall_limit)
{
	u64 reg;
	int i;

	/* not all entries are filled in */
	memset(bc, 0, sizeof(*bc));

	/* OPA and HFI have a 1-1 mapping */
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);

	/* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	bc->overall_shared_limit = cpu_to_be16(
		(reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
		& SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
	if (overall_limit)
		*overall_limit = (reg
			>> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
			& SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
	return sizeof(struct buffer_control);
}
static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
	u64 reg;
	int i;

	/* each register contains 16 SC->VLnt mappings, 4 bits each */
	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
	for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);

		dp->vlnt[2 * i] = byte & 0xf;
		dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
	}

	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
	for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);

		dp->vlnt[16 + (2 * i)] = byte & 0xf;
		dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
	}
	return sizeof(struct sc2vlnt);
}
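/*
 * Illustrative sketch, not part of the driver: unpacking two 4-bit
 * SC->VLnt entries from one byte as get_sc2vlnt() does above. For
 * example, byte 0x51 holds 0x1 in the low nibble (even-indexed entry)
 * and 0x5 in the high nibble (odd-indexed entry).
 */
static inline void example_unpack_sc2vl(u8 byte, u8 *even, u8 *odd)
{
	*even = byte & 0xf;		/* entry 2*i */
	*odd = (byte & 0xf0) >> 4;	/* entry 2*i + 1 */
}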
static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
			      struct ib_vl_weight_elem *vl)
{
	unsigned int i;

	for (i = 0; i < nelems; i++, vl++) {
		vl->vl = 0xf;
		vl->weight = 0;
	}
}
static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
		  DC_SC_VL_VAL(15_0,
			       0, dp->vlnt[0] & 0xf,
			       1, dp->vlnt[1] & 0xf,
			       2, dp->vlnt[2] & 0xf,
			       3, dp->vlnt[3] & 0xf,
			       4, dp->vlnt[4] & 0xf,
			       5, dp->vlnt[5] & 0xf,
			       6, dp->vlnt[6] & 0xf,
			       7, dp->vlnt[7] & 0xf,
			       8, dp->vlnt[8] & 0xf,
			       9, dp->vlnt[9] & 0xf,
			       10, dp->vlnt[10] & 0xf,
			       11, dp->vlnt[11] & 0xf,
			       12, dp->vlnt[12] & 0xf,
			       13, dp->vlnt[13] & 0xf,
			       14, dp->vlnt[14] & 0xf,
			       15, dp->vlnt[15] & 0xf));
	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
		  DC_SC_VL_VAL(31_16,
			       16, dp->vlnt[16] & 0xf,
			       17, dp->vlnt[17] & 0xf,
			       18, dp->vlnt[18] & 0xf,
			       19, dp->vlnt[19] & 0xf,
			       20, dp->vlnt[20] & 0xf,
			       21, dp->vlnt[21] & 0xf,
			       22, dp->vlnt[22] & 0xf,
			       23, dp->vlnt[23] & 0xf,
			       24, dp->vlnt[24] & 0xf,
			       25, dp->vlnt[25] & 0xf,
			       26, dp->vlnt[26] & 0xf,
			       27, dp->vlnt[27] & 0xf,
			       28, dp->vlnt[28] & 0xf,
			       29, dp->vlnt[29] & 0xf,
			       30, dp->vlnt[30] & 0xf,
			       31, dp->vlnt[31] & 0xf));
}
static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
			u16 limit)
{
	/* a 0 is a legal setting; only warn on nonzero unused limits */
	if (limit != 0)
		dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
			    what, (int)limit, idx);
}
/* change only the shared limit portion of SendCmGlobalCredit */
static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
{
	u64 reg;

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}
/* change only the total credit limit portion of SendCmGlobalCredit */
static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
{
	u64 reg;

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}
/* set the given per-VL shared limit */
static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
{
	u64 reg;
	u32 addr;

	if (vl < TXE_NUM_DATA_VL)
		addr = SEND_CM_CREDIT_VL + (8 * vl);
	else
		addr = SEND_CM_CREDIT_VL15;

	reg = read_csr(dd, addr);
	reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
	reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
	write_csr(dd, addr, reg);
}
/* set the given per-VL dedicated limit */
static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
{
	u64 reg;
	u32 addr;

	if (vl < TXE_NUM_DATA_VL)
		addr = SEND_CM_CREDIT_VL + (8 * vl);
	else
		addr = SEND_CM_CREDIT_VL15;

	reg = read_csr(dd, addr);
	reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
	reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
	write_csr(dd, addr, reg);
}
/* spin until the given per-VL status mask bits clear */
static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
				     const char *which)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
	while (1) {
		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;

		if (reg == 0)
			return;	/* success */
		if (time_after(jiffies, timeout))
			break;	/* timed out */
		udelay(1);
	}

	dd_dev_err(dd,
		   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
		   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
	/*
	 * If this occurs, it is likely there was a credit loss on the link.
	 * The only recovery from that is a link bounce.
	 */
	dd_dev_err(dd,
		   "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
}
/*
 * The number of credits on the VLs may be changed while everything
 * is "live", but the following algorithm must be followed due to
 * how the hardware is actually implemented.  In particular,
 * Return_Credit_Status[] is the only correct status check.
 *
 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
 *     set Global_Shared_Credit_Limit = 0
 *     use_all_vl = 1
 * mask0 = all VLs that are changing either dedicated or shared limits
 * set Shared_Limit[mask0] = 0
 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
 * if (changing any dedicated limit)
 *     mask1 = all VLs that are lowering dedicated limits
 *     lower Dedicated_Limit[mask1]
 *     spin until Return_Credit_Status[mask1] == 0
 * raise Dedicated_Limits
 * raise Shared_Limits
 * raise Global_Shared_Credit_Limit
 *
 * lower = if the new limit is lower, set the limit to the new value
 * raise = if the new limit is higher than the current value (may be changed
 *	earlier in the algorithm), set the new limit to the new value
 */
int set_buffer_control(struct hfi1_pportdata *ppd,
		       struct buffer_control *new_bc)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 changing_mask, ld_mask, stat_mask;
	int change_count;
	int i, use_all_mask;
	int this_shared_changing;
	int vl_count = 0, ret;
	/*
	 * A0: add the variable any_shared_limit_changing below and in the
	 * algorithm above.  If removing A0 support, it can be removed.
	 */
	int any_shared_limit_changing;
	struct buffer_control cur_bc;
	u8 changing[OPA_MAX_VLS];
	u8 lowering_dedicated[OPA_MAX_VLS];
	u16 cur_total;
	u32 new_total = 0;
	const u64 all_mask =
	SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;

#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
#define NUM_USABLE_VLS 16	/* look at VL15 and less */

	/* find the new total credits, do sanity check on unused VLs */
	for (i = 0; i < OPA_MAX_VLS; i++) {
		if (valid_vl(i)) {
			new_total += be16_to_cpu(new_bc->vl[i].dedicated);
			continue;
		}
		nonzero_msg(dd, i, "dedicated",
			    be16_to_cpu(new_bc->vl[i].dedicated));
		nonzero_msg(dd, i, "shared",
			    be16_to_cpu(new_bc->vl[i].shared));
		new_bc->vl[i].dedicated = 0;
		new_bc->vl[i].shared = 0;
	}
	new_total += be16_to_cpu(new_bc->overall_shared_limit);

	/* fetch the current values */
	get_buffer_control(dd, &cur_bc, &cur_total);

	/*
	 * Create the masks we will use.
	 */
	memset(changing, 0, sizeof(changing));
	memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
	/*
	 * NOTE: Assumes that the individual VL bits are adjacent and in
	 * increasing order
	 */
	stat_mask =
		SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
	changing_mask = 0;
	ld_mask = 0;
	change_count = 0;
	any_shared_limit_changing = 0;
	for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
		if (!valid_vl(i))
			continue;
		this_shared_changing = new_bc->vl[i].shared
						!= cur_bc.vl[i].shared;
		if (this_shared_changing)
			any_shared_limit_changing = 1;
		if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
		    this_shared_changing) {
			changing[i] = 1;
			changing_mask |= stat_mask;
			change_count++;
		}
		if (be16_to_cpu(new_bc->vl[i].dedicated) <
					be16_to_cpu(cur_bc.vl[i].dedicated)) {
			lowering_dedicated[i] = 1;
			ld_mask |= stat_mask;
		}
	}

	/* bracket the credit change with a total adjustment */
	if (new_total > cur_total)
		set_global_limit(dd, new_total);

	/*
	 * Start the credit change algorithm.
	 */
	use_all_mask = 0;
	if ((be16_to_cpu(new_bc->overall_shared_limit) <
	     be16_to_cpu(cur_bc.overall_shared_limit)) ||
	    (is_ax(dd) && any_shared_limit_changing)) {
		set_global_shared(dd, 0);
		cur_bc.overall_shared_limit = 0;
		use_all_mask = 1;
	}

	for (i = 0; i < NUM_USABLE_VLS; i++) {
		if (!valid_vl(i))
			continue;

		if (changing[i]) {
			set_vl_shared(dd, i, 0);
			cur_bc.vl[i].shared = 0;
		}
	}

	wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
				 "shared");

	if (change_count > 0) {
		for (i = 0; i < NUM_USABLE_VLS; i++) {
			if (!valid_vl(i))
				continue;

			if (lowering_dedicated[i]) {
				set_vl_dedicated(dd, i,
						 be16_to_cpu(new_bc->
							     vl[i].dedicated));
				cur_bc.vl[i].dedicated =
						new_bc->vl[i].dedicated;
			}
		}

		wait_for_vl_status_clear(dd, ld_mask, "dedicated");

		/* now raise all dedicated that are going up */
		for (i = 0; i < NUM_USABLE_VLS; i++) {
			if (!valid_vl(i))
				continue;

			if (be16_to_cpu(new_bc->vl[i].dedicated) >
					be16_to_cpu(cur_bc.vl[i].dedicated))
				set_vl_dedicated(dd, i,
						 be16_to_cpu(new_bc->
							     vl[i].dedicated));
		}
	}

	/* next raise all shared that are going up */
	for (i = 0; i < NUM_USABLE_VLS; i++) {
		if (!valid_vl(i))
			continue;

		if (be16_to_cpu(new_bc->vl[i].shared) >
				be16_to_cpu(cur_bc.vl[i].shared))
			set_vl_shared(dd, i,
				      be16_to_cpu(new_bc->vl[i].shared));
	}

	/* finally raise the global shared */
	if (be16_to_cpu(new_bc->overall_shared_limit) >
	    be16_to_cpu(cur_bc.overall_shared_limit))
		set_global_shared(dd,
				  be16_to_cpu(new_bc->overall_shared_limit));

	/* bracket the credit change with a total adjustment */
	if (new_total < cur_total)
		set_global_limit(dd, new_total);

	/*
	 * Determine the actual number of operational VLS using the number of
	 * dedicated and shared credits for each VL.
	 */
	if (change_count > 0) {
		for (i = 0; i < TXE_NUM_DATA_VL; i++)
			if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
			    be16_to_cpu(new_bc->vl[i].shared) > 0)
				vl_count++;
		ppd->actual_vls_operational = vl_count;
		ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
				    ppd->actual_vls_operational :
				    ppd->vls_operational,
				    NULL);
		if (ret == 0)
			ret = pio_map_init(dd, ppd->port - 1, vl_count ?
					   ppd->actual_vls_operational :
					   ppd->vls_operational, NULL);
		if (ret)
			return ret;
	}
	return 0;
}
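/*
 * Illustrative sketch, not part of the driver: the essential
 * lower-before-raise ordering of the credit change algorithm above,
 * reduced to a single limit. Raising before returned credits drain
 * could oversubscribe the buffer; lowering first and waiting for
 * Return_Credit_Status to clear cannot. All names here are
 * hypothetical.
 */
static inline void example_change_limit(u16 *hw_limit, u16 cur, u16 want)
{
	if (want < cur)
		*hw_limit = want;	/* lower immediately, then wait */
	/* ...spin until Return_Credit_Status clears... */
	if (want > cur)
		*hw_limit = want;	/* raise only after the wait */
}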
/*
 * Read the given fabric manager table. Return the size of the
 * table (in bytes) on success, and a negative error code on
 * failure.
 */
int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
{
	int size;
	struct vl_arb_cache *vlc;

	switch (which) {
	case FM_TBL_VL_HIGH_ARB:
		size = 256;
		/*
		 * OPA specifies 128 elements (of 2 bytes each), though
		 * HFI supports only 16 elements in h/w.
		 */
		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
		vl_arb_get_cache(vlc, t);
		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
		break;
	case FM_TBL_VL_LOW_ARB:
		size = 256;
		/*
		 * OPA specifies 128 elements (of 2 bytes each), though
		 * HFI supports only 16 elements in h/w.
		 */
		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
		vl_arb_get_cache(vlc, t);
		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
		break;
	case FM_TBL_BUFFER_CONTROL:
		size = get_buffer_control(ppd->dd, t, NULL);
		break;
	case FM_TBL_SC2VLNT:
		size = get_sc2vlnt(ppd->dd, t);
		break;
	case FM_TBL_VL_PREEMPT_ELEMS:
		size = 256;
		/* OPA specifies 128 elements, of 2 bytes each */
		get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
		break;
	case FM_TBL_VL_PREEMPT_MATRIX:
		size = 256;
		/*
		 * OPA specifies that this is the same size as the VL
		 * arbitration tables (i.e., 256 bytes).
		 */
		break;
	default:
		return -EINVAL;
	}
	return size;
}
/*
 * Write the given fabric manager table.
 */
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
{
	int ret = 0;
	struct vl_arb_cache *vlc;

	switch (which) {
	case FM_TBL_VL_HIGH_ARB:
		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
		if (vl_arb_match_cache(vlc, t)) {
			vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
			break;
		}
		vl_arb_set_cache(vlc, t);
		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
		ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
				     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
		break;
	case FM_TBL_VL_LOW_ARB:
		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
		if (vl_arb_match_cache(vlc, t)) {
			vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
			break;
		}
		vl_arb_set_cache(vlc, t);
		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
		ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
				     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
		break;
	case FM_TBL_BUFFER_CONTROL:
		ret = set_buffer_control(ppd, t);
		break;
	case FM_TBL_SC2VLNT:
		set_sc2vlnt(ppd->dd, t);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
11112 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11114 static int disable_data_vls(struct hfi1_devdata
*dd
)
11119 pio_send_control(dd
, PSC_DATA_VL_DISABLE
);
11125 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11126 * Just re-enables all data VLs (the "fill" part happens
11127 * automatically - the name was chosen for symmetry with
11128 * stop_drain_data_vls()).
11130 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11132 int open_fill_data_vls(struct hfi1_devdata
*dd
)
11137 pio_send_control(dd
, PSC_DATA_VL_ENABLE
);
11143 * drain_data_vls() - assumes that disable_data_vls() has been called,
11144 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
11145 * engines to drop to 0.
11147 static void drain_data_vls(struct hfi1_devdata
*dd
)
11151 pause_for_credit_return(dd
);
11155 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11157 * Use open_fill_data_vls() to resume using data VLs. This pair is
11158 * meant to be used like this:
11160 * stop_drain_data_vls(dd);
11161 * // do things with per-VL resources
11162 * open_fill_data_vls(dd);
11164 int stop_drain_data_vls(struct hfi1_devdata
*dd
)
11168 ret
= disable_data_vls(dd
);
11170 drain_data_vls(dd
);
/*
 * Convert a nanosecond time to a cclock count.  No matter how slow
 * the cclock, a non-zero ns will always have a non-zero result.
 */
u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
{
	u32 cclocks;

	if (dd->icode == ICODE_FPGA_EMULATION)
		cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
	else  /* simulation pretends to be ASIC */
		cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
	if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
		cclocks = 1;
	return cclocks;
}

/*
 * Convert a cclock count to nanoseconds.  No matter how slow
 * the cclock, a non-zero cclocks will always have a non-zero result.
 */
u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
{
	u32 ns;

	if (dd->icode == ICODE_FPGA_EMULATION)
		ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
	else  /* simulation pretends to be ASIC */
		ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
	if (cclocks && !ns)
		ns = 1;
	return ns;
}
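/*
 * Worked example, illustrative only: the conversions above round down
 * and then clamp to a minimum of 1. With a hypothetical cclock period
 * of 1250 ps, 1 ns becomes (1 * 1000) / 1250 = 0 cclocks, which the
 * clamp turns into 1; converting 1 cclock back gives
 * (1 * 1250) / 1000 = 1 ns, so the round trip is not exact.
 */
static inline u32 example_ns_to_cclock(u32 ns, u32 cclock_ps)
{
	u32 cclocks = (ns * 1000) / cclock_ps;	/* same arithmetic as above */

	return (ns && !cclocks) ? 1 : cclocks;	/* never 0 for nonzero ns */
}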
/*
 * Dynamically adjust the receive interrupt timeout for a context based on
 * incoming packet rate.
 *
 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
 */
static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 timeout = rcd->rcvavail_timeout;

	/*
	 * This algorithm doubles or halves the timeout depending on whether
	 * the number of packets received in this interrupt were less than or
	 * greater equal the interrupt count.
	 *
	 * The calculations below do not allow a steady state to be achieved.
	 * Only at the endpoints it is possible to have an unchanging
	 * timeout.
	 */
	if (npkts < rcv_intr_count) {
		/*
		 * Not enough packets arrived before the timeout, adjust
		 * timeout downward.
		 */
		if (timeout < 2) /* already at minimum? */
			return;
		timeout >>= 1;
	} else {
		/*
		 * More than enough packets arrived before the timeout, adjust
		 * timeout upward.
		 */
		if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
			return;
		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
	}

	rcd->rcvavail_timeout = timeout;
	/*
	 * timeout cannot be larger than rcv_intr_timeout_csr which has already
	 * been verified to be in range
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
			(u64)timeout <<
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
}
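/*
 * Illustrative sketch, not part of the driver: the timeout trajectory
 * produced by adjust_rcv_timeout() above. Starting from a hypothetical
 * CSR maximum of 840, a run of slow interrupts (npkts < rcv_intr_count)
 * halves the value: 840 -> 420 -> 210 -> ... -> 1, and a busy run
 * doubles it back up, capped at the CSR maximum.
 */
static inline u32 example_adjust(u32 timeout, u32 npkts, u32 max_timeout)
{
	if (npkts < rcv_intr_count)
		return timeout < 2 ? timeout : timeout >> 1;	/* halve */
	return timeout >= max_timeout ? timeout :
	       min(timeout << 1, max_timeout);			/* double */
}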
void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
		    u32 intr_adjust, u32 npkts)
{
	struct hfi1_devdata *dd = rcd->dd;
	u64 reg;
	u32 ctxt = rcd->ctxt;

	/*
	 * Need to write timeout register before updating RcvHdrHead to ensure
	 * that a new value is used when the HW decides to restart counting.
	 */
	if (intr_adjust)
		adjust_rcv_timeout(rcd, npkts);
	if (updegr) {
		reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
			<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
	}
	mmiowb();
	reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
		(((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
			<< RCV_HDR_HEAD_HEAD_SHIFT);
	write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
	mmiowb();
}
u32 hdrqempty(struct hfi1_ctxtdata *rcd)
{
	u32 head, tail;

	head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
		& RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;

	if (rcd->rcvhdrtail_kvaddr)
		tail = get_rcvhdrtail(rcd);
	else
		tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);

	return head == tail;
}
/*
 * Context Control and Receive Array encoding for buffer size:
 *	0x0 invalid
 *	0x1   4 KB
 *	0x2   8 KB
 *	0x3  16 KB
 *	0x4  32 KB
 *	0x5  64 KB
 *	0x6 128 KB
 *	0x7 256 KB
 *	0x8 512 KB (Receive Array only)
 *	0x9   1 MB (Receive Array only)
 *	0xa   2 MB (Receive Array only)
 *
 *	0xB-0xF - reserved (Receive Array only)
 *
 *
 * This routine assumes that the value has already been sanity checked.
 */
static u32 encoded_size(u32 size)
{
	switch (size) {
	case   4 * 1024: return 0x1;
	case   8 * 1024: return 0x2;
	case  16 * 1024: return 0x3;
	case  32 * 1024: return 0x4;
	case  64 * 1024: return 0x5;
	case 128 * 1024: return 0x6;
	case 256 * 1024: return 0x7;
	case 512 * 1024: return 0x8;
	case   1 * 1024 * 1024: return 0x9;
	case   2 * 1024 * 1024: return 0xa;
	}
	return 0x1;	/* if invalid, go with the minimum size */
}
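/*
 * Illustrative note, not part of the driver: for the supported
 * power-of-two sizes the encoding above is simply ilog2(size) - 11,
 * e.g. 4 KB -> ilog2(4096) - 11 = 0x1 and 2 MB -> 0xa. The explicit
 * switch is used instead so unsupported sizes fall through to the
 * 4 KB minimum. The helper below is only valid for 4 KB through 2 MB.
 */
static inline u32 example_encoded_size(u32 size)
{
	return is_power_of_2(size) ? (u32)ilog2(size) - 11 : 0x1;
}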
void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
{
	struct hfi1_ctxtdata *rcd;
	u64 rcvctrl, reg;
	int did_enable = 0;

	rcd = dd->rcd[ctxt];
	if (!rcd)
		return;

	hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);

	rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
	/* if the context already enabled, don't do the extra steps */
	if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
		/* reset the tail and hdr addresses, and sequence count */
		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
				rcd->rcvhdrq_phys);
		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					rcd->rcvhdrqtailaddr_phys);
		rcd->seq_cnt = 1;

		/* reset the cached receive header queue head value */
		rcd->head = 0;

		/*
		 * Zero the receive header queue so we don't get false
		 * positives when checking the sequence number.  The
		 * sequence numbers could land exactly on the same spot.
		 * E.g. a rcd restart before the receive header wrapped.
		 */
		memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);

		/* starting timeout */
		rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;

		/* enable the context */
		rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;

		/* clean the egr buffer size first */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
		rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
				& RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
					<< RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;

		/* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
		did_enable = 1;

		/* zero RcvEgrIndexHead */
		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);

		/* set eager count and base index */
		reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
			& RCV_EGR_CTRL_EGR_CNT_MASK)
		       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
		      (((rcd->eager_base >> RCV_SHIFT)
			& RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
		       << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
		write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);

		/*
		 * Set TID (expected) count and base index.
		 * rcd->expected_count is set to individual RcvArray entries,
		 * not pairs, and the CSR takes a pair-count in groups of
		 * four, so divide by 8.
		 */
		reg = (((rcd->expected_count >> RCV_SHIFT)
			& RCV_TID_CTRL_TID_PAIR_CNT_MASK)
		       << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
		      (((rcd->expected_base >> RCV_SHIFT)
			& RCV_TID_CTRL_TID_BASE_INDEX_MASK)
		       << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
		write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
		if (ctxt == HFI1_CTRL_CTXT)
			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
	}
	if (op & HFI1_RCVCTRL_CTXT_DIS) {
		write_csr(dd, RCV_VL15, 0);
		/*
		 * When receive context is being disabled turn on tail
		 * update with a dummy tail address and then disable
		 * receive context.
		 */
		if (dd->rcvhdrtail_dummy_physaddr) {
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					dd->rcvhdrtail_dummy_physaddr);
			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
		}
		rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
	}
	if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
		rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
	if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
	if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
		rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
		/* See comment on RcvCtxtCtrl.TailUpd above */
		if (!(op & HFI1_RCVCTRL_CTXT_DIS))
			rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	}
	if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
		rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
	if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
		/*
		 * In one-packet-per-eager mode, the size comes from
		 * the RcvArray entry.
		 */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
		rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
	}
	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
	rcd->rcvctrl = rcvctrl;
	hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
	write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);

	/* work around sticky RcvCtxtStatus.BlockedRHQFull */
	if (did_enable &&
	    (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
		reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
		if (reg != 0) {
			dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
				    ctxt, reg);
			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
			reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
			dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
				    ctxt, reg, reg == 0 ? "not" : "still");
		}
	}

	if (did_enable) {
		/*
		 * The interrupt timeout and count must be set after
		 * the context is enabled to take effect.
		 */
		/* set interrupt timeout */
		write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
				(u64)rcd->rcvavail_timeout <<
				RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);

		/* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
		reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
	}

	if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
		/*
		 * If the context has been disabled and the Tail Update has
		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
		 * so it doesn't contain an address that is invalid.
		 */
		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
				dd->rcvhdrtail_dummy_physaddr);
}
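
/*
 * Worked example of the RcvTidCtrl pair-count math above, assuming
 * RCV_SHIFT is 3 (consistent with the divide-by-8 comment): with
 * rcd->expected_count == 2048 individual RcvArray entries, the CSR
 * field is written 2048 >> 3 == 256, i.e. the count expressed as
 * pairs in groups of four.  Values are hypothetical.
 */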
u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
{
	int ret;
	u64 val = 0;

	if (namep) {
		ret = dd->cntrnameslen;
		*namep = dd->cntrnames;
	} else {
		const struct cntr_entry *entry;
		int i, j;

		ret = (dd->ndevcntrs) * sizeof(u64);

		/* Get the start of the block of counters */
		*cntrp = dd->cntrs;

		/*
		 * Now go and fill in each counter in the block.
		 */
		for (i = 0; i < DEV_CNTR_LAST; i++) {
			entry = &dev_cntrs[i];
			hfi1_cdbg(CNTR, "reading %s", entry->name);
			if (entry->flags & CNTR_DISABLED) {
				/* Nothing */
				hfi1_cdbg(CNTR, "\tDisabled\n");
				continue;
			}

			if (entry->flags & CNTR_VL) {
				hfi1_cdbg(CNTR, "\tPer VL\n");
				for (j = 0; j < C_VL_COUNT; j++) {
					val = entry->rw_cntr(entry,
							     dd, j,
							     CNTR_MODE_R, 0);
					hfi1_cdbg(CNTR,
						  "\t\tRead 0x%llx for %d\n",
						  val, j);
					dd->cntrs[entry->offset + j] = val;
				}
			} else if (entry->flags & CNTR_SDMA) {
				hfi1_cdbg(CNTR,
					  "\t Per SDMA Engine\n");
				for (j = 0; j < dd->chip_sdma_engines;
				     j++) {
					val = entry->rw_cntr(entry, dd, j,
							     CNTR_MODE_R, 0);
					hfi1_cdbg(CNTR,
						  "\t\tRead 0x%llx for %d\n",
						  val, j);
					dd->cntrs[entry->offset + j] = val;
				}
			} else {
				val = entry->rw_cntr(entry, dd,
						     CNTR_INVALID_VL,
						     CNTR_MODE_R, 0);
				dd->cntrs[entry->offset] = val;
				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
			}
		}
	}
	return ret;
}
/*
 * Used by sysfs to create files for hfi stats to read
 */
u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
{
	int ret;
	u64 val = 0;

	if (namep) {
		ret = ppd->dd->portcntrnameslen;
		*namep = ppd->dd->portcntrnames;
	} else {
		const struct cntr_entry *entry;
		int i, j;

		ret = ppd->dd->nportcntrs * sizeof(u64);
		*cntrp = ppd->cntrs;

		for (i = 0; i < PORT_CNTR_LAST; i++) {
			entry = &port_cntrs[i];
			hfi1_cdbg(CNTR, "reading %s", entry->name);
			if (entry->flags & CNTR_DISABLED) {
				/* Nothing */
				hfi1_cdbg(CNTR, "\tDisabled\n");
				continue;
			}

			if (entry->flags & CNTR_VL) {
				hfi1_cdbg(CNTR, "\tPer VL");
				for (j = 0; j < C_VL_COUNT; j++) {
					val = entry->rw_cntr(entry, ppd, j,
							     CNTR_MODE_R, 0);
					hfi1_cdbg(CNTR,
						  "\t\tRead 0x%llx for %d",
						  val, j);
					ppd->cntrs[entry->offset + j] = val;
				}
			} else {
				val = entry->rw_cntr(entry, ppd,
						     CNTR_INVALID_VL,
						     CNTR_MODE_R, 0);
				ppd->cntrs[entry->offset] = val;
				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
			}
		}
	}
	return ret;
}
static void free_cntrs(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	if (dd->synth_stats_timer.data)
		del_timer_sync(&dd->synth_stats_timer);
	dd->synth_stats_timer.data = 0;
	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		kfree(ppd->cntrs);
		kfree(ppd->scntrs);
		free_percpu(ppd->ibport_data.rvp.rc_acks);
		free_percpu(ppd->ibport_data.rvp.rc_qacks);
		free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
		ppd->cntrs = NULL;
		ppd->scntrs = NULL;
		ppd->ibport_data.rvp.rc_acks = NULL;
		ppd->ibport_data.rvp.rc_qacks = NULL;
		ppd->ibport_data.rvp.rc_delayed_comp = NULL;
	}
	kfree(dd->portcntrnames);
	dd->portcntrnames = NULL;
	kfree(dd->cntrs);
	dd->cntrs = NULL;
	kfree(dd->scntrs);
	dd->scntrs = NULL;
	kfree(dd->cntrnames);
	dd->cntrnames = NULL;
}
#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

static u64 read_dev_port_cntr(struct hfi1_devdata *dd,
			      struct cntr_entry *entry,
			      u64 *psval, void *context, int vl)
{
	u64 val;
	u64 sval = *psval;

	if (entry->flags & CNTR_DISABLED) {
		dd_dev_err(dd, "Counter %s not enabled", entry->name);
		return 0;
	}

	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);

	val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);

	/* If it's a synthetic counter there is more work we need to do */
	if (entry->flags & CNTR_SYNTH) {
		if (sval == CNTR_MAX) {
			/* No need to read already saturated */
			return CNTR_MAX;
		}

		if (entry->flags & CNTR_32BIT) {
			/* 32bit counters can wrap multiple times */
			u64 upper = sval >> 32;
			u64 lower = (sval << 32) >> 32;

			if (lower > val) { /* hw wrapped */
				if (upper == CNTR_32BIT_MAX)
					val = CNTR_MAX;
				else
					upper++;
			}

			if (val != CNTR_MAX)
				val = (upper << 32) | val;
		} else {
			/* If we rolled we are saturated */
			if ((val < sval) || (val > CNTR_MAX))
				val = CNTR_MAX;
		}
	}

	*psval = val;

	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);

	return val;
}
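
/*
 * Illustration of the 32-bit wrap handling above (hypothetical values):
 * with a saved shadow of sval = 0x1FFFFFFF0 (upper = 0x1,
 * lower = 0xFFFFFFF0) and a fresh hardware read of val = 0x10,
 * lower > val detects the wrap, upper increments to 0x2, and the
 * synthesized 64-bit result is (0x2 << 32) | 0x10 = 0x200000010.
 */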
static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
			       struct cntr_entry *entry,
			       u64 *psval, void *context, int vl, u64 data)
{
	u64 val;

	if (entry->flags & CNTR_DISABLED) {
		dd_dev_err(dd, "Counter %s not enabled", entry->name);
		return 0;
	}

	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);

	if (entry->flags & CNTR_SYNTH) {
		*psval = data;
		if (entry->flags & CNTR_32BIT) {
			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
					     (data << 32) >> 32);
			val = data; /* return the full 64bit value */
		} else {
			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
					     data);
		}
	} else {
		val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
	}

	*psval = val;

	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);

	return val;
}
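
/*
 * Note on the CNTR_32BIT write path above: only the low 32 bits,
 * (data << 32) >> 32, reach the hardware; the full 64-bit value is
 * kept in the shadow (*psval) so later reads can re-synthesize the
 * upper half via read_dev_port_cntr().
 */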
u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &dev_cntrs[index];
	sval = dd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	return read_dev_port_cntr(dd, entry, sval, dd, vl);
}
u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &dev_cntrs[index];
	sval = dd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
}
u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &port_cntrs[index];
	sval = ppd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
	    (index <= C_RCV_HDR_OVF_LAST)) {
		/* We do not want to bother for disabled contexts */
		return 0;
	}

	return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
}
u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &port_cntrs[index];
	sval = ppd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
	    (index <= C_RCV_HDR_OVF_LAST)) {
		/* We do not want to bother for disabled contexts */
		return 0;
	}

	return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
}
static void update_synth_timer(unsigned long opaque)
{
	u64 cur_tx;
	u64 cur_rx;
	u64 total_flits;
	u8 update = 0;
	int i, j, vl;
	struct hfi1_pportdata *ppd;
	struct cntr_entry *entry;

	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;

	/*
	 * Rather than keep beating on the CSRs pick a minimal set that we can
	 * check to watch for potential roll over. We can do this by looking at
	 * the number of flits sent/recv. If the total flits exceeds 32bits then
	 * we have to iterate all the counters and update.
	 */
	entry = &dev_cntrs[C_DC_RCV_FLITS];
	cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);

	entry = &dev_cntrs[C_DC_XMIT_FLITS];
	cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);

	hfi1_cdbg(CNTR,
		  "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
		  dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);

	if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
		/*
		 * May not be strictly necessary to update but it won't hurt and
		 * simplifies the logic here.
		 */
		update = 1;
		hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
			  dd->unit);
	} else {
		total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
		hfi1_cdbg(CNTR,
			  "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
			  total_flits, (u64)CNTR_32BIT_MAX);
		if (total_flits >= CNTR_32BIT_MAX) {
			hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
				  dd->unit);
			update = 1;
		}
	}

	if (update) {
		hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
		for (i = 0; i < DEV_CNTR_LAST; i++) {
			entry = &dev_cntrs[i];
			if (entry->flags & CNTR_VL) {
				for (vl = 0; vl < C_VL_COUNT; vl++)
					read_dev_cntr(dd, i, vl);
			} else {
				read_dev_cntr(dd, i, CNTR_INVALID_VL);
			}
		}
		ppd = (struct hfi1_pportdata *)(dd + 1);
		for (i = 0; i < dd->num_pports; i++, ppd++) {
			for (j = 0; j < PORT_CNTR_LAST; j++) {
				entry = &port_cntrs[j];
				if (entry->flags & CNTR_VL) {
					for (vl = 0; vl < C_VL_COUNT; vl++)
						read_port_cntr(ppd, j, vl);
				} else {
					read_port_cntr(ppd, j, CNTR_INVALID_VL);
				}
			}
		}

		/*
		 * We want the value in the register. The goal is to keep track
		 * of the number of "ticks" not the counter value. In other
		 * words if the register rolls we want to notice it and go ahead
		 * and force an update.
		 */
		entry = &dev_cntrs[C_DC_XMIT_FLITS];
		dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
					     CNTR_MODE_R, 0);

		entry = &dev_cntrs[C_DC_RCV_FLITS];
		dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
					     CNTR_MODE_R, 0);

		hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
			  dd->unit, dd->last_tx, dd->last_rx);
	} else {
		hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
	}

	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
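
/*
 * Example of the tripwire math above (hypothetical values): if the
 * last snapshot was tx = 0x1000 / rx = 0x2000 and the current reads
 * are tx = 0x80000000 / rx = 0x90000000, total_flits works out to
 * 0x10FFFD000, which exceeds CNTR_32BIT_MAX, so every synthetic
 * counter is re-read before a 32-bit register could wrap a second
 * time undetected.
 */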
#define C_MAX_NAME 13 /* 12 chars + one for \0 */
static int init_cntrs(struct hfi1_devdata *dd)
{
	int i, rcv_ctxts, j;
	size_t sz;
	char *p;
	char name[C_MAX_NAME];
	struct hfi1_pportdata *ppd;
	const char *bit_type_32 = ",32";
	const int bit_type_32_sz = strlen(bit_type_32);

	/* set up the stats timer; the add_timer is done at the end */
	setup_timer(&dd->synth_stats_timer, update_synth_timer,
		    (unsigned long)dd);

	/***********************/
	/* per device counters */
	/***********************/

	/* size names and determine how many we have */
	dd->ndevcntrs = 0;
	sz = 0;

	for (i = 0; i < DEV_CNTR_LAST; i++) {
		if (dev_cntrs[i].flags & CNTR_DISABLED) {
			hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
			continue;
		}

		if (dev_cntrs[i].flags & CNTR_VL) {
			dev_cntrs[i].offset = dd->ndevcntrs;
			for (j = 0; j < C_VL_COUNT; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name, vl_from_idx(j));
				sz += strlen(name);
				/* Add ",32" for 32-bit counters */
				if (dev_cntrs[i].flags & CNTR_32BIT)
					sz += bit_type_32_sz;
				sz++;
				dd->ndevcntrs++;
			}
		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
			dev_cntrs[i].offset = dd->ndevcntrs;
			for (j = 0; j < dd->chip_sdma_engines; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name, j);
				sz += strlen(name);
				/* Add ",32" for 32-bit counters */
				if (dev_cntrs[i].flags & CNTR_32BIT)
					sz += bit_type_32_sz;
				sz++;
				dd->ndevcntrs++;
			}
		} else {
			/* +1 for newline. */
			sz += strlen(dev_cntrs[i].name) + 1;
			/* Add ",32" for 32-bit counters */
			if (dev_cntrs[i].flags & CNTR_32BIT)
				sz += bit_type_32_sz;
			dev_cntrs[i].offset = dd->ndevcntrs;
			dd->ndevcntrs++;
		}
	}

	/* allocate space for the counter values */
	dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
	if (!dd->cntrs)
		goto bail;

	dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
	if (!dd->scntrs)
		goto bail;

	/* allocate space for the counter names */
	dd->cntrnameslen = sz;
	dd->cntrnames = kmalloc(sz, GFP_KERNEL);
	if (!dd->cntrnames)
		goto bail;

	/* fill in the names */
	for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
		if (dev_cntrs[i].flags & CNTR_DISABLED) {
			/* Nothing */
		} else if (dev_cntrs[i].flags & CNTR_VL) {
			for (j = 0; j < C_VL_COUNT; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name, vl_from_idx(j));
				memcpy(p, name, strlen(name));
				p += strlen(name);

				/* Counter is 32 bits */
				if (dev_cntrs[i].flags & CNTR_32BIT) {
					memcpy(p, bit_type_32, bit_type_32_sz);
					p += bit_type_32_sz;
				}

				*p++ = '\n';
			}
		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
			for (j = 0; j < dd->chip_sdma_engines; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name, j);
				memcpy(p, name, strlen(name));
				p += strlen(name);

				/* Counter is 32 bits */
				if (dev_cntrs[i].flags & CNTR_32BIT) {
					memcpy(p, bit_type_32, bit_type_32_sz);
					p += bit_type_32_sz;
				}

				*p++ = '\n';
			}
		} else {
			memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
			p += strlen(dev_cntrs[i].name);

			/* Counter is 32 bits */
			if (dev_cntrs[i].flags & CNTR_32BIT) {
				memcpy(p, bit_type_32, bit_type_32_sz);
				p += bit_type_32_sz;
			}

			*p++ = '\n';
		}
	}

	/*********************/
	/* per port counters */
	/*********************/

	/*
	 * Go through the counters for the overflows and disable the ones we
	 * don't need. This varies based on platform so we need to do it
	 * dynamically here.
	 */
	rcv_ctxts = dd->num_rcv_contexts;
	for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
	     i <= C_RCV_HDR_OVF_LAST; i++) {
		port_cntrs[i].flags |= CNTR_DISABLED;
	}

	/* size port counter names and determine how many we have */
	sz = 0;
	dd->nportcntrs = 0;
	for (i = 0; i < PORT_CNTR_LAST; i++) {
		if (port_cntrs[i].flags & CNTR_DISABLED) {
			hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
			continue;
		}

		if (port_cntrs[i].flags & CNTR_VL) {
			port_cntrs[i].offset = dd->nportcntrs;
			for (j = 0; j < C_VL_COUNT; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 port_cntrs[i].name, vl_from_idx(j));
				sz += strlen(name);
				/* Add ",32" for 32-bit counters */
				if (port_cntrs[i].flags & CNTR_32BIT)
					sz += bit_type_32_sz;
				sz++;
				dd->nportcntrs++;
			}
		} else {
			/* +1 for newline */
			sz += strlen(port_cntrs[i].name) + 1;
			/* Add ",32" for 32-bit counters */
			if (port_cntrs[i].flags & CNTR_32BIT)
				sz += bit_type_32_sz;
			port_cntrs[i].offset = dd->nportcntrs;
			dd->nportcntrs++;
		}
	}

	/* allocate space for the counter names */
	dd->portcntrnameslen = sz;
	dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
	if (!dd->portcntrnames)
		goto bail;

	/* fill in port cntr names */
	for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
		if (port_cntrs[i].flags & CNTR_DISABLED)
			continue;

		if (port_cntrs[i].flags & CNTR_VL) {
			for (j = 0; j < C_VL_COUNT; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 port_cntrs[i].name, vl_from_idx(j));
				memcpy(p, name, strlen(name));
				p += strlen(name);

				/* Counter is 32 bits */
				if (port_cntrs[i].flags & CNTR_32BIT) {
					memcpy(p, bit_type_32, bit_type_32_sz);
					p += bit_type_32_sz;
				}

				*p++ = '\n';
			}
		} else {
			memcpy(p, port_cntrs[i].name,
			       strlen(port_cntrs[i].name));
			p += strlen(port_cntrs[i].name);

			/* Counter is 32 bits */
			if (port_cntrs[i].flags & CNTR_32BIT) {
				memcpy(p, bit_type_32, bit_type_32_sz);
				p += bit_type_32_sz;
			}

			*p++ = '\n';
		}
	}

	/* allocate per port storage for counter values */
	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
		if (!ppd->cntrs)
			goto bail;

		ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
		if (!ppd->scntrs)
			goto bail;
	}

	/* CPU counters need to be allocated and zeroed */
	if (init_cpu_counters(dd))
		goto bail;

	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
	return 0;
bail:
	free_cntrs(dd);
	return -ENOMEM;
}
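
/*
 * The name buffers built above form a newline-separated list consumed
 * through sysfs, e.g. (counter names hypothetical):
 *
 *	DcXmitFlits\n
 *	RcvHdrOvr0,32\n
 *	...
 *
 * where a ",32" suffix marks counters that are only 32 bits wide in
 * hardware.
 */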
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
{
	switch (chip_lstate) {
	default:
		dd_dev_err(dd,
			   "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
			   chip_lstate);
		/* fall through */
	case LSTATE_DOWN:
		return IB_PORT_DOWN;
	case LSTATE_INIT:
		return IB_PORT_INIT;
	case LSTATE_ARMED:
		return IB_PORT_ARMED;
	case LSTATE_ACTIVE:
		return IB_PORT_ACTIVE;
	}
}
u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
{
	/* look at the HFI meta-states only */
	switch (chip_pstate & 0xf0) {
	default:
		dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
			   chip_pstate);
		/* fall through */
	case PLS_DISABLED:
		return IB_PORTPHYSSTATE_DISABLED;
	case PLS_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case PLS_POLLING:
		return IB_PORTPHYSSTATE_POLLING;
	case PLS_CONFIGPHY:
		return IB_PORTPHYSSTATE_TRAINING;
	case PLS_LINKUP:
		return IB_PORTPHYSSTATE_LINKUP;
	case PLS_PHYTEST:
		return IB_PORTPHYSSTATE_PHY_TEST;
	}
}
/* return the OPA port logical state name */
const char *opa_lstate_name(u32 lstate)
{
	static const char * const port_logical_names[] = {
		"PORT_NOP",
		"PORT_DOWN",
		"PORT_INIT",
		"PORT_ARMED",
		"PORT_ACTIVE",
		"PORT_ACTIVE_DEFER",
	};
	if (lstate < ARRAY_SIZE(port_logical_names))
		return port_logical_names[lstate];
	return "unknown";
}
/* return the OPA port physical state name */
const char *opa_pstate_name(u32 pstate)
{
	static const char * const port_physical_names[] = {
		"PHYS_NOP",
		"reserved1",
		"PHYS_POLL",
		"PHYS_DISABLED",
		"PHYS_TRAINING",
		"PHYS_LINKUP",
		"PHYS_LINK_ERR_RECOVER",
		"PHYS_PHY_TEST",
		"reserved8",
		"PHYS_OFFLINE",
		"PHYS_GANGED",
		"PHYS_TEST",
	};
	if (pstate < ARRAY_SIZE(port_physical_names))
		return port_physical_names[pstate];
	return "unknown";
}
/*
 * Read the hardware link state and set the driver's cached value of it.
 * Return the (new) current value.
 */
u32 get_logical_state(struct hfi1_pportdata *ppd)
{
	u32 new_state;

	new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
	if (new_state != ppd->lstate) {
		dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
			    opa_lstate_name(new_state), new_state);
		ppd->lstate = new_state;
	}
	/*
	 * Set port status flags in the page mapped into userspace
	 * memory. Do it here to ensure a reliable state - this is
	 * the only function called by all state handling code.
	 * Always set the flags due to the fact that the cache value
	 * might have been changed explicitly outside of this
	 * function.
	 */
	if (ppd->statusp) {
		switch (ppd->lstate) {
		case IB_PORT_DOWN:
		case IB_PORT_INIT:
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
			break;
		case IB_PORT_ARMED:
			*ppd->statusp |= HFI1_STATUS_IB_CONF;
			break;
		case IB_PORT_ACTIVE:
			*ppd->statusp |= HFI1_STATUS_IB_READY;
			break;
		}
	}
	return ppd->lstate;
}
/**
 * wait_logical_linkstate - wait for an IB link state change to occur
 * @ppd: port device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for IB link state change to occur.
 * For now, take the easy polling route.
 * Returns 0 if state reached, otherwise -ETIMEDOUT.
 */
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs)
{
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(msecs);
	while (1) {
		if (get_logical_state(ppd) == state)
			return 0;
		if (time_after(jiffies, timeout))
			break;
		msleep(20);
	}
	dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);

	return -ETIMEDOUT;
}
u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
{
	u32 pstate;
	u32 ib_pstate;

	pstate = read_physical_state(ppd->dd);
	ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
	if (ppd->last_pstate != ib_pstate) {
		dd_dev_info(ppd->dd,
			    "%s: physical state changed to %s (0x%x), phy 0x%x\n",
			    __func__, opa_pstate_name(ib_pstate), ib_pstate,
			    pstate);
		ppd->last_pstate = ib_pstate;
	}
	return ib_pstate;
}
/*
 * Read/modify/write ASIC_QSFP register bits as selected by mask
 * data: 0 or 1 in the positions depending on what needs to be written
 * dir: 0 for read, 1 for write
 * mask: select by setting
 */
u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
		  u32 mask)
{
	u64 qsfp_oe, target_oe;

	target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
	if (mask) {
		/* We are writing register bits, so lock access */
		dir &= mask;
		data &= mask;

		qsfp_oe = read_csr(dd, target_oe);
		qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
		write_csr(dd, target_oe, qsfp_oe);
	}
	/* We are exclusively reading bits here, but it is unlikely
	 * we'll get valid data when we set the direction of the pin
	 * in the same call, so read should call this function again
	 * to get valid data
	 */
	return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
}
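
/*
 * Usage sketch for the helper above (illustrative): a plain read of
 * the QSFP1 input pins is hfi1_gpio_mod(dd, 0, 0, 0, 0) - an empty
 * mask skips the output-enable update, so the call reduces to a read
 * of ASIC_QSFP1_IN.  Per the comment above, a caller that also sets
 * pin direction should call again to get valid data.
 */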
#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
	(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

#define SET_STATIC_RATE_CONTROL_SMASK(r) \
	(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

int hfi1_init_ctxt(struct send_context *sc)
{
	if (sc) {
		struct hfi1_devdata *dd = sc->dd;
		u64 reg;
		u8 set = (sc->type == SC_USER ?
			  HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
			  HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
		reg = read_kctxt_csr(dd, sc->hw_context,
				     SEND_CTXT_CHECK_ENABLE);
		if (set)
			CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
		else
			SET_STATIC_RATE_CONTROL_SMASK(reg);
		write_kctxt_csr(dd, sc->hw_context,
				SEND_CTXT_CHECK_ENABLE, reg);
	}
	return 0;
}
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
{
	int ret = 0;
	u64 reg;

	if (dd->icode != ICODE_RTL_SILICON) {
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(dd, "%s: tempsense not supported by HW\n",
				    __func__);
		return -EINVAL;
	}
	reg = read_csr(dd, ASIC_STS_THERM);
	temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
		      ASIC_STS_THERM_CURR_TEMP_MASK);
	temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
			ASIC_STS_THERM_LO_TEMP_MASK);
	temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
			ASIC_STS_THERM_HI_TEMP_MASK);
	temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
			  ASIC_STS_THERM_CRIT_TEMP_MASK);
	/* triggers is a 3-bit value - 1 bit per trigger. */
	temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);

	return ret;
}
/* ========================================================================= */

/*
 * Enable/disable chip from delivering interrupts.
 */
void set_intr_state(struct hfi1_devdata *dd, u32 enable)
{
	int i;

	/*
	 * In HFI, the mask needs to be 1 to allow interrupts.
	 */
	if (enable) {
		/* enable all interrupts */
		for (i = 0; i < CCE_NUM_INT_CSRS; i++)
			write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
	} else {
		for (i = 0; i < CCE_NUM_INT_CSRS; i++)
			write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
	}
}
/*
 * Clear all interrupt sources on the chip.
 */
static void clear_all_interrupts(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);

	write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
	write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
	write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
	for (i = 0; i < dd->chip_sdma_engines; i++)
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);

	write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
	write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
	write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
}
/* Move to pcie.c? */
static void disable_intx(struct pci_dev *pdev)
{
	pci_intx(pdev, 0);
}

static void clean_up_interrupts(struct hfi1_devdata *dd)
{
	int i;

	/* remove irqs - must happen before disabling/turning off */
	if (dd->num_msix_entries) {
		/* MSI-X */
		struct hfi1_msix_entry *me = dd->msix_entries;

		for (i = 0; i < dd->num_msix_entries; i++, me++) {
			if (!me->arg) /* => no irq, no affinity */
				continue;
			hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
			free_irq(me->msix.vector, me->arg);
		}
	} else {
		/* INTx */
		if (dd->requested_intx_irq) {
			free_irq(dd->pcidev->irq, dd);
			dd->requested_intx_irq = 0;
		}
	}

	/* turn off interrupts */
	if (dd->num_msix_entries) {
		/* MSI-X */
		pci_disable_msix(dd->pcidev);
	} else {
		/* INTx */
		disable_intx(dd->pcidev);
	}

	/* clean structures */
	kfree(dd->msix_entries);
	dd->msix_entries = NULL;
	dd->num_msix_entries = 0;
}
/*
 * Remap the interrupt source from the general handler to the given MSI-X
 * interrupt.
 */
static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
{
	u64 reg;
	int m, n;

	/* clear from the handled mask of the general interrupt */
	m = isrc / 64;
	n = isrc % 64;
	dd->gi_mask[m] &= ~((u64)1 << n);

	/* direct the chip source to the given MSI-X interrupt */
	m = isrc / 8;
	n = isrc % 8;
	reg = read_csr(dd, CCE_INT_MAP + (8 * m));
	reg &= ~((u64)0xff << (8 * n));
	reg |= ((u64)msix_intr & 0xff) << (8 * n);
	write_csr(dd, CCE_INT_MAP + (8 * m), reg);
}
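
/*
 * Example of the index math above: for chip interrupt source
 * isrc = 72, the general-handler mask bit is gi_mask[72 / 64]
 * bit (72 % 64), i.e. gi_mask[1] bit 8, while the 8-bit MSI-X vector
 * field lives in CceIntMap CSR 72 / 8 = 9 at byte lane 72 % 8 = 0.
 */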
static void remap_sdma_interrupts(struct hfi1_devdata *dd,
				  int engine, int msix_intr)
{
	/*
	 * SDMA engine interrupt sources grouped by type, rather than
	 * engine.  Per-engine interrupts are as follows:
	 *	SDMA
	 *	SDMAProgress
	 *	SDMAIdle
	 */
	remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
		   msix_intr);
	remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
		   msix_intr);
	remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
		   msix_intr);
}
static int request_intx_irq(struct hfi1_devdata *dd)
{
	int ret;

	snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
		 dd->unit);
	ret = request_irq(dd->pcidev->irq, general_interrupt,
			  IRQF_SHARED, dd->intx_name, dd);
	if (ret)
		dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
			   ret);
	else
		dd->requested_intx_irq = 1;
	return ret;
}
static int request_msix_irqs(struct hfi1_devdata *dd)
{
	int first_general, last_general;
	int first_sdma, last_sdma;
	int first_rx, last_rx;
	int i, ret = 0;

	/* calculate the ranges we are going to use */
	first_general = 0;
	last_general = first_general + 1;
	first_sdma = last_general;
	last_sdma = first_sdma + dd->num_sdma;
	first_rx = last_sdma;
	last_rx = first_rx + dd->n_krcv_queues;

	/*
	 * Sanity check - the code expects all SDMA chip source
	 * interrupts to be in the same CSR, starting at bit 0.  Verify
	 * that this is true by checking the bit location of the start.
	 */
	BUILD_BUG_ON(IS_SDMA_START % 64);

	for (i = 0; i < dd->num_msix_entries; i++) {
		struct hfi1_msix_entry *me = &dd->msix_entries[i];
		const char *err_info;
		irq_handler_t handler;
		irq_handler_t thread = NULL;
		void *arg;
		int idx;
		struct hfi1_ctxtdata *rcd = NULL;
		struct sdma_engine *sde = NULL;

		/* obtain the arguments to request_irq */
		if (first_general <= i && i < last_general) {
			idx = i - first_general;
			handler = general_interrupt;
			arg = dd;
			snprintf(me->name, sizeof(me->name),
				 DRIVER_NAME "_%d", dd->unit);
			err_info = "general";
			me->type = IRQ_GENERAL;
		} else if (first_sdma <= i && i < last_sdma) {
			idx = i - first_sdma;
			sde = &dd->per_sdma[idx];
			handler = sdma_interrupt;
			arg = sde;
			snprintf(me->name, sizeof(me->name),
				 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
			err_info = "sdma";
			remap_sdma_interrupts(dd, idx, i);
			me->type = IRQ_SDMA;
		} else if (first_rx <= i && i < last_rx) {
			idx = i - first_rx;
			rcd = dd->rcd[idx];
			/* no interrupt if no rcd */
			if (!rcd)
				continue;
			/*
			 * Set the interrupt register and mask for this
			 * context's interrupt.
			 */
			rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
			rcd->imask = ((u64)1) <<
					((IS_RCVAVAIL_START + idx) % 64);
			handler = receive_context_interrupt;
			thread = receive_context_thread;
			arg = rcd;
			snprintf(me->name, sizeof(me->name),
				 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
			err_info = "receive context";
			remap_intr(dd, IS_RCVAVAIL_START + idx, i);
			me->type = IRQ_RCVCTXT;
		} else {
			/* not in our expected range - complain, then
			 * ignore it
			 */
			dd_dev_err(dd,
				   "Unexpected extra MSI-X interrupt %d\n", i);
			continue;
		}
		/* no argument, no interrupt */
		if (!arg)
			continue;
		/* make sure the name is terminated */
		me->name[sizeof(me->name) - 1] = 0;

		ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
					   me->name, arg);
		if (ret) {
			dd_dev_err(dd,
				   "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
				   err_info, me->msix.vector, idx, ret);
			return ret;
		}
		/*
		 * assign arg after request_irq call, so it will be
		 * cleaned up
		 */
		me->arg = arg;

		ret = hfi1_get_irq_affinity(dd, me);
		if (ret)
			dd_dev_err(dd,
				   "unable to pin IRQ %d\n", ret);
	}

	return ret;
}
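
/*
 * Resulting MSI-X vector layout from the range math above, e.g. with
 * num_sdma = 16 and n_krcv_queues = 8 (values illustrative):
 *	vector  0        - general "slow path" interrupt
 *	vectors 1  .. 16 - one per SDMA engine
 *	vectors 17 .. 24 - one per kernel receive context
 */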
/*
 * Set the general handler to accept all interrupts, remap all
 * chip interrupts back to MSI-X 0.
 */
static void reset_interrupts(struct hfi1_devdata *dd)
{
	int i;

	/* all interrupts handled by the general handler */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		dd->gi_mask[i] = ~(u64)0;

	/* all chip interrupts map to MSI-X 0 */
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
}
static int set_up_interrupts(struct hfi1_devdata *dd)
{
	struct hfi1_msix_entry *entries;
	u32 total, request;
	int i, ret;
	int single_interrupt = 0; /* we expect to have all the interrupts */

	/*
	 * Interrupt count:
	 *	1 general, "slow path" interrupt (includes the SDMA engines
	 *		slow source, SDMACleanupDone)
	 *	N interrupts - one per used SDMA engine
	 *	M interrupt - one per kernel receive context
	 */
	total = 1 + dd->num_sdma + dd->n_krcv_queues;

	entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
	if (!entries) {
		ret = -ENOMEM;
		goto fail;
	}
	/* 1-1 MSI-X entry assignment */
	for (i = 0; i < total; i++)
		entries[i].msix.entry = i;

	/* ask for MSI-X interrupts */
	request = total;
	request_msix(dd, &request, entries);

	if (request == 0) {
		/* using INTx */
		/* dd->num_msix_entries already zero */
		kfree(entries);
		single_interrupt = 1;
		dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
	} else {
		/* using MSI-X */
		dd->num_msix_entries = request;
		dd->msix_entries = entries;

		if (request != total) {
			/* using MSI-X, with reduced interrupts */
			dd_dev_err(dd,
				   "cannot handle reduced interrupt case, want %u, got %u\n",
				   total, request);
			ret = -EINVAL;
			goto fail;
		}
		dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
	}

	/* mask all interrupts */
	set_intr_state(dd, 0);
	/* clear all pending interrupts */
	clear_all_interrupts(dd);

	/* reset general handler mask, chip MSI-X mappings */
	reset_interrupts(dd);

	if (single_interrupt)
		ret = request_intx_irq(dd);
	else
		ret = request_msix_irqs(dd);
	if (ret)
		goto fail;

	return 0;

fail:
	clean_up_interrupts(dd);
	return ret;
}
/*
 * Set up context values in dd.  Sets:
 *
 *	num_rcv_contexts - number of contexts being used
 *	n_krcv_queues - number of kernel contexts
 *	first_user_ctxt - first non-kernel context in array of contexts
 *	freectxts - number of free user contexts
 *	num_send_contexts - number of PIO send contexts being used
 */
static int set_up_context_variables(struct hfi1_devdata *dd)
{
	int num_kernel_contexts;
	int total_contexts;
	int ret;
	unsigned ngroups;
	int qos_rmt_count;
	int user_rmt_reduced;

	/*
	 * Kernel receive contexts:
	 * - min of 2 or 1 context/numa (excluding control context)
	 * - Context 0 - control context (VL15/multicast/error)
	 * - Context 1 - first kernel context
	 * - Context 2 - second kernel context
	 * ...
	 */
	if (n_krcvqs)
		/*
		 * n_krcvqs is the sum of module parameter kernel receive
		 * contexts, krcvqs[].  It does not include the control
		 * context, so add that.
		 */
		num_kernel_contexts = n_krcvqs + 1;
	else
		num_kernel_contexts = num_online_nodes() + 1;
	num_kernel_contexts =
		max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
	/*
	 * Every kernel receive context needs an ACK send context.
	 * one send context is allocated for each VL{0-7} and VL15
	 */
	if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
		dd_dev_err(dd,
			   "Reducing # kernel rcv contexts to: %d, from %d\n",
			   (int)(dd->chip_send_contexts - num_vls - 1),
			   (int)num_kernel_contexts);
		num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
	}
	/*
	 * User contexts:
	 *	- default to 1 user context per real (non-HT) CPU core if
	 *	  num_user_contexts is negative
	 */
	if (num_user_contexts < 0)
		num_user_contexts =
			cpumask_weight(&dd->affinity->real_cpu_mask);

	total_contexts = num_kernel_contexts + num_user_contexts;

	/*
	 * Adjust the counts given a global max.
	 */
	if (total_contexts > dd->chip_rcv_contexts) {
		dd_dev_err(dd,
			   "Reducing # user receive contexts to: %d, from %d\n",
			   (int)(dd->chip_rcv_contexts - num_kernel_contexts),
			   (int)num_user_contexts);
		num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
		/* recalculate */
		total_contexts = num_kernel_contexts + num_user_contexts;
	}

	/* each user context requires an entry in the RMT */
	qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
	if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
		user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
		dd_dev_err(dd,
			   "RMT size is reducing the number of user receive contexts from %d to %d\n",
			   (int)num_user_contexts,
			   user_rmt_reduced);
		/* recalculate */
		num_user_contexts = user_rmt_reduced;
		total_contexts = num_kernel_contexts + num_user_contexts;
	}

	/* the first N are kernel contexts, the rest are user contexts */
	dd->num_rcv_contexts = total_contexts;
	dd->n_krcv_queues = num_kernel_contexts;
	dd->first_user_ctxt = num_kernel_contexts;
	dd->num_user_contexts = num_user_contexts;
	dd->freectxts = num_user_contexts;
	dd_dev_info(dd,
		    "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
		    (int)dd->chip_rcv_contexts,
		    (int)dd->num_rcv_contexts,
		    (int)dd->n_krcv_queues,
		    (int)dd->num_rcv_contexts - dd->n_krcv_queues);

	/*
	 * Receive array allocation:
	 *   All RcvArray entries are divided into groups of 8. This
	 *   is required by the hardware and will speed up writes to
	 *   consecutive entries by using write-combining of the entire
	 *   cacheline.
	 *
	 *   The number of groups are evenly divided among all contexts.
	 *   any left over groups will be given to the first N user
	 *   contexts.
	 */
	dd->rcv_entries.group_size = RCV_INCREMENT;
	ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
	dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
	dd->rcv_entries.nctxt_extra = ngroups -
		(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
	dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
		    dd->rcv_entries.ngroups,
		    dd->rcv_entries.nctxt_extra);
	if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
	    MAX_EAGER_ENTRIES * 2) {
		dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
			dd->rcv_entries.group_size;
		dd_dev_info(dd,
			    "RcvArray group count too high, change to %u\n",
			    dd->rcv_entries.ngroups);
		dd->rcv_entries.nctxt_extra = 0;
	}
	/*
	 * PIO send contexts
	 */
	ret = init_sc_pools_and_sizes(dd);
	if (ret >= 0) {	/* success */
		dd->num_send_contexts = ret;
		dd_dev_info(dd,
			    "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
			    dd->chip_send_contexts,
			    dd->num_send_contexts,
			    dd->sc_sizes[SC_KERNEL].count,
			    dd->sc_sizes[SC_ACK].count,
			    dd->sc_sizes[SC_USER].count,
			    dd->sc_sizes[SC_VL15].count);
		ret = 0;	/* success */
	}

	return ret;
}
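
/*
 * Sizing example for the routine above (values illustrative): with
 * n_krcvqs = 4, num_kernel_contexts becomes 5 (the four kernel queues
 * plus the control context), and with num_user_contexts = 16 the chip
 * must provide at least 21 receive contexts; otherwise the user count
 * is trimmed first, as the adjustment steps above show.
 */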
/*
 * Set the device/port partition key table. The MAD code
 * will ensure that, at least, the partial management
 * partition key is present in the table.
 */
static void set_partition_keys(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg = 0;
	int i;

	dd_dev_info(dd, "Setting partition keys\n");
	for (i = 0; i < hfi1_get_npkeys(dd); i++) {
		reg |= (ppd->pkeys[i] &
			RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
			((i % 4) *
			 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
		/* Each register holds 4 PKey values. */
		if ((i % 4) == 3) {
			write_csr(dd, RCV_PARTITION_KEY +
				  ((i - 3) * 2), reg);
			reg = 0;
		}
	}

	/* Always enable HW pkeys check when pkeys table is set */
	add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
}
/*
 * These CSRs and memories are uninitialized on reset and must be
 * written before reading to set the ECC/parity bits.
 *
 * NOTE: All user context CSRs that are not mmaped write-only
 * (e.g. the TID flows) must be initialized even if the driver never
 * reads them.
 */
static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
{
	int i, j;

	/* CceIntMap */
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP + (8 * i), 0);

	/* SendCtxtCreditReturnAddr */
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);

	/* PIO Send buffers */
	/* SDMA Send buffers */
	/*
	 * These are not normally read, and (presently) have no method
	 * to be read, so are not pre-initialized
	 */

	/* RcvHdrAddr */
	/* RcvHdrTailAddr */
	/* RcvTidFlowTable */
	for (i = 0; i < dd->chip_rcv_contexts; i++) {
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
			write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
	}

	/* RcvArray */
	for (i = 0; i < dd->chip_rcv_array_count; i++)
		write_csr(dd, RCV_ARRAY + (8 * i),
			  RCV_ARRAY_RT_WRITE_ENABLE_SMASK);

	/* RcvQPMapTable */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
}
/*
 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
 */
static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
			     u64 ctrl_bits)
{
	unsigned long timeout;
	u64 reg;

	/* is the condition present? */
	reg = read_csr(dd, CCE_STATUS);
	if ((reg & status_bits) == 0)
		return;

	/* clear the condition */
	write_csr(dd, CCE_CTRL, ctrl_bits);

	/* wait for the condition to clear */
	timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
	while (1) {
		reg = read_csr(dd, CCE_STATUS);
		if ((reg & status_bits) == 0)
			return;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
				   status_bits, reg & status_bits);
			return;
		}
		udelay(1);
	}
}
/* set CCE CSRs to chip reset defaults */
static void reset_cce_csrs(struct hfi1_devdata *dd)
{
	int i;

	/* CCE_REVISION read-only */
	/* CCE_REVISION2 read-only */
	/* CCE_CTRL - bits clear automatically */
	/* CCE_STATUS read-only, use CceCtrl to clear */
	clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
	clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
	clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
	for (i = 0; i < CCE_NUM_SCRATCH; i++)
		write_csr(dd, CCE_SCRATCH + (8 * i), 0);
	/* CCE_ERR_STATUS read-only */
	write_csr(dd, CCE_ERR_MASK, 0);
	write_csr(dd, CCE_ERR_CLEAR, ~0ull);
	/* CCE_ERR_FORCE leave alone */
	for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
	/* CCE_PCIE_CTRL leave alone */
	for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
		write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
		write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
			  CCE_MSIX_TABLE_UPPER_RESETCSR);
	}
	for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
		/* CCE_MSIX_PBA read-only */
		write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
		write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
	}
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP, 0);
	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
		/* CCE_INT_STATUS read-only */
		write_csr(dd, CCE_INT_MASK + (8 * i), 0);
		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
		/* CCE_INT_FORCE leave alone */
		/* CCE_INT_BLOCKED read-only */
	}
	for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
		write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
}
/* set MISC CSRs to chip reset defaults */
static void reset_misc_csrs(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < 32; i++) {
		write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
	}
	/*
	 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
	 * only be written 128-byte chunks
	 */
	/* init RSA engine to clear lingering errors */
	write_csr(dd, MISC_CFG_RSA_CMD, 1);
	write_csr(dd, MISC_CFG_RSA_MU, 0);
	write_csr(dd, MISC_CFG_FW_CTRL, 0);
	/* MISC_STS_8051_DIGEST read-only */
	/* MISC_STS_SBM_DIGEST read-only */
	/* MISC_STS_PCIE_DIGEST read-only */
	/* MISC_STS_FAB_DIGEST read-only */
	/* MISC_ERR_STATUS read-only */
	write_csr(dd, MISC_ERR_MASK, 0);
	write_csr(dd, MISC_ERR_CLEAR, ~0ull);
	/* MISC_ERR_FORCE leave alone */
}
/* set TXE CSRs to chip reset defaults */
static void reset_txe_csrs(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * TXE Kernel CSRs
	 */
	write_csr(dd, SEND_CTRL, 0);
	__cm_reset(dd, 0);	/* reset CM internal state */
	/* SEND_CONTEXTS read-only */
	/* SEND_DMA_ENGINES read-only */
	/* SEND_PIO_MEM_SIZE read-only */
	/* SEND_DMA_MEM_SIZE read-only */
	write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
	pio_reset_all(dd);	/* SEND_PIO_INIT_CTXT */
	/* SEND_PIO_ERR_STATUS read-only */
	write_csr(dd, SEND_PIO_ERR_MASK, 0);
	write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
	/* SEND_PIO_ERR_FORCE leave alone */
	/* SEND_DMA_ERR_STATUS read-only */
	write_csr(dd, SEND_DMA_ERR_MASK, 0);
	write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
	/* SEND_DMA_ERR_FORCE leave alone */
	/* SEND_EGRESS_ERR_STATUS read-only */
	write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
	/* SEND_EGRESS_ERR_FORCE leave alone */
	write_csr(dd, SEND_BTH_QP, 0);
	write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
	write_csr(dd, SEND_SC2VLT0, 0);
	write_csr(dd, SEND_SC2VLT1, 0);
	write_csr(dd, SEND_SC2VLT2, 0);
	write_csr(dd, SEND_SC2VLT3, 0);
	write_csr(dd, SEND_LEN_CHECK0, 0);
	write_csr(dd, SEND_LEN_CHECK1, 0);
	/* SEND_ERR_STATUS read-only */
	write_csr(dd, SEND_ERR_MASK, 0);
	write_csr(dd, SEND_ERR_CLEAR, ~0ull);
	/* SEND_ERR_FORCE read-only */
	for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
		write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
	for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
		write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
	for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
		write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
	for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
		write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
	for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
		write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
	write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
	/* SEND_CM_CREDIT_USED_STATUS read-only */
	write_csr(dd, SEND_CM_TIMER_CTRL, 0);
	write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
	write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
	write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
	write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
	/* SEND_CM_CREDIT_USED_VL read-only */
	/* SEND_CM_CREDIT_USED_VL15 read-only */
	/* SEND_EGRESS_CTXT_STATUS read-only */
	/* SEND_EGRESS_SEND_DMA_STATUS read-only */
	write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
	/* SEND_EGRESS_ERR_INFO read-only */
	/* SEND_EGRESS_ERR_SOURCE read-only */

	/*
	 * TXE Per-Context CSRs
	 */
	for (i = 0; i < dd->chip_send_contexts; i++) {
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
	}

	/*
	 * TXE Per-SDMA CSRs
	 */
	for (i = 0; i < dd->chip_sdma_engines; i++) {
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
		/* SEND_DMA_STATUS read-only */
		write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
		write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
		/* SEND_DMA_HEAD read-only */
		write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
		/* SEND_DMA_IDLE_CNT read-only */
		write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
		write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
		/* SEND_DMA_DESC_FETCHED_CNT read-only */
		/* SEND_DMA_ENG_ERR_STATUS read-only */
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
		/* SEND_DMA_ENG_ERR_FORCE leave alone */
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
		write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
	}
}
/*
 * Expect on entry:
 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
 */
static void init_rbufs(struct hfi1_devdata *dd)
{
	u64 reg;
	int count;

	/*
	 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
	 * clear.
	 */
	count = 0;
	while (1) {
		reg = read_csr(dd, RCV_STATUS);
		if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
			    | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
			break;
		/*
		 * Give up after 1ms - maximum wait time.
		 *
		 * RBuf size is 148KiB.  Slowest possible is PCIe Gen1 x1 at
		 * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
		 *	148 KB / (66% * 250MB/s) = 920us
		 */
		if (count++ > 500) {
			dd_dev_err(dd,
				   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
				   __func__, reg);
			break;
		}
		udelay(2); /* do not busy-wait the CSR */
	}

	/* start the init - expect RcvCtrl to be 0 */
	write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);

	/*
	 * Read to force the write of Rcvtrl.RxRbufInit.  There is a brief
	 * period after the write before RcvStatus.RxRbufInitDone is valid.
	 * The delay in the first run through the loop below is sufficient and
	 * required before the first read of RcvStatus.RxRbufInitDone.
	 */
	read_csr(dd, RCV_CTRL);

	/* wait for the init to finish */
	count = 0;
	while (1) {
		/* delay is required first time through - see above */
		udelay(2); /* do not busy-wait the CSR */
		reg = read_csr(dd, RCV_STATUS);
		if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
			break;

		/* give up after 100us - slowest possible at 33MHz is 73us */
		if (count++ > 50) {
			dd_dev_err(dd,
				   "%s: RcvStatus.RxRbufInit not set, continuing\n",
				   __func__);
			break;
		}
	}
}
/* set RXE CSRs to chip reset defaults */
static void reset_rxe_csrs(struct hfi1_devdata *dd)
{
	int i, j;

	/*
	 * RXE Kernel CSRs
	 */
	write_csr(dd, RCV_CTRL, 0);
	init_rbufs(dd);
	/* RCV_STATUS read-only */
	/* RCV_CONTEXTS read-only */
	/* RCV_ARRAY_CNT read-only */
	/* RCV_BUF_SIZE read-only */
	write_csr(dd, RCV_BTH_QP, 0);
	write_csr(dd, RCV_MULTICAST, 0);
	write_csr(dd, RCV_BYPASS, 0);
	write_csr(dd, RCV_VL15, 0);
	/* this is a clear-down */
	write_csr(dd, RCV_ERR_INFO,
		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
	/* RCV_ERR_STATUS read-only */
	write_csr(dd, RCV_ERR_MASK, 0);
	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
	/* RCV_ERR_FORCE leave alone */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
	for (i = 0; i < 4; i++)
		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
		write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
		write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
		write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
	}
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);

	/*
	 * RXE Kernel and User Per-Context CSRs
	 */
	for (i = 0; i < dd->chip_rcv_contexts; i++) {
		/* kernel */
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
		/* RCV_CTXT_STATUS read-only */
		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);

		/* user */
		/* RCV_HDR_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
		/* RCV_EGR_INDEX_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
		/* RCV_EGR_OFFSET_TAIL read-only */
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
			write_uctxt_csr(dd, i,
					RCV_TID_FLOW_TABLE + (8 * j), 0);
		}
	}
}
/*
 * Set sc2vl tables.
 *
 * They power on to zeros, so to avoid send context errors
 * they need to be set:
 *
 * SC 0-7 -> VL 0-7 (respectively)
 * SC 15  -> VL 15
 * otherwise
 *        -> VL 0
 */
static void init_sc2vl_tables(struct hfi1_devdata *dd)
{
	int i;
	/* init per architecture spec, constrained by hardware capability */

	/* HFI maps sent packets */
	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
		0,
		0, 0, 1, 1,
		2, 2, 3, 3,
		4, 4, 5, 5,
		6, 6, 7, 7));
	write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
		1,
		8, 0, 9, 0,
		10, 0, 11, 0,
		12, 0, 13, 0,
		14, 0, 15, 15));
	write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
		2,
		16, 0, 17, 0,
		18, 0, 19, 0,
		20, 0, 21, 0,
		22, 0, 23, 0));
	write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
		3,
		24, 0, 25, 0,
		26, 0, 27, 0,
		28, 0, 29, 0,
		30, 0, 31, 0));

	/* DC maps received packets */
	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
		15_0,
		0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
		8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
		31_16,
		16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
		24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));

	/* initialize the cached sc2vl values consistently with h/w */
	for (i = 0; i < 32; i++) {
		if (i < 8 || i == 15)
			*((u8 *)(dd->sc2vl) + i) = (u8)i;
		else
			*((u8 *)(dd->sc2vl) + i) = 0;
	}
}
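
/*
 * Example lookups against the cached table built above: dd->sc2vl
 * maps SC 3 -> VL 3 and SC 15 -> VL 15, while any other SC above 7
 * (e.g. SC 20) falls back to VL 0, matching the hardware tables.
 */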
/*
 * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
 * depend on the chip going through a power-on reset - a driver may be loaded
 * and unloaded many times.
 *
 * Do not write any CSR values to the chip in this routine - there may be
 * a reset following the (possible) FLR in this routine.
 *
 */
static void init_chip(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * Put the HFI CSRs in a known state.
	 * Combine this with a DC reset.
	 *
	 * Stop the device from doing anything while we do a
	 * reset.  We know there are no other active users of
	 * the device since we are now in charge.  Turn off
	 * all outbound and inbound traffic and make sure
	 * the device does not generate any interrupts.
	 */

	/* disable send contexts and SDMA engines */
	write_csr(dd, SEND_CTRL, 0);
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
	for (i = 0; i < dd->chip_sdma_engines; i++)
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
	/* disable port (turn off RXE inbound traffic) and contexts */
	write_csr(dd, RCV_CTRL, 0);
	for (i = 0; i < dd->chip_rcv_contexts; i++)
		write_csr(dd, RCV_CTXT_CTRL, 0);
	/* mask all interrupt sources */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);

	/*
	 * DC Reset: do a full DC reset before the register clear.
	 * A recommended length of time to hold is one CSR read,
	 * so reread the CceDcCtrl.  Then, hold the DC in reset
	 * across the clear.
	 */
	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
	(void)read_csr(dd, CCE_DC_CTRL);

	if (use_flr) {
		/*
		 * A FLR will reset the SPC core and part of the PCIe.
		 * The parts that need to be restored have already been
		 * saved.
		 */
		dd_dev_info(dd, "Resetting CSRs with FLR\n");

		/* do the FLR, the DC reset will remain */
		hfi1_pcie_flr(dd);

		/* restore command and BARs */
		restore_pci_variables(dd);

		if (is_ax(dd)) {
			dd_dev_info(dd, "Resetting CSRs with FLR\n");
			hfi1_pcie_flr(dd);
			restore_pci_variables(dd);
		}
	} else {
		dd_dev_info(dd, "Resetting CSRs with writes\n");
		reset_cce_csrs(dd);
		reset_txe_csrs(dd);
		reset_rxe_csrs(dd);
		reset_misc_csrs(dd);
	}
	/* clear the DC reset */
	write_csr(dd, CCE_DC_CTRL, 0);

	/* Set the LED off */
	setextled(dd, 0);

	/*
	 * Clear the QSFP reset.
	 * An FLR enforces a 0 on all out pins. The driver does not touch
	 * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
	 * anything plugged constantly in reset, if it pays attention
	 * to RESET_N.
	 * Prime examples of this are optical cables. Set all pins high.
	 * I2CCLK and I2CDAT will change per direction, and INT_N and
	 * MODPRS_N are input only and their value is ignored.
	 */
	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
	init_chip_resources(dd);
}
static void init_early_variables(struct hfi1_devdata *dd)
{
        int i;

        /* assign link credit variables */
        dd->vau = CM_VAU;
        dd->link_credits = CM_GLOBAL_CREDITS;
        if (is_ax(dd))
                dd->link_credits--;
        dd->vcu = cu_to_vcu(hfi1_cu);
        /* enough room for 8 MAD packets plus header - 17K */
        dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
        if (dd->vl15_init > dd->link_credits)
                dd->vl15_init = dd->link_credits;

        write_uninitialized_csrs_and_memories(dd);

        if (HFI1_CAP_IS_KSET(PKEY_CHECK))
                for (i = 0; i < dd->num_pports; i++) {
                        struct hfi1_pportdata *ppd = &dd->pport[i];

                        set_partition_keys(ppd);
                }
        init_sc2vl_tables(dd);
}
static void init_kdeth_qp(struct hfi1_devdata *dd)
{
        /* user changed the KDETH_QP */
        if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
                /* out of range or illegal value */
                dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
                kdeth_qp = 0;
        }
        if (kdeth_qp == 0)      /* not set, or failed range check */
                kdeth_qp = DEFAULT_KDETH_QP;

        write_csr(dd, SEND_BTH_QP,
                  (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
                  SEND_BTH_QP_KDETH_QP_SHIFT);

        write_csr(dd, RCV_BTH_QP,
                  (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
                  RCV_BTH_QP_KDETH_QP_SHIFT);
}
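/*
 * Sketch of the intent, assuming the usual convention that the KDETH
 * prefix is compared against the upper byte (QP[23:16]) of the 24-bit
 * destination QP: any packet whose DestQP upper byte equals kdeth_qp is
 * treated as a KDETH packet.  The same prefix is written to both
 * SEND_BTH_QP and RCV_BTH_QP so the send and receive sides classify
 * packets identically.
 */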
/**
 * init_qpmap_table - set up the QPN mapping table
 * @dd - device data
 * @first_ctxt - first context
 * @last_ctxt - last context
 *
 * This routine sets the qpn mapping table that
 * is indexed by qpn[8:1].
 *
 * The routine will round robin the 256 settings
 * from first_ctxt to last_ctxt.
 *
 * The first/last looks ahead to having specialized
 * receive contexts for mgmt and bypass.  Normal
 * verbs traffic is assumed to be on a range
 * of receive contexts.
 */
static void init_qpmap_table(struct hfi1_devdata *dd,
                             u32 first_ctxt, u32 last_ctxt)
{
        u64 reg = 0;
        u64 regno = RCV_QP_MAP_TABLE;
        int i;
        u64 ctxt = first_ctxt;

        for (i = 0; i < 256; i++) {
                reg |= ctxt << (8 * (i % 8));
                ctxt++;
                if (ctxt > last_ctxt)
                        ctxt = first_ctxt;
                if (i % 8 == 7) {
                        write_csr(dd, regno, reg);
                        reg = 0;
                        regno += 8;
                }
        }

        add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
                        | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
}
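/*
 * Worked example of the packing above: each RcvQPMapTable CSR holds
 * eight one-byte entries, so entry i lands in byte (i % 8) of CSR
 * RCV_QP_MAP_TABLE + 8 * (i / 8).  With first_ctxt == 2 and
 * last_ctxt == 4 the 256 entries cycle 2, 3, 4, 2, ... and the first
 * CSR written is 0x0302040302040302 (entry 0 in the low byte).
 */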
struct rsm_map_table {
        u64 map[NUM_MAP_REGS];
        unsigned int used;
};

struct rsm_rule_data {
        u8 offset;
        u8 pkt_type;
        u32 field1_off, field2_off;
        u32 index1_off, index1_width;
        u32 index2_off, index2_width;
        u32 mask1, value1;
        u32 mask2, value2;
};
/*
 * Return an initialized RMT map table for users to fill in.  OK if it
 * returns NULL, indicating no table.
 */
static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
{
        struct rsm_map_table *rmt;
        u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */

        rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
        if (rmt) {
                memset(rmt->map, rxcontext, sizeof(rmt->map));
                rmt->used = 0;
        }

        return rmt;
}
/*
 * Write the final RMT map table to the chip and free the table.  OK if
 * the table is NULL.
 */
static void complete_rsm_map_table(struct hfi1_devdata *dd,
                                   struct rsm_map_table *rmt)
{
        int i;

        if (rmt) {
                /* write table to chip */
                for (i = 0; i < NUM_MAP_REGS; i++)
                        write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i),
                                  rmt->map[i]);

                /* enable RSM */
                add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
        }
        kfree(rmt);
}
/*
 * Add a receive side mapping rule.
 */
static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
                         struct rsm_rule_data *rrd)
{
        write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
                  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
                  1ull << rule_index | /* enable bit */
                  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
        write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
                  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
                  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
                  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
                  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
                  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
                  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
        write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
                  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
                  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
                  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
                  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
}
/* return the number of RSM map table entries that will be used for QOS */
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
                           unsigned int *np)
{
        int i;
        unsigned int m, n;
        u8 max_by_vl = 0;

        /* is QOS active at all? */
        if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
            num_vls == 1 ||
            krcvqsset <= 1)
                goto no_qos;

        /* determine bits for qpn */
        for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
                if (krcvqs[i] > max_by_vl)
                        max_by_vl = krcvqs[i];
        if (max_by_vl > 32)
                goto no_qos;
        m = ilog2(__roundup_pow_of_two(max_by_vl));

        /* determine bits for vl */
        n = ilog2(__roundup_pow_of_two(num_vls));

        /* reject if too much is used */
        if ((m + n) > 7)
                goto no_qos;

        if (mp)
                *mp = m;
        if (np)
                *np = n;

        return 1 << (m + n);

no_qos:
        if (mp)
                *mp = 0;
        if (np)
                *np = 0;
        return 0;
}
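/*
 * Worked example (assuming QOS is otherwise enabled): with num_vls == 4
 * and krcvqs == { 2, 3, 1, 2 }, the largest per-VL count is 3, so
 * m = ilog2(roundup_pow_of_two(3)) = 2 qpn bits and
 * n = ilog2(roundup_pow_of_two(4)) = 2 vl bits, for
 * 1 << (2 + 2) = 16 RSM map table entries.
 */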
/**
 * init_qos - init RX qos
 * @dd - device data
 * @rmt - RSM map table
 *
 * This routine initializes Rule 0 and the RSM map table to implement
 * quality of service (qos).
 *
 * If all of the limit tests succeed, qos is applied based on the array
 * interpretation of krcvqs where entry 0 is VL0.
 *
 * The number of vl bits (n) and the number of qpn bits (m) are computed to
 * feed both the RSM map table and the single rule.
 */
static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
{
        struct rsm_rule_data rrd;
        unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
        unsigned int rmt_entries;
        u64 reg;

        if (!rmt)
                goto bail;
        rmt_entries = qos_rmt_entries(dd, &m, &n);
        if (rmt_entries == 0)
                goto bail;
        qpns_per_vl = 1 << m;

        /* enough room in the map table? */
        rmt_entries = 1 << (m + n);
        if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
                goto bail;

        /* add qos entries to the RSM map table */
        for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
                unsigned tctxt;

                for (qpn = 0, tctxt = ctxt;
                     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
                        unsigned idx, regoff, regidx;

                        /* generate the index the hardware will produce */
                        idx = rmt->used + ((qpn << n) ^ i);
                        regoff = (idx % 8) * 8;
                        regidx = idx / 8;
                        /* replace default with context number */
                        reg = rmt->map[regidx];
                        reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
                                << regoff);
                        reg |= (u64)(tctxt++) << regoff;
                        rmt->map[regidx] = reg;
                        if (tctxt == ctxt + krcvqs[i])
                                tctxt = ctxt;
                }
                ctxt += krcvqs[i];
        }

        rrd.offset = rmt->used;
        rrd.pkt_type = 2;
        rrd.field1_off = LRH_BTH_MATCH_OFFSET;
        rrd.field2_off = LRH_SC_MATCH_OFFSET;
        rrd.index1_off = LRH_SC_SELECT_OFFSET;
        rrd.index1_width = n;
        rrd.index2_off = QPN_SELECT_OFFSET;
        rrd.index2_width = m + n;
        rrd.mask1 = LRH_BTH_MASK;
        rrd.value1 = LRH_BTH_VALUE;
        rrd.mask2 = LRH_SC_MASK;
        rrd.value2 = LRH_SC_VALUE;

        /* add rule 0 */
        add_rsm_rule(dd, 0, &rrd);

        /* mark RSM map entries as used */
        rmt->used += rmt_entries;
        /* map everything else to the mcast/err/vl15 context */
        init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
        dd->qos_shift = n + 1;
        return;
bail:
        dd->qos_shift = 1;
        init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
}
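/*
 * Illustration of the index generation above: with rmt->used == 0 and
 * n == 1 (two VLs), the hardware index (qpn << n) ^ i places VL0 in the
 * even map entries and VL1 in the odd ones, each VL round-robining
 * through its own krcvqs[i] kernel receive contexts.
 */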
static void init_user_fecn_handling(struct hfi1_devdata *dd,
                                    struct rsm_map_table *rmt)
{
        struct rsm_rule_data rrd;
        u64 reg;
        int i, idx, regoff, regidx;
        u8 offset;

        /* there needs to be enough room in the map table */
        if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
                dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
                return;
        }

        /*
         * RSM will extract the destination context as an index into the
         * map table.  The destination contexts are a sequential block
         * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
         * Map entries are accessed as offset + extracted value.  Adjust
         * the added offset so this sequence can be placed anywhere in
         * the table - as long as the entries themselves do not wrap.
         * There are only enough bits in offset for the table size, so
         * start with that to allow for a "negative" offset.
         */
        offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
                      (int)dd->first_user_ctxt);

        for (i = dd->first_user_ctxt, idx = rmt->used;
             i < dd->num_rcv_contexts; i++, idx++) {
                /* replace with identity mapping */
                regoff = (idx % 8) * 8;
                regidx = idx / 8;
                reg = rmt->map[regidx];
                reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
                reg |= (u64)i << regoff;
                rmt->map[regidx] = reg;
        }

        /*
         * For RSM intercept of Expected FECN packets:
         * o packet type 0 - expected
         * o match on F (bit 95), using select/match 1, and
         * o match on SH (bit 133), using select/match 2.
         *
         * Use index 1 to extract the 8-bit receive context from DestQP
         * (start at bit 64).  Use that as the RSM map table index.
         */
        rrd.offset = offset;
        rrd.pkt_type = 0;
        rrd.field1_off = 95;
        rrd.field2_off = 133;
        rrd.index1_off = 64;
        rrd.index1_width = 8;
        rrd.index2_off = 0;
        rrd.index2_width = 0;
        rrd.mask1 = 1;
        rrd.value1 = 1;
        rrd.mask2 = 1;
        rrd.value2 = 1;

        /* add rule 1 */
        add_rsm_rule(dd, 1, &rrd);

        rmt->used += dd->num_user_contexts;
}
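/*
 * Worked example of the "negative" offset above (hypothetical sizes):
 * with NUM_MAP_ENTRIES == 256, rmt->used == 16, and
 * first_user_ctxt == 24, offset becomes (u8)(256 + 16 - 24) == 248.
 * RSM adds the offset to the extracted context modulo the table size,
 * so destination context 24 selects entry (24 + 248) % 256 == 16 -
 * exactly where the identity entry for the first user context was
 * placed by the loop above.
 */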
static void init_rxe(struct hfi1_devdata *dd)
{
        struct rsm_map_table *rmt;

        /* enable all receive errors */
        write_csr(dd, RCV_ERR_MASK, ~0ull);

        rmt = alloc_rsm_map_table(dd);
        /* set up QOS, including the QPN map table */
        init_qos(dd, rmt);
        if (rmt)
                init_user_fecn_handling(dd, rmt);
        complete_rsm_map_table(dd, rmt);

        /*
         * make sure RcvCtrl.RcvWcb <= PCIe Device Control
         * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
         * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
         * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
         * Max_Payload_Size set to its minimum of 128.
         *
         * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
         * (64 bytes).  Max_Payload_Size is possibly modified upward in
         * tune_pcie_caps() which is called after this routine.
         */
}
static void init_other(struct hfi1_devdata *dd)
{
        /* enable all CCE errors */
        write_csr(dd, CCE_ERR_MASK, ~0ull);
        /* enable *some* Misc errors */
        write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
        /* enable all DC errors, except LCB */
        write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
        write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
}
/*
 * Fill out the given AU table using the given CU.  A CU is defined in terms
 * of AUs.  The table is an encoding: given the index, how many AUs does that
 * index represent?
 *
 * NOTE: Assumes that the register layout is the same for the
 * local and remote tables.
 */
static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
                               u32 csr0to3, u32 csr4to7)
{
        write_csr(dd, csr0to3,
                  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
                  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
                  2ull * cu <<
                  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
                  4ull * cu <<
                  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
        write_csr(dd, csr4to7,
                  8ull * cu <<
                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
                  16ull * cu <<
                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
                  32ull * cu <<
                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
                  64ull * cu <<
                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
}

static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
        assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
                           SEND_CM_LOCAL_AU_TABLE4_TO7);
}

void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
        assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
                           SEND_CM_REMOTE_AU_TABLE4_TO7);
}
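/*
 * Worked example of the encoding above: table entry 0 always means
 * 0 AUs and entry 1 means 1 AU, while entries 2-7 scale with the CU as
 * 2*CU, 4*CU, ... 64*CU.  With vcu_to_cu(vcu) == 1, index 4 therefore
 * encodes 8 AUs.
 */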
static void init_txe(struct hfi1_devdata *dd)
{
        int i;

        /* enable all PIO, SDMA, general, and Egress errors */
        write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
        write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
        write_csr(dd, SEND_ERR_MASK, ~0ull);
        write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);

        /* enable all per-context and per-SDMA engine errors */
        for (i = 0; i < dd->chip_send_contexts; i++)
                write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
        for (i = 0; i < dd->chip_sdma_engines; i++)
                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);

        /* set the local CU to AU mapping */
        assign_local_cm_au_table(dd, dd->vcu);

        /*
         * Set reasonable default for Credit Return Timer
         * Don't set on Simulator - causes it to choke.
         */
        if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
                write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
}
int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
{
        struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
        unsigned sctxt;
        int ret = 0;
        u64 reg;

        if (!rcd || !rcd->sc) {
                ret = -EINVAL;
                goto done;
        }
        sctxt = rcd->sc->hw_context;
        reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
                ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
                 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
        /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
        if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
                reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
        /*
         * Enable send-side J_KEY integrity check, unless this is A0 h/w
         */
        if (!is_ax(dd)) {
                reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
                reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
                write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
        }

        /* Enable J_KEY check on receive context. */
        reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
                ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
                 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
        write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
done:
        return ret;
}
int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
{
        struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
        unsigned sctxt;
        int ret = 0;
        u64 reg;

        if (!rcd || !rcd->sc) {
                ret = -EINVAL;
                goto done;
        }
        sctxt = rcd->sc->hw_context;
        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
        /*
         * Disable send-side J_KEY integrity check, unless this is A0 h/w.
         * This check would not have been enabled for A0 h/w, see
         * hfi1_set_ctxt_jkey().
         */
        if (!is_ax(dd)) {
                reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
                reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
                write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
        }
        /* Turn off the J_KEY on the receive side */
        write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
done:
        return ret;
}
int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
{
        struct hfi1_ctxtdata *rcd;
        unsigned sctxt;
        int ret = 0;
        u64 reg;

        if (ctxt < dd->num_rcv_contexts) {
                rcd = dd->rcd[ctxt];
        } else {
                ret = -EINVAL;
                goto done;
        }
        if (!rcd || !rcd->sc) {
                ret = -EINVAL;
                goto done;
        }
        sctxt = rcd->sc->hw_context;
        reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
                SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
        reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
        reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
        reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
done:
        return ret;
}
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
{
        struct hfi1_ctxtdata *rcd;
        unsigned sctxt;
        int ret = 0;
        u64 reg;

        if (ctxt < dd->num_rcv_contexts) {
                rcd = dd->rcd[ctxt];
        } else {
                ret = -EINVAL;
                goto done;
        }
        if (!rcd || !rcd->sc) {
                ret = -EINVAL;
                goto done;
        }
        sctxt = rcd->sc->hw_context;
        reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
        reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
done:
        return ret;
}
/*
 * Start doing the clean up of the chip.  Our clean up happens in multiple
 * stages and this is just the first.
 */
void hfi1_start_cleanup(struct hfi1_devdata *dd)
{
        aspm_exit(dd);
        free_cntrs(dd);
        free_rcverr(dd);
        clean_up_interrupts(dd);
        finish_chip_resources(dd);
}
#define HFI_BASE_GUID(dev) \
        ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))

/*
 * Information can be shared between the two HFIs on the same ASIC
 * in the same OS.  This function finds the peer device and sets
 * up a shared structure.
 */
static int init_asic_data(struct hfi1_devdata *dd)
{
        unsigned long flags;
        struct hfi1_devdata *tmp, *peer = NULL;
        int ret = 0;

        spin_lock_irqsave(&hfi1_devs_lock, flags);
        /* Find our peer device */
        list_for_each_entry(tmp, &hfi1_dev_list, list) {
                if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
                    dd->unit != tmp->unit) {
                        peer = tmp;
                        break;
                }
        }

        if (peer) {
                dd->asic_data = peer->asic_data;
        } else {
                dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
                if (!dd->asic_data) {
                        ret = -ENOMEM;
                        goto done;
                }
                mutex_init(&dd->asic_data->asic_resource_mutex);
        }
        dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */

done:
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        return ret;
}
/*
 * Set dd->boardname.  Use a generic name if a name is not returned from
 * EFI variable space.
 *
 * Return 0 on success, -ENOMEM if space could not be allocated.
 */
static int obtain_boardname(struct hfi1_devdata *dd)
{
        /* generic board description */
        const char generic[] =
                "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
        unsigned long size;
        int ret;

        ret = read_hfi1_efi_var(dd, "description", &size,
                                (void **)&dd->boardname);
        if (ret) {
                dd_dev_info(dd, "Board description not found\n");
                /* use generic description */
                dd->boardname = kstrdup(generic, GFP_KERNEL);
                if (!dd->boardname)
                        return -ENOMEM;
        }
        return 0;
}
/*
 * Check the interrupt registers to make sure that they are mapped correctly.
 * It is intended to help the user identify any mismapping by the VMM when
 * the driver is running in a VM.  This function should only be called before
 * interrupts are set up properly.
 *
 * Return 0 on success, -EINVAL on failure.
 */
static int check_int_registers(struct hfi1_devdata *dd)
{
        u64 reg;
        u64 all_bits = ~(u64)0;
        u64 mask;

        /* Clear CceIntMask[0] to avoid raising any interrupts */
        mask = read_csr(dd, CCE_INT_MASK);
        write_csr(dd, CCE_INT_MASK, 0ull);
        reg = read_csr(dd, CCE_INT_MASK);
        if (reg)
                goto err_exit;

        /* Clear all interrupt status bits */
        write_csr(dd, CCE_INT_CLEAR, all_bits);
        reg = read_csr(dd, CCE_INT_STATUS);
        if (reg)
                goto err_exit;

        /* Set all interrupt status bits */
        write_csr(dd, CCE_INT_FORCE, all_bits);
        reg = read_csr(dd, CCE_INT_STATUS);
        if (reg != all_bits)
                goto err_exit;

        /* Restore the interrupt mask */
        write_csr(dd, CCE_INT_CLEAR, all_bits);
        write_csr(dd, CCE_INT_MASK, mask);

        return 0;
err_exit:
        write_csr(dd, CCE_INT_MASK, mask);
        dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
        return -EINVAL;
}
/**
 * Allocate and initialize the device structure for the hfi.
 * @pdev: the pci_dev for the hfi1_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, initializes, and returns the devdata struct for this
 * device instance
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        struct hfi1_devdata *dd;
        struct hfi1_pportdata *ppd;
        u64 reg;
        int i, ret;
        static const char * const inames[] = { /* implementation names */
                "RTL silicon",
                "RTL VCS simulation",
                "RTL FPGA emulation",
                "Functional simulator"
        };
        struct pci_dev *parent = pdev->bus->self;

        dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
                                sizeof(struct hfi1_pportdata));
        if (IS_ERR(dd))
                goto bail;
        ppd = dd->pport;
        for (i = 0; i < dd->num_pports; i++, ppd++) {
                int vl;

                /* init common fields */
                hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
                /* DC supports 4 link widths */
                ppd->link_width_supported =
                        OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
                        OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
                ppd->link_width_downgrade_supported =
                        ppd->link_width_supported;
                /* start out enabling only 4X */
                ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
                ppd->link_width_downgrade_enabled =
                        ppd->link_width_downgrade_supported;
                /* link width active is 0 when link is down */
                /* link width downgrade active is 0 when link is down */

                if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
                    num_vls > HFI1_MAX_VLS_SUPPORTED) {
                        hfi1_early_err(&pdev->dev,
                                       "Invalid num_vls %u, using %u VLs\n",
                                       num_vls, HFI1_MAX_VLS_SUPPORTED);
                        num_vls = HFI1_MAX_VLS_SUPPORTED;
                }
                ppd->vls_supported = num_vls;
                ppd->vls_operational = ppd->vls_supported;
                ppd->actual_vls_operational = ppd->vls_supported;
                /* Set the default MTU. */
                for (vl = 0; vl < num_vls; vl++)
                        dd->vld[vl].mtu = hfi1_max_mtu;
                dd->vld[15].mtu = MAX_MAD_PACKET;
                /*
                 * Set the initial values to reasonable default, will be set
                 * for real when link is up.
                 */
                ppd->lstate = IB_PORT_DOWN;
                ppd->overrun_threshold = 0x4;
                ppd->phy_error_threshold = 0xf;
                ppd->port_crc_mode_enabled = link_crc_mask;
                /* initialize supported LTP CRC mode */
                ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
                /* initialize enabled LTP CRC mode */
                ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
                /* start in offline */
                ppd->host_link_state = HLS_DN_OFFLINE;
                init_vl_arb_caches(ppd);
                ppd->last_pstate = 0xff; /* invalid value */
        }

        dd->link_default = HLS_DN_POLL;

        /*
         * Do remaining PCIe setup and save PCIe values in dd.
         * Any error printing is already done by the init code.
         * On return, we have the chip mapped.
         */
        ret = hfi1_pcie_ddinit(dd, pdev, ent);
        if (ret < 0)
                goto bail_free;

        /* verify that reads actually work, save revision for reset check */
        dd->revision = read_csr(dd, CCE_REVISION);
        if (dd->revision == ~(u64)0) {
                dd_dev_err(dd, "cannot read chip CSRs\n");
                ret = -EINVAL;
                goto bail_cleanup;
        }
        dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
                        & CCE_REVISION_CHIP_REV_MAJOR_MASK;
        dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
                        & CCE_REVISION_CHIP_REV_MINOR_MASK;

        /*
         * Check interrupt registers mapping if the driver has no access to
         * the upstream component. In this case, it is likely that the driver
         * is running in a VM.
         */
        if (!parent) {
                ret = check_int_registers(dd);
                if (ret)
                        goto bail_cleanup;
        }

        /*
         * obtain the hardware ID - NOT related to unit, which is a
         * software enumeration
         */
        reg = read_csr(dd, CCE_REVISION2);
        dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
                                        & CCE_REVISION2_HFI_ID_MASK;
        /* the variable size will remove unwanted bits */
        dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
        dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
        dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
                    dd->icode < ARRAY_SIZE(inames) ?
                    inames[dd->icode] : "unknown", (int)dd->irev);

        /* speeds the hardware can support */
        dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
        /* speeds allowed to run at */
        dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
        /* give a reasonable active value, will be set on link up */
        dd->pport->link_speed_active = OPA_LINK_SPEED_25G;

        dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
        dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
        dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
        dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
        dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);

        /* fix up link widths for emulation _p */
        ppd = dd->pport;
        if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
                ppd->link_width_supported =
                        ppd->link_width_enabled =
                        ppd->link_width_downgrade_supported =
                        ppd->link_width_downgrade_enabled =
                                OPA_LINK_WIDTH_1X;
        }
        /* ensure num_vls isn't larger than number of sdma engines */
        if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
                dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
                           num_vls, dd->chip_sdma_engines);
                num_vls = dd->chip_sdma_engines;
                ppd->vls_supported = dd->chip_sdma_engines;
                ppd->vls_operational = ppd->vls_supported;
        }

        /*
         * Convert the ns parameter to the 64 * cclocks used in the CSR.
         * Limit the max if larger than the field holds.  If timeout is
         * non-zero, then the calculated field will be at least 1.
         *
         * Must be after icode is set up - the cclock rate depends
         * on knowing the hardware being used.
         */
        dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
        if (dd->rcv_intr_timeout_csr >
                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
                dd->rcv_intr_timeout_csr =
                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
        else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
                dd->rcv_intr_timeout_csr = 1;
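        /*
         * Numeric sketch (hypothetical 1 GHz cclock, for illustration
         * only): a 640 ns timeout would convert to ~640 cclocks, giving
         * a reload field of 640 / 64 = 10.  The "at least 1" clamp only
         * matters for small non-zero timeouts that would otherwise round
         * down to 0.
         */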
        /* needs to be done before we look for the peer device */
        read_guid(dd);

        /* set up shared ASIC data with peer device */
        ret = init_asic_data(dd);
        if (ret)
                goto bail_cleanup;

        /* obtain chip sizes, reset chip CSRs */
        init_chip(dd);

        /* read in the PCIe link speed information */
        ret = pcie_speeds(dd);
        if (ret)
                goto bail_cleanup;

        /* Needs to be called before hfi1_firmware_init */
        get_platform_config(dd);

        /* read in firmware */
        ret = hfi1_firmware_init(dd);
        if (ret)
                goto bail_cleanup;

        /*
         * In general, the PCIe Gen3 transition must occur after the
         * chip has been idled (so it won't initiate any PCIe transactions
         * e.g. an interrupt) and before the driver changes any registers
         * (the transition will reset the registers).
         *
         * In particular, place this call after:
         * - init_chip()          - the chip will not initiate any PCIe
         *                          transactions
         * - pcie_speeds()        - reads the current link speed
         * - hfi1_firmware_init() - the needed firmware is ready to be
         *                          downloaded
         */
        ret = do_pcie_gen3_transition(dd);
        if (ret)
                goto bail_cleanup;

        /* start setting dd values and adjusting CSRs */
        init_early_variables(dd);

        parse_platform_config(dd);

        ret = obtain_boardname(dd);
        if (ret)
                goto bail_cleanup;

        snprintf(dd->boardversion, BOARD_VERS_MAX,
                 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
                 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
                 (u32)dd->majrev,
                 (u32)dd->minrev,
                 (dd->revision >> CCE_REVISION_SW_SHIFT)
                    & CCE_REVISION_SW_MASK);

        /*
         * The real cpu mask is part of the affinity struct but it has to be
         * initialized earlier than the rest of the affinity struct because it
         * is needed to calculate the number of user contexts in
         * set_up_context_variables(). However, hfi1_dev_affinity_init(),
         * which initializes the rest of the affinity struct members,
         * depends on set_up_context_variables() for the number of kernel
         * contexts, so it cannot be called before set_up_context_variables().
         */
        ret = init_real_cpu_mask(dd);
        if (ret)
                goto bail_cleanup;

        ret = set_up_context_variables(dd);
        if (ret)
                goto bail_cleanup;

        /* set initial RXE CSRs */
        init_rxe(dd);
        /* set initial TXE CSRs */
        init_txe(dd);
        /* set initial non-RXE, non-TXE CSRs */
        init_other(dd);
        /* set up KDETH QP prefix in both RX and TX CSRs */
        init_kdeth_qp(dd);

        hfi1_dev_affinity_init(dd);

        /* send contexts must be set up before receive contexts */
        ret = init_send_contexts(dd);
        if (ret)
                goto bail_cleanup;

        ret = hfi1_create_ctxts(dd);
        if (ret)
                goto bail_cleanup;

        dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
        /*
         * rcd[0] is guaranteed to be valid by this point. Also, all
         * contexts are using the same value, as per the module parameter.
         */
        dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);

        ret = init_pervl_scs(dd);
        if (ret)
                goto bail_cleanup;

        /* sdma init */
        for (i = 0; i < dd->num_pports; ++i) {
                ret = sdma_init(dd, i);
                if (ret)
                        goto bail_cleanup;
        }

        /* use contexts created by hfi1_create_ctxts */
        ret = set_up_interrupts(dd);
        if (ret)
                goto bail_cleanup;

        /* set up LCB access - must be after set_up_interrupts() */
        init_lcb_access(dd);

        snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
                 dd->base_guid & 0xFFFFFF);

        dd->oui1 = dd->base_guid >> 56 & 0xFF;
        dd->oui2 = dd->base_guid >> 48 & 0xFF;
        dd->oui3 = dd->base_guid >> 40 & 0xFF;

        ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
        if (ret)
                goto bail_clear_intr;
        check_fabric_firmware_versions(dd);

        thermal_init(dd);

        ret = init_cntrs(dd);
        if (ret)
                goto bail_clear_intr;

        ret = init_rcverr(dd);
        if (ret)
                goto bail_free_cntrs;

        ret = eprom_init(dd);
        if (ret)
                goto bail_free_rcverr;

        goto bail;

bail_free_rcverr:
        free_rcverr(dd);
bail_free_cntrs:
        free_cntrs(dd);
bail_clear_intr:
        clean_up_interrupts(dd);
bail_cleanup:
        hfi1_pcie_ddcleanup(dd);
bail_free:
        hfi1_free_devdata(dd);
        dd = ERR_PTR(ret);
bail:
        return dd;
}
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
                        u32 dw_len)
{
        u32 delta_cycles;
        u32 current_egress_rate = ppd->current_egress_rate;
        /* rates here are in units of 10^6 bits/sec */

        if (desired_egress_rate == -1)
                return 0; /* shouldn't happen */

        if (desired_egress_rate >= current_egress_rate)
                return 0; /* we can't help go faster, only slower */

        delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
                       egress_cycles(dw_len * 4, current_egress_rate);

        return (u16)delta_cycles;
}
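/*
 * Illustration (made-up rates): for a 4096-dword packet (16384 bytes),
 * a desired static rate of 12500 Mbit/s on a link currently egressing
 * at 25000 Mbit/s returns egress_cycles(16384, 12500) -
 * egress_cycles(16384, 25000): the extra fabric cycles the packet would
 * occupy at the slower rate, i.e. how long to stall before the next
 * packet.
 */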
/**
 * create_pbc - build a pbc for transmission
 * @ppd: port data
 * @flags: special case flags or-ed in built pbc
 * @srate_mbs: static rate, in Mbit/s
 * @vl: vl
 * @dw_len: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature.  The lone other caller
 * is for the diagnostic interface which calls this if the user does not
 * supply their own PBC.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
               u32 dw_len)
{
        u64 pbc, delay = 0;

        if (unlikely(srate_mbs))
                delay = delay_cycles(ppd, srate_mbs, dw_len);

        pbc = flags
                | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
                | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
                | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
                | (dw_len & PBC_LENGTH_DWS_MASK)
                        << PBC_LENGTH_DWS_SHIFT;

        return pbc;
}
#define SBUS_THERMAL    0x4f
#define SBUS_THERM_MONITOR_MODE 0x1

#define THERM_FAILURE(dev, ret, reason) \
        dd_dev_err((dev),                                               \
                   "Thermal sensor initialization failed: %s (%d)\n",  \
                   (reason), (ret))

/*
 * Initialize the thermal sensor.
 *
 * After initialization, enable polling of thermal sensor through
 * SBus interface. In order for this to work, the SBus Master
 * firmware has to be loaded, because the HW polling logic uses SBus
 * interrupts, which are not supported with default firmware.
 * Otherwise, no data will be returned through the ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
        int ret = 0;

        if (dd->icode != ICODE_RTL_SILICON ||
            check_chip_resource(dd, CR_THERM_INIT, NULL))
                return ret;

        ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
        if (ret) {
                THERM_FAILURE(dd, ret, "Acquire SBus");
                return ret;
        }

        dd_dev_info(dd, "Initializing thermal sensor\n");
        /* Disable polling of thermal readings */
        write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
        msleep(100);
        /* Thermal Sensor Initialization */
        /*    Step 1: Reset the Thermal SBus Receiver */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
                                RESET_SBUS_RECEIVER, 0);
        if (ret) {
                THERM_FAILURE(dd, ret, "Bus Reset");
                goto done;
        }
        /*    Step 2: Set Reset bit in Thermal block */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
                                WRITE_SBUS_RECEIVER, 0x1);
        if (ret) {
                THERM_FAILURE(dd, ret, "Therm Block Reset");
                goto done;
        }
        /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
                                WRITE_SBUS_RECEIVER, 0x32);
        if (ret) {
                THERM_FAILURE(dd, ret, "Write Clock Div");
                goto done;
        }
        /*    Step 4: Select temperature mode */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
                                WRITE_SBUS_RECEIVER,
                                SBUS_THERM_MONITOR_MODE);
        if (ret) {
                THERM_FAILURE(dd, ret, "Write Mode Sel");
                goto done;
        }
        /*    Step 5: De-assert block reset and start conversion */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
                                WRITE_SBUS_RECEIVER, 0x2);
        if (ret) {
                THERM_FAILURE(dd, ret, "Write Reset Deassert");
                goto done;
        }
        /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
        msleep(22);

        /* Enable polling of thermal readings */
        write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);

        /* Set initialized flag */
        ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
        if (ret)
                THERM_FAILURE(dd, ret, "Unable to set thermal init flag");

done:
        release_chip_resource(dd, CR_SBUS);
        return ret;
}
static void handle_temp_err(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd = &dd->pport[0];
        /*
         * Thermal Critical Interrupt
         * Put the device into forced freeze mode, take link down to
         * offline, and put DC into reset.
         */
        dd_dev_emerg(dd,
                     "Critical temperature reached! Forcing device into freeze mode!\n");
        dd->flags |= HFI1_FORCED_FREEZE;
        start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
        /*
         * Shut DC down as much and as quickly as possible.
         *
         * Step 1: Take the link down to OFFLINE. This will cause the
         *         8051 to put the Serdes in reset. However, we don't want to
         *         go through the entire link state machine since we want to
         *         shutdown ASAP. Furthermore, this is not a graceful shutdown
         *         but rather an attempt to save the chip.
         *         Code below is almost the same as quiet_serdes() but avoids
         *         all the extra work and the sleeps.
         */
        ppd->driver_link_ready = 0;
        ppd->link_enabled = 0;
        set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
                                PLS_OFFLINE);

        /*
         * Step 2: Shutdown LCB and 8051
         *         After shutdown, do not restore DC_CFG_RESET value.
         */