1/*
2 * Copyright(c) 2015, 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48/*
49 * This file contains all of the code that is specific to the HFI chip
50 */
51
52#include <linux/pci.h>
53#include <linux/delay.h>
54#include <linux/interrupt.h>
55#include <linux/module.h>
56
57#include "hfi.h"
58#include "trace.h"
59#include "mad.h"
60#include "pio.h"
61#include "sdma.h"
62#include "eprom.h"
63#include "efivar.h"
64#include "platform.h"
65#include "aspm.h"
66#include "affinity.h"
67
68#define NUM_IB_PORTS 1
69
70uint kdeth_qp;
71module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73
74uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75module_param(num_vls, uint, S_IRUGO);
76MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77
78/*
79 * Default time to aggregate two 10K packets from the idle state
80 * (timer not running). The timer starts at the end of the first packet,
81 * so only the time for one 10K packet and header plus a bit extra is needed.
82 * 10 * 1024 + 64 header bytes = 10304 bytes
83 * 10304 bytes / 12.5 GB/s = 824.32ns
84 */
85uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
86module_param(rcv_intr_timeout, uint, S_IRUGO);
87MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
88
89uint rcv_intr_count = 16; /* same as qib */
90module_param(rcv_intr_count, uint, S_IRUGO);
91MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92
93ushort link_crc_mask = SUPPORTED_CRCS;
94module_param(link_crc_mask, ushort, S_IRUGO);
95MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96
97uint loopback;
98module_param_named(loopback, loopback, uint, S_IRUGO);
99MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
100
101/* Other driver tunables */
102uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/
103static ushort crc_14b_sideband = 1;
104static uint use_flr = 1;
105uint quick_linkup; /* skip LNI */
106
107struct flag_table {
108 u64 flag; /* the flag */
109 char *str; /* description string */
110 u16 extra; /* extra information */
111 u16 unused0;
112 u32 unused1;
113};
114
115/* str must be a string constant */
116#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
117#define FLAG_ENTRY0(str, flag) {flag, str, 0}
118
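/*
 * Illustrative sketch (not part of the driver): a flag_table such as the
 * ones below is typically scanned against a raw error-status value to build
 * a readable list of the asserted error names. The helper name and buffer
 * handling here are hypothetical.
 */
static void example_flags_to_string(u64 status, const struct flag_table *table,
				    size_t table_len, char *buf, size_t buflen)
{
	size_t i, len = 0;

	buf[0] = '\0';
	for (i = 0; i < table_len && len < buflen; i++) {
		/* append every entry whose flag bit is set in status */
		if (status & table[i].flag)
			len += scnprintf(buf + len, buflen - len, "%s%s",
					 len ? "," : "", table[i].str);
	}
}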
119/* Send Error Consequences */
120#define SEC_WRITE_DROPPED 0x1
121#define SEC_PACKET_DROPPED 0x2
122#define SEC_SC_HALTED 0x4 /* per-context only */
123#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
124
125#define MIN_KERNEL_KCTXTS 2
126#define FIRST_KERNEL_KCTXT 1
127/* sizes for both the QP and RSM map tables */
128#define NUM_MAP_ENTRIES 256
129#define NUM_MAP_REGS 32
130
131/* Bit offset into the GUID which carries HFI id information */
132#define GUID_HFI_INDEX_SHIFT 39
133
134/* extract the emulation revision */
135#define emulator_rev(dd) ((dd)->irev >> 8)
136/* parallel and serial emulation versions are 3 and 4 respectively */
137#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
138#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
139
140/* RSM fields */
141
142/* packet type */
143#define IB_PACKET_TYPE 2ull
144#define QW_SHIFT 6ull
145/* QPN[7..1] */
146#define QPN_WIDTH 7ull
147
148/* LRH.BTH: QW 0, OFFSET 48 - for match */
149#define LRH_BTH_QW 0ull
150#define LRH_BTH_BIT_OFFSET 48ull
151#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
152#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
153#define LRH_BTH_SELECT
154#define LRH_BTH_MASK 3ull
155#define LRH_BTH_VALUE 2ull
156
157/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
158#define LRH_SC_QW 0ull
159#define LRH_SC_BIT_OFFSET 56ull
160#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
161#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
162#define LRH_SC_MASK 128ull
163#define LRH_SC_VALUE 0ull
164
165/* SC[n..0] QW 0, OFFSET 60 - for select */
166#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
167
168/* QPN[m+n:1] QW 1, OFFSET 1 */
169#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
170
171/* defines to build power on SC2VL table */
172#define SC2VL_VAL( \
173 num, \
174 sc0, sc0val, \
175 sc1, sc1val, \
176 sc2, sc2val, \
177 sc3, sc3val, \
178 sc4, sc4val, \
179 sc5, sc5val, \
180 sc6, sc6val, \
181 sc7, sc7val) \
182( \
183 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
184 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
185 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
186 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
187 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
188 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
189 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
190 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
191)
192
193#define DC_SC_VL_VAL( \
194 range, \
195 e0, e0val, \
196 e1, e1val, \
197 e2, e2val, \
198 e3, e3val, \
199 e4, e4val, \
200 e5, e5val, \
201 e6, e6val, \
202 e7, e7val, \
203 e8, e8val, \
204 e9, e9val, \
205 e10, e10val, \
206 e11, e11val, \
207 e12, e12val, \
208 e13, e13val, \
209 e14, e14val, \
210 e15, e15val) \
211( \
212 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
213 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
214 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
215 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
216 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
217 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
218 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
219 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
220 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
221 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
222 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
223 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
224 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
225 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
226 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
227 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
228)
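/*
 * Illustrative usage (hypothetical values): SC2VL_VAL collapses one whole
 * SendSC2VLt CSR into a single 64-bit constant. A power-on identity mapping
 * of SC0..SC7 onto VL0..VL7 for table 0 could be written as
 *
 *	SC2VL_VAL(0,
 *		  0, 0, 1, 1, 2, 2, 3, 3,
 *		  4, 4, 5, 5, 6, 6, 7, 7)
 *
 * which ORs each VL value into its SEND_SC2VLT0_SC<n>_SHIFT position.
 * DC_SC_VL_VAL builds the DCC_CFG_SC_VL_TABLE_* values the same way,
 * sixteen entries at a time.
 */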
229
230/* all CceStatus sub-block freeze bits */
231#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
232 | CCE_STATUS_RXE_FROZE_SMASK \
233 | CCE_STATUS_TXE_FROZE_SMASK \
234 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
235/* all CceStatus sub-block TXE pause bits */
236#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
237 | CCE_STATUS_TXE_PAUSED_SMASK \
238 | CCE_STATUS_SDMA_PAUSED_SMASK)
239/* all CceStatus sub-block RXE pause bits */
240#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
241
242#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
243#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
244
245/*
246 * CCE Error flags.
247 */
248static struct flag_table cce_err_status_flags[] = {
249/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
250 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
251/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
252 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
253/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
254 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
255/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
256 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
257/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
258 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
259/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
260 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
261/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
262 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
263/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
264 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
265/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
266 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
267/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
268 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
269/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
270 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
271/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
272 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
273/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
274 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
275/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
276 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
277/*14*/ FLAG_ENTRY0("PcicRetryMemCorErr",
278 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
279/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
280 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
281/*16*/ FLAG_ENTRY0("PcicPostHdQCorErr",
282 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
283/*17*/ FLAG_ENTRY0("PcicPostHdQCorErr",
284 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
285/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
286 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
287/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
288 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
289/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
290 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
291/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
292 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
293/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
294 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
295/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
296 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
297/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
298 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
299/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
300 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
301/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
302 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
303/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
304 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
305/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
306 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
307/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
308 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
309/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
310 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
311/*31*/ FLAG_ENTRY0("LATriggered",
312 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
313/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
314 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
315/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
316 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
317/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
318 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
319/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
320 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
321/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
322 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
323/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
324 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
325/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
326 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
327/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
328 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
329/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
330 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
331/*41-63 reserved*/
332};
333
334/*
335 * Misc Error flags
336 */
337#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
338static struct flag_table misc_err_status_flags[] = {
339/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
340/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
341/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
342/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
343/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
344/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
345/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
346/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
347/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
348/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
349/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
350/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
351/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
352};
353
354/*
355 * TXE PIO Error flags and consequences
356 */
357static struct flag_table pio_err_status_flags[] = {
358/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
359 SEC_WRITE_DROPPED,
360 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
361/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
362 SEC_SPC_FREEZE,
363 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
364/* 2*/ FLAG_ENTRY("PioCsrParity",
365 SEC_SPC_FREEZE,
366 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
367/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
368 SEC_SPC_FREEZE,
369 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
370/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
371 SEC_SPC_FREEZE,
372 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
373/* 5*/ FLAG_ENTRY("PioPccFifoParity",
374 SEC_SPC_FREEZE,
375 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
376/* 6*/ FLAG_ENTRY("PioPecFifoParity",
377 SEC_SPC_FREEZE,
378 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
379/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
380 SEC_SPC_FREEZE,
381 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
382/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
383 SEC_SPC_FREEZE,
384 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
385/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
386 SEC_SPC_FREEZE,
387 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
388/*10*/ FLAG_ENTRY("PioSmPktResetParity",
389 SEC_SPC_FREEZE,
390 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
391/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
392 SEC_SPC_FREEZE,
393 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
394/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
395 SEC_SPC_FREEZE,
396 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
397/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
398 0,
399 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
400/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
401 0,
402 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
403/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
404 SEC_SPC_FREEZE,
405 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
406/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
407 SEC_SPC_FREEZE,
408 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
409/*17*/ FLAG_ENTRY("PioInitSmIn",
410 0,
411 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
412/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
413 SEC_SPC_FREEZE,
414 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
415/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
416 SEC_SPC_FREEZE,
417 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
418/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
419 0,
420 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
421/*21*/ FLAG_ENTRY("PioWriteDataParity",
422 SEC_SPC_FREEZE,
423 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
424/*22*/ FLAG_ENTRY("PioStateMachine",
425 SEC_SPC_FREEZE,
426 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
427/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
428 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
429 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
430/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
431 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
432 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
433/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
434 SEC_SPC_FREEZE,
435 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
436/*26*/ FLAG_ENTRY("PioVlfSopParity",
437 SEC_SPC_FREEZE,
438 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
439/*27*/ FLAG_ENTRY("PioVlFifoParity",
440 SEC_SPC_FREEZE,
441 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
442/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
443 SEC_SPC_FREEZE,
444 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
445/*29*/ FLAG_ENTRY("PioPpmcSopLen",
446 SEC_SPC_FREEZE,
447 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
448/*30-31 reserved*/
449/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
450 SEC_SPC_FREEZE,
451 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
452/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
453 SEC_SPC_FREEZE,
454 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
455/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
456 SEC_SPC_FREEZE,
457 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
458/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
459 SEC_SPC_FREEZE,
460 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
461/*36-63 reserved*/
462};
463
464/* TXE PIO errors that cause an SPC freeze */
465#define ALL_PIO_FREEZE_ERR \
466 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
490 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
491 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
492 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
493 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
494 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
495
496/*
497 * TXE SDMA Error flags
498 */
499static struct flag_table sdma_err_status_flags[] = {
500/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
501 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
502/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
503 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
504/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
505 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
506/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
507 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
508/*04-63 reserved*/
509};
510
511/* TXE SDMA errors that cause an SPC freeze */
512#define ALL_SDMA_FREEZE_ERR \
513 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
514 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
515 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
516
517/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
518#define PORT_DISCARD_EGRESS_ERRS \
519 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
520 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
521 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
522
523/*
524 * TXE Egress Error flags
525 */
526#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
527static struct flag_table egress_err_status_flags[] = {
528/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
529/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
530/* 2 reserved */
531/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
532 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
533/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
534/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
535/* 6 reserved */
536/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
537 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
538/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
539 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
540/* 9-10 reserved */
541/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
542 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
543/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
544/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
545/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
546/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
547/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
548 SEES(TX_SDMA0_DISALLOWED_PACKET)),
549/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
550 SEES(TX_SDMA1_DISALLOWED_PACKET)),
551/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
552 SEES(TX_SDMA2_DISALLOWED_PACKET)),
553/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
554 SEES(TX_SDMA3_DISALLOWED_PACKET)),
555/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
556 SEES(TX_SDMA4_DISALLOWED_PACKET)),
557/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
558 SEES(TX_SDMA5_DISALLOWED_PACKET)),
559/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
560 SEES(TX_SDMA6_DISALLOWED_PACKET)),
561/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
562 SEES(TX_SDMA7_DISALLOWED_PACKET)),
563/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
564 SEES(TX_SDMA8_DISALLOWED_PACKET)),
565/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
566 SEES(TX_SDMA9_DISALLOWED_PACKET)),
567/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
568 SEES(TX_SDMA10_DISALLOWED_PACKET)),
569/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
570 SEES(TX_SDMA11_DISALLOWED_PACKET)),
571/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
572 SEES(TX_SDMA12_DISALLOWED_PACKET)),
573/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
574 SEES(TX_SDMA13_DISALLOWED_PACKET)),
575/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
576 SEES(TX_SDMA14_DISALLOWED_PACKET)),
577/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
578 SEES(TX_SDMA15_DISALLOWED_PACKET)),
579/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
580 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
581/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
582 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
583/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
584 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
585/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
586 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
587/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
588 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
589/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
590 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
591/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
592 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
593/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
594 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
595/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
596 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
597/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
598/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
599/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
600/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
601/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
602/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
603/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
604/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
605/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
606/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
607/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
608/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
609/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
610/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
611/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
612/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
613/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
614/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
615/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
616/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
617/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
618/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
619 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
620/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
621 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
622};
623
624/*
625 * TXE Egress Error Info flags
626 */
627#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
628static struct flag_table egress_err_info_flags[] = {
629/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
630/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
631/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
632/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
633/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
634/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
635/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
636/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
637/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
638/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
639/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
640/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
641/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
642/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
643/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
644/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
645/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
646/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
647/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
648/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
649/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
650/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
651};
652
653/* TXE Egress errors that cause an SPC freeze */
654#define ALL_TXE_EGRESS_FREEZE_ERR \
655 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
656 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
657 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
658 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
659 | SEES(TX_LAUNCH_CSR_PARITY) \
660 | SEES(TX_SBRD_CTL_CSR_PARITY) \
661 | SEES(TX_CONFIG_PARITY) \
662 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
663 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
664 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
665 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
666 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
667 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
668 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
669 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
670 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
671 | SEES(TX_CREDIT_RETURN_PARITY))
672
673/*
674 * TXE Send error flags
675 */
676#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
677static struct flag_table send_err_status_flags[] = {
678/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
679/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
680/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
681};
682
683/*
684 * TXE Send Context Error flags and consequences
685 */
686static struct flag_table sc_err_status_flags[] = {
687/* 0*/ FLAG_ENTRY("InconsistentSop",
688 SEC_PACKET_DROPPED | SEC_SC_HALTED,
689 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
690/* 1*/ FLAG_ENTRY("DisallowedPacket",
691 SEC_PACKET_DROPPED | SEC_SC_HALTED,
692 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
693/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
694 SEC_WRITE_DROPPED | SEC_SC_HALTED,
695 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
696/* 3*/ FLAG_ENTRY("WriteOverflow",
697 SEC_WRITE_DROPPED | SEC_SC_HALTED,
698 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
699/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
700 SEC_WRITE_DROPPED | SEC_SC_HALTED,
701 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
702/* 5-63 reserved*/
703};
704
705/*
706 * RXE Receive Error flags
707 */
708#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
709static struct flag_table rxe_err_status_flags[] = {
710/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
711/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
712/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
713/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
714/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
715/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
716/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
717/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
718/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
719/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
720/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
721/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
722/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
723/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
724/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
725/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
726/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
727 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
728/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
729/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
730/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
731 RXES(RBUF_BLOCK_LIST_READ_UNC)),
732/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
733 RXES(RBUF_BLOCK_LIST_READ_COR)),
734/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
735 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
736/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
737 RXES(RBUF_CSR_QENT_CNT_PARITY)),
738/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
739 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
740/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
741 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
742/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
743/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
744/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
745 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
746/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
747/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
748/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
749/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
750/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
751/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
752/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
753/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
754 RXES(RBUF_FL_INITDONE_PARITY)),
755/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
756 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
757/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
758/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
759/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
760/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
761 RXES(LOOKUP_DES_PART1_UNC_COR)),
762/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
763 RXES(LOOKUP_DES_PART2_PARITY)),
764/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
765/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
766/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
767/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
768/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
769/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
770/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
771/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
772/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
773/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
774/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
775/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
776/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
777/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
778/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
779/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
780/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
781/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
782/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
783/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
784/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
785/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
786};
787
788/* RXE errors that will trigger an SPC freeze */
789#define ALL_RXE_FREEZE_ERR \
790 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
823 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
824 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
825 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
826 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
827 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
828 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
829 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
830 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
831 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
832 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
833 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
834
835#define RXE_FREEZE_ABORT_MASK \
836 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
837 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
838 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
839
840/*
841 * DCC Error Flags
842 */
843#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
844static struct flag_table dcc_err_flags[] = {
845 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
846 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
847 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
848 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
849 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
850 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
851 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
852 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
853 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
854 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
855 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
856 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
857 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
858 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
859 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
860 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
861 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
862 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
863 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
864 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
865 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
866 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
867 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
868 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
869 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
870 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
871 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
872 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
873 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
874 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
875 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
876 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
877 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
878 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
879 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
880 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
881 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
882 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
883 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
884 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
885 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
886 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
887 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
888 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
889 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
890 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
891};
892
893/*
894 * LCB error flags
895 */
896#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
897static struct flag_table lcb_err_flags[] = {
898/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
899/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
900/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
901/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
902 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
903/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
904/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
905/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
906/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
907/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
908/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
909/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
910/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
911/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
912/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
913 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
914/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
915/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
916/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
917/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
918/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
919/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
920 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
921/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
922/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
923/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
924/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
925/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
926/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
927/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
928 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
929/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
930/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
931 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
932/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
933 LCBE(REDUNDANT_FLIT_PARITY_ERR))
934};
935
936/*
937 * DC8051 Error Flags
938 */
939#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
940static struct flag_table dc8051_err_flags[] = {
941 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
942 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
943 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
944 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
945 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
946 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
947 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
948 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
949 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
950 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
951 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
952};
953
954/*
955 * DC8051 Information Error flags
956 *
957 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
958 */
959static struct flag_table dc8051_info_err_flags[] = {
960 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
961 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
962 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
963 FLAG_ENTRY0("Serdes internal loopback failure",
964 FAILED_SERDES_INTERNAL_LOOPBACK),
965 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
966 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
967 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
968 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
969 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
970 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
971 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
972 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
973 FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT)
974};
975
976/*
977 * DC8051 Information Host Information flags
978 *
979 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
980 */
981static struct flag_table dc8051_info_host_msg_flags[] = {
982 FLAG_ENTRY0("Host request done", 0x0001),
983 FLAG_ENTRY0("BC SMA message", 0x0002),
984 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
985 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
986 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
987 FLAG_ENTRY0("External device config request", 0x0020),
988 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
989 FLAG_ENTRY0("LinkUp achieved", 0x0080),
990 FLAG_ENTRY0("Link going down", 0x0100),
991};
992
993static u32 encoded_size(u32 size);
994static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
995static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
996static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
997 u8 *continuous);
998static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
999 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1000static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1001 u8 *remote_tx_rate, u16 *link_widths);
1002static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
1003 u8 *flag_bits, u16 *link_widths);
1004static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1005 u8 *device_rev);
1006static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1007static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1008static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1009 u8 *tx_polarity_inversion,
1010 u8 *rx_polarity_inversion, u8 *max_rate);
1011static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1012 unsigned int context, u64 err_status);
1013static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1014static void handle_dcc_err(struct hfi1_devdata *dd,
1015 unsigned int context, u64 err_status);
1016static void handle_lcb_err(struct hfi1_devdata *dd,
1017 unsigned int context, u64 err_status);
1018static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1019static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1020static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1021static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1023static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1024static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1025static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1026static void set_partition_keys(struct hfi1_pportdata *);
1027static const char *link_state_name(u32 state);
1028static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1029 u32 state);
1030static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1031 u64 *out_data);
1032static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1033static int thermal_init(struct hfi1_devdata *dd);
1034
1035static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1036 int msecs);
1037static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1038static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1039static void handle_temp_err(struct hfi1_devdata *);
1040static void dc_shutdown(struct hfi1_devdata *);
1041static void dc_start(struct hfi1_devdata *);
1042static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1043 unsigned int *np);
1044static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1045
1046/*
1047 * Error interrupt table entry. This is used as input to the interrupt
1048 * "clear down" routine used for all second tier error interrupt register.
1049 * Second tier interrupt registers have a single bit representing them
1050 * in the top-level CceIntStatus.
1051 */
1052struct err_reg_info {
1053 u32 status; /* status CSR offset */
1054 u32 clear; /* clear CSR offset */
1055 u32 mask; /* mask CSR offset */
1056 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1057 const char *desc;
1058};
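/*
 * Illustrative sketch of the "clear down" pattern described above. The
 * function name is hypothetical (it is not the driver's actual handler):
 * read the second-tier status CSR, acknowledge exactly the bits that were
 * seen, then hand the snapshot to the per-register handler.
 */
static void example_clear_down(struct hfi1_devdata *dd,
			       const struct err_reg_info *eri, u32 source)
{
	u64 reg = read_csr(dd, eri->status);

	write_csr(dd, eri->clear, reg);	/* ack only the bits just read */
	if (eri->handler)
		eri->handler(dd, source, reg);
}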
1059
1060#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1061#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1062#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1063
1064/*
1065 * Helpers for building HFI and DC error interrupt table entries. Different
1066 * helpers are needed because of inconsistent register names.
1067 */
1068#define EE(reg, handler, desc) \
1069 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1070 handler, desc }
1071#define DC_EE1(reg, handler, desc) \
1072 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1073#define DC_EE2(reg, handler, desc) \
1074 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1075
1076/*
1077 * Table of the "misc" grouping of error interrupts. Each entry refers to
1078 * another register containing more information.
1079 */
1080static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1081/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1082/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1083/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1084/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1085/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1086/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1087/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1088/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1089 /* the rest are reserved */
1090};
1091
1092/*
1093 * Index into the Various section of the interrupt sources
1094 * corresponding to the Critical Temperature interrupt.
1095 */
1096#define TCRIT_INT_SOURCE 4
1097
1098/*
1099 * SDMA error interrupt entry - refers to another register containing more
1100 * information.
1101 */
1102static const struct err_reg_info sdma_eng_err =
1103 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1104
1105static const struct err_reg_info various_err[NUM_VARIOUS] = {
1106/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1107/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1108/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1109/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1110/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1111 /* rest are reserved */
1112};
1113
1114/*
1115 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1116 * register can not be derived from the MTU value because 10K is not
1117 * a power of 2. Therefore, we need a constant. Everything else can
1118 * be calculated.
1119 */
1120#define DCC_CFG_PORT_MTU_CAP_10240 7
1121
1122/*
1123 * Table of the DC grouping of error interrupts. Each entry refers to
1124 * another register containing more information.
1125 */
1126static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1127/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1128/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1129/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1130/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1131 /* the rest are reserved */
1132};
1133
1134struct cntr_entry {
1135 /*
1136 * counter name
1137 */
1138 char *name;
1139
1140 /*
1141 * csr to read for name (if applicable)
1142 */
1143 u64 csr;
1144
1145 /*
1146 * offset into dd or ppd to store the counter's value
1147 */
1148 int offset;
1149
1150 /*
1151 * flags
1152 */
1153 u8 flags;
1154
1155 /*
1156 * accessor for stat element, context either dd or ppd
1157 */
1158 u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1159 int mode, u64 data);
1160};
1161
1162#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1163#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1164
1165#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1166{ \
1167 name, \
1168 csr, \
1169 offset, \
1170 flags, \
1171 accessor \
1172}
1173
1174/* 32bit RXE */
1175#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1176CNTR_ELEM(#name, \
1177 (counter * 8 + RCV_COUNTER_ARRAY32), \
1178 0, flags | CNTR_32BIT, \
1179 port_access_u32_csr)
1180
1181#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1182CNTR_ELEM(#name, \
1183 (counter * 8 + RCV_COUNTER_ARRAY32), \
1184 0, flags | CNTR_32BIT, \
1185 dev_access_u32_csr)
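/*
 * Illustrative expansion (hypothetical counter name and index): an entry
 * such as RXE32_DEV_CNTR_ELEM(RcvExample, 5, CNTR_NORMAL) expands to
 *
 *	CNTR_ELEM("RcvExample", 5 * 8 + RCV_COUNTER_ARRAY32, 0,
 *		  CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr)
 *
 * i.e. a 32-bit device counter read from the RXE counter array CSR block
 * through dev_access_u32_csr().
 */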
1186
1187/* 64bit RXE */
1188#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1189CNTR_ELEM(#name, \
1190 (counter * 8 + RCV_COUNTER_ARRAY64), \
1191 0, flags, \
1192 port_access_u64_csr)
1193
1194#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1195CNTR_ELEM(#name, \
1196 (counter * 8 + RCV_COUNTER_ARRAY64), \
1197 0, flags, \
1198 dev_access_u64_csr)
1199
1200#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1201#define OVR_ELM(ctx) \
1202CNTR_ELEM("RcvHdrOvr" #ctx, \
1203 (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1204 0, CNTR_NORMAL, port_access_u64_csr)
1205
1206/* 32bit TXE */
1207#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1208CNTR_ELEM(#name, \
1209 (counter * 8 + SEND_COUNTER_ARRAY32), \
1210 0, flags | CNTR_32BIT, \
1211 port_access_u32_csr)
1212
1213/* 64bit TXE */
1214#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1215CNTR_ELEM(#name, \
1216 (counter * 8 + SEND_COUNTER_ARRAY64), \
1217 0, flags, \
1218 port_access_u64_csr)
1219
1220# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1221CNTR_ELEM(#name,\
1222 counter * 8 + SEND_COUNTER_ARRAY64, \
1223 0, \
1224 flags, \
1225 dev_access_u64_csr)
1226
1227/* CCE */
1228#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1229CNTR_ELEM(#name, \
1230 (counter * 8 + CCE_COUNTER_ARRAY32), \
1231 0, flags | CNTR_32BIT, \
1232 dev_access_u32_csr)
1233
1234#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1235CNTR_ELEM(#name, \
1236 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1237 0, flags | CNTR_32BIT, \
1238 dev_access_u32_csr)
1239
1240/* DC */
1241#define DC_PERF_CNTR(name, counter, flags) \
1242CNTR_ELEM(#name, \
1243 counter, \
1244 0, \
1245 flags, \
1246 dev_access_u64_csr)
1247
1248#define DC_PERF_CNTR_LCB(name, counter, flags) \
1249CNTR_ELEM(#name, \
1250 counter, \
1251 0, \
1252 flags, \
1253 dc_access_lcb_cntr)
1254
1255/* ibp counters */
1256#define SW_IBP_CNTR(name, cntr) \
1257CNTR_ELEM(#name, \
1258 0, \
1259 0, \
1260 CNTR_SYNTH, \
1261 access_ibp_##cntr)
1262
1263u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1264{
1265 if (dd->flags & HFI1_PRESENT) {
1266 return readq((void __iomem *)dd->kregbase + offset);
1267 }
1268 return -1;
1269}
1270
1271void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1272{
1273 if (dd->flags & HFI1_PRESENT)
1274 writeq(value, (void __iomem *)dd->kregbase + offset);
1275}
1276
1277void __iomem *get_csr_addr(
1278 struct hfi1_devdata *dd,
1279 u32 offset)
1280{
1281 return (void __iomem *)dd->kregbase + offset;
1282}
1283
1284static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1285 int mode, u64 value)
1286{
1287 u64 ret;
1288
1289 if (mode == CNTR_MODE_R) {
1290 ret = read_csr(dd, csr);
1291 } else if (mode == CNTR_MODE_W) {
1292 write_csr(dd, csr, value);
1293 ret = value;
1294 } else {
1295 dd_dev_err(dd, "Invalid cntr register access mode");
1296 return 0;
1297 }
1298
1299 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1300 return ret;
1301}
1302
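/*
 * Editorial note (not in the original source): the accessors below all
 * follow the same calling convention: "mode" selects CNTR_MODE_R (read,
 * "data" ignored) or CNTR_MODE_W (write "data"), and "vl" is either a
 * VL/engine index or CNTR_INVALID_VL for counters that are not per-VL.
 */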
1303/* Dev Access */
1304static u64 dev_access_u32_csr(const struct cntr_entry *entry,
17fb4f29 1305 void *context, int vl, int mode, u64 data)
77241056 1306{
a787bde8 1307 struct hfi1_devdata *dd = context;
a699c6c2 1308 u64 csr = entry->csr;
77241056 1309
1310 if (entry->flags & CNTR_SDMA) {
1311 if (vl == CNTR_INVALID_VL)
1312 return 0;
1313 csr += 0x100 * vl;
1314 } else {
1315 if (vl != CNTR_INVALID_VL)
1316 return 0;
1317 }
1318 return read_write_csr(dd, csr, mode, data);
1319}
1320
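/*
 * Editorial note (not in the original source): for CNTR_SDMA entries the
 * "vl" argument above is reused as the SDMA engine index and the CSR
 * address is stepped by 0x100 bytes per engine; plain device counters
 * only accept CNTR_INVALID_VL.
 */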
1321static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1322 void *context, int idx, int mode, u64 data)
1323{
1324 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1325
1326 if (dd->per_sdma && idx < dd->num_sdma)
1327 return dd->per_sdma[idx].err_cnt;
1328 return 0;
1329}
1330
1331static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1332 void *context, int idx, int mode, u64 data)
1333{
1334 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1335
1336 if (dd->per_sdma && idx < dd->num_sdma)
1337 return dd->per_sdma[idx].sdma_int_cnt;
1338 return 0;
1339}
1340
1341static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1342 void *context, int idx, int mode, u64 data)
1343{
1344 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1345
1346 if (dd->per_sdma && idx < dd->num_sdma)
1347 return dd->per_sdma[idx].idle_int_cnt;
1348 return 0;
1349}
1350
1351static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1352 void *context, int idx, int mode,
1353 u64 data)
1354{
1355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1356
1357 if (dd->per_sdma && idx < dd->num_sdma)
1358 return dd->per_sdma[idx].progress_int_cnt;
1359 return 0;
1360}
1361
1362static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
17fb4f29 1363 int vl, int mode, u64 data)
77241056 1364{
a787bde8 1365 struct hfi1_devdata *dd = context;
1366
1367 u64 val = 0;
1368 u64 csr = entry->csr;
1369
1370 if (entry->flags & CNTR_VL) {
1371 if (vl == CNTR_INVALID_VL)
1372 return 0;
1373 csr += 8 * vl;
1374 } else {
1375 if (vl != CNTR_INVALID_VL)
1376 return 0;
1377 }
1378
1379 val = read_write_csr(dd, csr, mode, data);
1380 return val;
1381}
1382
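/*
 * Editorial note (not in the original source): CNTR_VL entries are
 * per-virtual-lane; the 64-bit counter CSRs for successive VLs are 8
 * bytes apart, hence the "csr += 8 * vl" above (port_access_u64_csr()
 * below uses the same layout).
 */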
1383static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
17fb4f29 1384 int vl, int mode, u64 data)
77241056 1385{
a787bde8 1386 struct hfi1_devdata *dd = context;
1387 u32 csr = entry->csr;
1388 int ret = 0;
1389
1390 if (vl != CNTR_INVALID_VL)
1391 return 0;
1392 if (mode == CNTR_MODE_R)
1393 ret = read_lcb_csr(dd, csr, &data);
1394 else if (mode == CNTR_MODE_W)
1395 ret = write_lcb_csr(dd, csr, data);
1396
1397 if (ret) {
1398 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1399 return 0;
1400 }
1401
1402 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1403 return data;
1404}
1405
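/*
 * Editorial note (not in the original source): unlike the plain CSR
 * accessors, LCB counter access goes through read_lcb_csr()/
 * write_lcb_csr(), which can fail when the LCB is not currently
 * accessible; the accessor above logs that case and reports 0.
 */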
1406/* Port Access */
1407static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
17fb4f29 1408 int vl, int mode, u64 data)
77241056 1409{
a787bde8 1410 struct hfi1_pportdata *ppd = context;
1411
1412 if (vl != CNTR_INVALID_VL)
1413 return 0;
1414 return read_write_csr(ppd->dd, entry->csr, mode, data);
1415}
1416
1417static u64 port_access_u64_csr(const struct cntr_entry *entry,
17fb4f29 1418 void *context, int vl, int mode, u64 data)
77241056 1419{
a787bde8 1420 struct hfi1_pportdata *ppd = context;
1421 u64 val;
1422 u64 csr = entry->csr;
1423
1424 if (entry->flags & CNTR_VL) {
1425 if (vl == CNTR_INVALID_VL)
1426 return 0;
1427 csr += 8 * vl;
1428 } else {
1429 if (vl != CNTR_INVALID_VL)
1430 return 0;
1431 }
1432 val = read_write_csr(ppd->dd, csr, mode, data);
1433 return val;
1434}
1435
1436/* Software defined */
1437static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1438 u64 data)
1439{
1440 u64 ret;
1441
1442 if (mode == CNTR_MODE_R) {
1443 ret = *cntr;
1444 } else if (mode == CNTR_MODE_W) {
1445 *cntr = data;
1446 ret = data;
1447 } else {
1448 dd_dev_err(dd, "Invalid cntr sw access mode");
1449 return 0;
1450 }
1451
1452 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1453
1454 return ret;
1455}
1456
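/*
 * Editorial note (not in the original source): "software" counters are
 * plain u64 fields in driver memory (for example ppd->link_downed just
 * below); read_write_sw() gives them the same read/write interface as the
 * hardware CSR counters so they can share the counter-table machinery.
 */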
1457static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
17fb4f29 1458 int vl, int mode, u64 data)
77241056 1459{
a787bde8 1460 struct hfi1_pportdata *ppd = context;
1461
1462 if (vl != CNTR_INVALID_VL)
1463 return 0;
1464 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1465}
1466
1467static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
17fb4f29 1468 int vl, int mode, u64 data)
77241056 1469{
a787bde8 1470 struct hfi1_pportdata *ppd = context;
1471
1472 if (vl != CNTR_INVALID_VL)
1473 return 0;
1474 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1475}
1476
1477static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1478 void *context, int vl, int mode,
1479 u64 data)
1480{
1481 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1482
1483 if (vl != CNTR_INVALID_VL)
1484 return 0;
1485 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1486}
1487
77241056 1488static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
17fb4f29 1489 void *context, int vl, int mode, u64 data)
77241056 1490{
1491 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1492 u64 zero = 0;
1493 u64 *counter;
77241056 1494
1495 if (vl == CNTR_INVALID_VL)
1496 counter = &ppd->port_xmit_discards;
1497 else if (vl >= 0 && vl < C_VL_COUNT)
1498 counter = &ppd->port_xmit_discards_vl[vl];
1499 else
1500 counter = &zero;
77241056 1501
69a00b8e 1502 return read_write_sw(ppd->dd, counter, mode, data);
1503}
1504
1505static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1506 void *context, int vl, int mode,
1507 u64 data)
77241056 1508{
a787bde8 1509 struct hfi1_pportdata *ppd = context;
1510
1511 if (vl != CNTR_INVALID_VL)
1512 return 0;
1513
1514 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1515 mode, data);
1516}
1517
1518static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
17fb4f29 1519 void *context, int vl, int mode, u64 data)
77241056 1520{
a787bde8 1521 struct hfi1_pportdata *ppd = context;
1522
1523 if (vl != CNTR_INVALID_VL)
1524 return 0;
1525
1526 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1527 mode, data);
1528}
1529
1530u64 get_all_cpu_total(u64 __percpu *cntr)
1531{
1532 int cpu;
1533 u64 counter = 0;
1534
1535 for_each_possible_cpu(cpu)
1536 counter += *per_cpu_ptr(cntr, cpu);
1537 return counter;
1538}
1539
1540static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1541 u64 __percpu *cntr,
1542 int vl, int mode, u64 data)
1543{
1544 u64 ret = 0;
1545
1546 if (vl != CNTR_INVALID_VL)
1547 return 0;
1548
1549 if (mode == CNTR_MODE_R) {
1550 ret = get_all_cpu_total(cntr) - *z_val;
1551 } else if (mode == CNTR_MODE_W) {
1552 /* A write can only zero the counter */
1553 if (data == 0)
1554 *z_val = get_all_cpu_total(cntr);
1555 else
1556 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1557 } else {
1558 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1559 return 0;
1560 }
1561
1562 return ret;
1563}
1564
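/*
 * Editorial note (not in the original source): per-CPU counters are never
 * reset in place.  A read reports sum-over-CPUs minus the saved zero
 * value *z_val, and a write of 0 "zeroes" the counter by snapshotting the
 * current total into *z_val.  For example, if the per-CPU values sum to
 * 150 and *z_val is 100, a read returns 50; writing 0 sets *z_val to 150
 * and subsequent reads start again from 0.
 */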
1565static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1566 void *context, int vl, int mode, u64 data)
1567{
a787bde8 1568 struct hfi1_devdata *dd = context;
1569
1570 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1571 mode, data);
1572}
1573
1574static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
17fb4f29 1575 void *context, int vl, int mode, u64 data)
77241056 1576{
a787bde8 1577 struct hfi1_devdata *dd = context;
1578
1579 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1580 mode, data);
1581}
1582
1583static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1584 void *context, int vl, int mode, u64 data)
1585{
a787bde8 1586 struct hfi1_devdata *dd = context;
1587
1588 return dd->verbs_dev.n_piowait;
1589}
1590
1591static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1592 void *context, int vl, int mode, u64 data)
1593{
1594 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1595
1596 return dd->verbs_dev.n_piodrain;
1597}
1598
1599static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1600 void *context, int vl, int mode, u64 data)
1601{
a787bde8 1602 struct hfi1_devdata *dd = context;
1603
1604 return dd->verbs_dev.n_txwait;
1605}
1606
1607static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1608 void *context, int vl, int mode, u64 data)
1609{
a787bde8 1610 struct hfi1_devdata *dd = context;
1611
1612 return dd->verbs_dev.n_kmem_wait;
1613}
1614
b421922e 1615static u64 access_sw_send_schedule(const struct cntr_entry *entry,
17fb4f29 1616 void *context, int vl, int mode, u64 data)
1617{
1618 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1619
1620 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1621 mode, data);
1622}
1623
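/*
 * Editorial note (not in the original source): the long run of accessors
 * below follows one pattern: each returns the element of a per-device
 * software counter array (misc_err_status_cnt[], cce_err_status_cnt[],
 * rcv_err_status_cnt[], send_pio_err_status_cnt[], and so on) whose index
 * corresponds to a bit position in the matching hardware error-status
 * register; the counts themselves appear to be incremented by the
 * corresponding error-interrupt handlers elsewhere in the driver.
 */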
1624/* Software counters for the error status bits within MISC_ERR_STATUS */
1625static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1626 void *context, int vl, int mode,
1627 u64 data)
1628{
1629 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1630
1631 return dd->misc_err_status_cnt[12];
1632}
1633
1634static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1635 void *context, int vl, int mode,
1636 u64 data)
1637{
1638 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1639
1640 return dd->misc_err_status_cnt[11];
1641}
1642
1643static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1644 void *context, int vl, int mode,
1645 u64 data)
1646{
1647 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1648
1649 return dd->misc_err_status_cnt[10];
1650}
1651
1652static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1653 void *context, int vl,
1654 int mode, u64 data)
1655{
1656 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1657
1658 return dd->misc_err_status_cnt[9];
1659}
1660
1661static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1662 void *context, int vl, int mode,
1663 u64 data)
1664{
1665 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1666
1667 return dd->misc_err_status_cnt[8];
1668}
1669
1670static u64 access_misc_efuse_read_bad_addr_err_cnt(
1671 const struct cntr_entry *entry,
1672 void *context, int vl, int mode, u64 data)
1673{
1674 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1675
1676 return dd->misc_err_status_cnt[7];
1677}
1678
1679static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1680 void *context, int vl,
1681 int mode, u64 data)
1682{
1683 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1684
1685 return dd->misc_err_status_cnt[6];
1686}
1687
1688static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1689 void *context, int vl, int mode,
1690 u64 data)
1691{
1692 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1693
1694 return dd->misc_err_status_cnt[5];
1695}
1696
1697static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1698 void *context, int vl, int mode,
1699 u64 data)
1700{
1701 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1702
1703 return dd->misc_err_status_cnt[4];
1704}
1705
1706static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1707 void *context, int vl,
1708 int mode, u64 data)
1709{
1710 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1711
1712 return dd->misc_err_status_cnt[3];
1713}
1714
1715static u64 access_misc_csr_write_bad_addr_err_cnt(
1716 const struct cntr_entry *entry,
1717 void *context, int vl, int mode, u64 data)
1718{
1719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1720
1721 return dd->misc_err_status_cnt[2];
1722}
1723
1724static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1725 void *context, int vl,
1726 int mode, u64 data)
1727{
1728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1729
1730 return dd->misc_err_status_cnt[1];
1731}
1732
1733static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1734 void *context, int vl, int mode,
1735 u64 data)
1736{
1737 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1738
1739 return dd->misc_err_status_cnt[0];
1740}
1741
1742/*
1743 * Software counter for the aggregate of
1744 * individual CceErrStatus counters
1745 */
1746static u64 access_sw_cce_err_status_aggregated_cnt(
1747 const struct cntr_entry *entry,
1748 void *context, int vl, int mode, u64 data)
1749{
1750 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1751
1752 return dd->sw_cce_err_status_aggregate;
1753}
1754
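/*
 * Editorial note (not in the original source): sw_cce_err_status_aggregate
 * is a single rollup value, as the comment above says; the per-bit
 * cce_err_status_cnt[] accessors that follow break the same information
 * out by individual CceErrStatus bit.
 */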
1755/*
1756 * Software counters corresponding to each of the
1757 * error status bits within CceErrStatus
1758 */
1759static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1760 void *context, int vl, int mode,
1761 u64 data)
1762{
1763 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1764
1765 return dd->cce_err_status_cnt[40];
1766}
1767
1768static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1769 void *context, int vl, int mode,
1770 u64 data)
1771{
1772 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1773
1774 return dd->cce_err_status_cnt[39];
1775}
1776
1777static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1778 void *context, int vl, int mode,
1779 u64 data)
1780{
1781 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1782
1783 return dd->cce_err_status_cnt[38];
1784}
1785
1786static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1787 void *context, int vl, int mode,
1788 u64 data)
1789{
1790 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1791
1792 return dd->cce_err_status_cnt[37];
1793}
1794
1795static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1796 void *context, int vl, int mode,
1797 u64 data)
1798{
1799 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1800
1801 return dd->cce_err_status_cnt[36];
1802}
1803
1804static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1805 const struct cntr_entry *entry,
1806 void *context, int vl, int mode, u64 data)
1807{
1808 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1809
1810 return dd->cce_err_status_cnt[35];
1811}
1812
1813static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1814 const struct cntr_entry *entry,
1815 void *context, int vl, int mode, u64 data)
1816{
1817 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1818
1819 return dd->cce_err_status_cnt[34];
1820}
1821
1822static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1823 void *context, int vl,
1824 int mode, u64 data)
1825{
1826 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1827
1828 return dd->cce_err_status_cnt[33];
1829}
1830
1831static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1832 void *context, int vl, int mode,
1833 u64 data)
1834{
1835 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1836
1837 return dd->cce_err_status_cnt[32];
1838}
1839
1840static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1841 void *context, int vl, int mode, u64 data)
1842{
1843 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1844
1845 return dd->cce_err_status_cnt[31];
1846}
1847
1848static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1849 void *context, int vl, int mode,
1850 u64 data)
1851{
1852 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1853
1854 return dd->cce_err_status_cnt[30];
1855}
1856
1857static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1858 void *context, int vl, int mode,
1859 u64 data)
1860{
1861 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1862
1863 return dd->cce_err_status_cnt[29];
1864}
1865
1866static u64 access_pcic_transmit_back_parity_err_cnt(
1867 const struct cntr_entry *entry,
1868 void *context, int vl, int mode, u64 data)
1869{
1870 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1871
1872 return dd->cce_err_status_cnt[28];
1873}
1874
1875static u64 access_pcic_transmit_front_parity_err_cnt(
1876 const struct cntr_entry *entry,
1877 void *context, int vl, int mode, u64 data)
1878{
1879 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1880
1881 return dd->cce_err_status_cnt[27];
1882}
1883
1884static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1885 void *context, int vl, int mode,
1886 u64 data)
1887{
1888 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1889
1890 return dd->cce_err_status_cnt[26];
1891}
1892
1893static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1894 void *context, int vl, int mode,
1895 u64 data)
1896{
1897 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1898
1899 return dd->cce_err_status_cnt[25];
1900}
1901
1902static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1903 void *context, int vl, int mode,
1904 u64 data)
1905{
1906 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1907
1908 return dd->cce_err_status_cnt[24];
1909}
1910
1911static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1912 void *context, int vl, int mode,
1913 u64 data)
1914{
1915 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1916
1917 return dd->cce_err_status_cnt[23];
1918}
1919
1920static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1921 void *context, int vl,
1922 int mode, u64 data)
1923{
1924 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1925
1926 return dd->cce_err_status_cnt[22];
1927}
1928
1929static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1930 void *context, int vl, int mode,
1931 u64 data)
1932{
1933 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1934
1935 return dd->cce_err_status_cnt[21];
1936}
1937
1938static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1939 const struct cntr_entry *entry,
1940 void *context, int vl, int mode, u64 data)
1941{
1942 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1943
1944 return dd->cce_err_status_cnt[20];
1945}
1946
1947static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1948 void *context, int vl,
1949 int mode, u64 data)
1950{
1951 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1952
1953 return dd->cce_err_status_cnt[19];
1954}
1955
1956static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1957 void *context, int vl, int mode,
1958 u64 data)
1959{
1960 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1961
1962 return dd->cce_err_status_cnt[18];
1963}
1964
1965static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1966 void *context, int vl, int mode,
1967 u64 data)
1968{
1969 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1970
1971 return dd->cce_err_status_cnt[17];
1972}
1973
1974static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1975 void *context, int vl, int mode,
1976 u64 data)
1977{
1978 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1979
1980 return dd->cce_err_status_cnt[16];
1981}
1982
1983static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1984 void *context, int vl, int mode,
1985 u64 data)
1986{
1987 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1988
1989 return dd->cce_err_status_cnt[15];
1990}
1991
1992static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1993 void *context, int vl,
1994 int mode, u64 data)
1995{
1996 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1997
1998 return dd->cce_err_status_cnt[14];
1999}
2000
2001static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2002 void *context, int vl, int mode,
2003 u64 data)
2004{
2005 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2006
2007 return dd->cce_err_status_cnt[13];
2008}
2009
2010static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2011 const struct cntr_entry *entry,
2012 void *context, int vl, int mode, u64 data)
2013{
2014 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2015
2016 return dd->cce_err_status_cnt[12];
2017}
2018
2019static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2020 const struct cntr_entry *entry,
2021 void *context, int vl, int mode, u64 data)
2022{
2023 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2024
2025 return dd->cce_err_status_cnt[11];
2026}
2027
2028static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2029 const struct cntr_entry *entry,
2030 void *context, int vl, int mode, u64 data)
2031{
2032 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2033
2034 return dd->cce_err_status_cnt[10];
2035}
2036
2037static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2038 const struct cntr_entry *entry,
2039 void *context, int vl, int mode, u64 data)
2040{
2041 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2042
2043 return dd->cce_err_status_cnt[9];
2044}
2045
2046static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2047 const struct cntr_entry *entry,
2048 void *context, int vl, int mode, u64 data)
2049{
2050 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2051
2052 return dd->cce_err_status_cnt[8];
2053}
2054
2055static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2056 void *context, int vl,
2057 int mode, u64 data)
2058{
2059 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2060
2061 return dd->cce_err_status_cnt[7];
2062}
2063
2064static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2065 const struct cntr_entry *entry,
2066 void *context, int vl, int mode, u64 data)
2067{
2068 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2069
2070 return dd->cce_err_status_cnt[6];
2071}
2072
2073static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2074 void *context, int vl, int mode,
2075 u64 data)
2076{
2077 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2078
2079 return dd->cce_err_status_cnt[5];
2080}
2081
2082static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2083 void *context, int vl, int mode,
2084 u64 data)
2085{
2086 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2087
2088 return dd->cce_err_status_cnt[4];
2089}
2090
2091static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2092 const struct cntr_entry *entry,
2093 void *context, int vl, int mode, u64 data)
2094{
2095 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2096
2097 return dd->cce_err_status_cnt[3];
2098}
2099
2100static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2101 void *context, int vl,
2102 int mode, u64 data)
2103{
2104 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2105
2106 return dd->cce_err_status_cnt[2];
2107}
2108
2109static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2110 void *context, int vl,
2111 int mode, u64 data)
2112{
2113 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2114
2115 return dd->cce_err_status_cnt[1];
2116}
2117
2118static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2119 void *context, int vl, int mode,
2120 u64 data)
2121{
2122 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2123
2124 return dd->cce_err_status_cnt[0];
2125}
2126
2127/*
2128 * Software counters corresponding to each of the
2129 * error status bits within RcvErrStatus
2130 */
2131static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2132 void *context, int vl, int mode,
2133 u64 data)
2134{
2135 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2136
2137 return dd->rcv_err_status_cnt[63];
2138}
2139
2140static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2141 void *context, int vl,
2142 int mode, u64 data)
2143{
2144 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2145
2146 return dd->rcv_err_status_cnt[62];
2147}
2148
2149static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2150 void *context, int vl, int mode,
2151 u64 data)
2152{
2153 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2154
2155 return dd->rcv_err_status_cnt[61];
2156}
2157
2158static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2159 void *context, int vl, int mode,
2160 u64 data)
2161{
2162 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2163
2164 return dd->rcv_err_status_cnt[60];
2165}
2166
2167static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2168 void *context, int vl,
2169 int mode, u64 data)
2170{
2171 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2172
2173 return dd->rcv_err_status_cnt[59];
2174}
2175
2176static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2177 void *context, int vl,
2178 int mode, u64 data)
2179{
2180 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2181
2182 return dd->rcv_err_status_cnt[58];
2183}
2184
2185static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2186 void *context, int vl, int mode,
2187 u64 data)
2188{
2189 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2190
2191 return dd->rcv_err_status_cnt[57];
2192}
2193
2194static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2195 void *context, int vl, int mode,
2196 u64 data)
2197{
2198 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2199
2200 return dd->rcv_err_status_cnt[56];
2201}
2202
2203static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2204 void *context, int vl, int mode,
2205 u64 data)
2206{
2207 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2208
2209 return dd->rcv_err_status_cnt[55];
2210}
2211
2212static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2213 const struct cntr_entry *entry,
2214 void *context, int vl, int mode, u64 data)
2215{
2216 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2217
2218 return dd->rcv_err_status_cnt[54];
2219}
2220
2221static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2222 const struct cntr_entry *entry,
2223 void *context, int vl, int mode, u64 data)
2224{
2225 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2226
2227 return dd->rcv_err_status_cnt[53];
2228}
2229
2230static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2231 void *context, int vl,
2232 int mode, u64 data)
2233{
2234 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2235
2236 return dd->rcv_err_status_cnt[52];
2237}
2238
2239static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2240 void *context, int vl,
2241 int mode, u64 data)
2242{
2243 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2244
2245 return dd->rcv_err_status_cnt[51];
2246}
2247
2248static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2249 void *context, int vl,
2250 int mode, u64 data)
2251{
2252 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2253
2254 return dd->rcv_err_status_cnt[50];
2255}
2256
2257static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2258 void *context, int vl,
2259 int mode, u64 data)
2260{
2261 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2262
2263 return dd->rcv_err_status_cnt[49];
2264}
2265
2266static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2267 void *context, int vl,
2268 int mode, u64 data)
2269{
2270 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2271
2272 return dd->rcv_err_status_cnt[48];
2273}
2274
2275static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2276 void *context, int vl,
2277 int mode, u64 data)
2278{
2279 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2280
2281 return dd->rcv_err_status_cnt[47];
2282}
2283
2284static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2285 void *context, int vl, int mode,
2286 u64 data)
2287{
2288 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2289
2290 return dd->rcv_err_status_cnt[46];
2291}
2292
2293static u64 access_rx_hq_intr_csr_parity_err_cnt(
2294 const struct cntr_entry *entry,
2295 void *context, int vl, int mode, u64 data)
2296{
2297 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2298
2299 return dd->rcv_err_status_cnt[45];
2300}
2301
2302static u64 access_rx_lookup_csr_parity_err_cnt(
2303 const struct cntr_entry *entry,
2304 void *context, int vl, int mode, u64 data)
2305{
2306 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2307
2308 return dd->rcv_err_status_cnt[44];
2309}
2310
2311static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2312 const struct cntr_entry *entry,
2313 void *context, int vl, int mode, u64 data)
2314{
2315 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2316
2317 return dd->rcv_err_status_cnt[43];
2318}
2319
2320static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2321 const struct cntr_entry *entry,
2322 void *context, int vl, int mode, u64 data)
2323{
2324 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2325
2326 return dd->rcv_err_status_cnt[42];
2327}
2328
2329static u64 access_rx_lookup_des_part2_parity_err_cnt(
2330 const struct cntr_entry *entry,
2331 void *context, int vl, int mode, u64 data)
2332{
2333 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2334
2335 return dd->rcv_err_status_cnt[41];
2336}
2337
2338static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2339 const struct cntr_entry *entry,
2340 void *context, int vl, int mode, u64 data)
2341{
2342 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2343
2344 return dd->rcv_err_status_cnt[40];
2345}
2346
2347static u64 access_rx_lookup_des_part1_unc_err_cnt(
2348 const struct cntr_entry *entry,
2349 void *context, int vl, int mode, u64 data)
2350{
2351 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2352
2353 return dd->rcv_err_status_cnt[39];
2354}
2355
2356static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2357 const struct cntr_entry *entry,
2358 void *context, int vl, int mode, u64 data)
2359{
2360 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2361
2362 return dd->rcv_err_status_cnt[38];
2363}
2364
2365static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2366 const struct cntr_entry *entry,
2367 void *context, int vl, int mode, u64 data)
2368{
2369 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2370
2371 return dd->rcv_err_status_cnt[37];
2372}
2373
2374static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2375 const struct cntr_entry *entry,
2376 void *context, int vl, int mode, u64 data)
2377{
2378 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2379
2380 return dd->rcv_err_status_cnt[36];
2381}
2382
2383static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2384 const struct cntr_entry *entry,
2385 void *context, int vl, int mode, u64 data)
2386{
2387 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2388
2389 return dd->rcv_err_status_cnt[35];
2390}
2391
2392static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2393 const struct cntr_entry *entry,
2394 void *context, int vl, int mode, u64 data)
2395{
2396 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2397
2398 return dd->rcv_err_status_cnt[34];
2399}
2400
2401static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2402 const struct cntr_entry *entry,
2403 void *context, int vl, int mode, u64 data)
2404{
2405 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2406
2407 return dd->rcv_err_status_cnt[33];
2408}
2409
2410static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2411 void *context, int vl, int mode,
2412 u64 data)
2413{
2414 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2415
2416 return dd->rcv_err_status_cnt[32];
2417}
2418
2419static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2420 void *context, int vl, int mode,
2421 u64 data)
2422{
2423 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2424
2425 return dd->rcv_err_status_cnt[31];
2426}
2427
2428static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2429 void *context, int vl, int mode,
2430 u64 data)
2431{
2432 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2433
2434 return dd->rcv_err_status_cnt[30];
2435}
2436
2437static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2438 void *context, int vl, int mode,
2439 u64 data)
2440{
2441 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2442
2443 return dd->rcv_err_status_cnt[29];
2444}
2445
2446static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2447 void *context, int vl,
2448 int mode, u64 data)
2449{
2450 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2451
2452 return dd->rcv_err_status_cnt[28];
2453}
2454
2455static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2456 const struct cntr_entry *entry,
2457 void *context, int vl, int mode, u64 data)
2458{
2459 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2460
2461 return dd->rcv_err_status_cnt[27];
2462}
2463
2464static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2465 const struct cntr_entry *entry,
2466 void *context, int vl, int mode, u64 data)
2467{
2468 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2469
2470 return dd->rcv_err_status_cnt[26];
2471}
2472
2473static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2474 const struct cntr_entry *entry,
2475 void *context, int vl, int mode, u64 data)
2476{
2477 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2478
2479 return dd->rcv_err_status_cnt[25];
2480}
2481
2482static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2483 const struct cntr_entry *entry,
2484 void *context, int vl, int mode, u64 data)
2485{
2486 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2487
2488 return dd->rcv_err_status_cnt[24];
2489}
2490
2491static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2492 const struct cntr_entry *entry,
2493 void *context, int vl, int mode, u64 data)
2494{
2495 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2496
2497 return dd->rcv_err_status_cnt[23];
2498}
2499
2500static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2501 const struct cntr_entry *entry,
2502 void *context, int vl, int mode, u64 data)
2503{
2504 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2505
2506 return dd->rcv_err_status_cnt[22];
2507}
2508
2509static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2510 const struct cntr_entry *entry,
2511 void *context, int vl, int mode, u64 data)
2512{
2513 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2514
2515 return dd->rcv_err_status_cnt[21];
2516}
2517
2518static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2519 const struct cntr_entry *entry,
2520 void *context, int vl, int mode, u64 data)
2521{
2522 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2523
2524 return dd->rcv_err_status_cnt[20];
2525}
2526
2527static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2528 const struct cntr_entry *entry,
2529 void *context, int vl, int mode, u64 data)
2530{
2531 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2532
2533 return dd->rcv_err_status_cnt[19];
2534}
2535
2536static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2537 void *context, int vl,
2538 int mode, u64 data)
2539{
2540 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2541
2542 return dd->rcv_err_status_cnt[18];
2543}
2544
2545static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2546 void *context, int vl,
2547 int mode, u64 data)
2548{
2549 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2550
2551 return dd->rcv_err_status_cnt[17];
2552}
2553
2554static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2555 const struct cntr_entry *entry,
2556 void *context, int vl, int mode, u64 data)
2557{
2558 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2559
2560 return dd->rcv_err_status_cnt[16];
2561}
2562
2563static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2564 const struct cntr_entry *entry,
2565 void *context, int vl, int mode, u64 data)
2566{
2567 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2568
2569 return dd->rcv_err_status_cnt[15];
2570}
2571
2572static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2573 void *context, int vl,
2574 int mode, u64 data)
2575{
2576 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2577
2578 return dd->rcv_err_status_cnt[14];
2579}
2580
2581static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2582 void *context, int vl,
2583 int mode, u64 data)
2584{
2585 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2586
2587 return dd->rcv_err_status_cnt[13];
2588}
2589
2590static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2591 void *context, int vl, int mode,
2592 u64 data)
2593{
2594 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2595
2596 return dd->rcv_err_status_cnt[12];
2597}
2598
2599static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2600 void *context, int vl, int mode,
2601 u64 data)
2602{
2603 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2604
2605 return dd->rcv_err_status_cnt[11];
2606}
2607
2608static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2609 void *context, int vl, int mode,
2610 u64 data)
2611{
2612 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2613
2614 return dd->rcv_err_status_cnt[10];
2615}
2616
2617static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2618 void *context, int vl, int mode,
2619 u64 data)
2620{
2621 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2622
2623 return dd->rcv_err_status_cnt[9];
2624}
2625
2626static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2627 void *context, int vl, int mode,
2628 u64 data)
2629{
2630 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2631
2632 return dd->rcv_err_status_cnt[8];
2633}
2634
2635static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2636 const struct cntr_entry *entry,
2637 void *context, int vl, int mode, u64 data)
2638{
2639 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2640
2641 return dd->rcv_err_status_cnt[7];
2642}
2643
2644static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2645 const struct cntr_entry *entry,
2646 void *context, int vl, int mode, u64 data)
2647{
2648 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2649
2650 return dd->rcv_err_status_cnt[6];
2651}
2652
2653static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2654 void *context, int vl, int mode,
2655 u64 data)
2656{
2657 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2658
2659 return dd->rcv_err_status_cnt[5];
2660}
2661
2662static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2663 void *context, int vl, int mode,
2664 u64 data)
2665{
2666 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2667
2668 return dd->rcv_err_status_cnt[4];
2669}
2670
2671static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2672 void *context, int vl, int mode,
2673 u64 data)
2674{
2675 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2676
2677 return dd->rcv_err_status_cnt[3];
2678}
2679
2680static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2681 void *context, int vl, int mode,
2682 u64 data)
2683{
2684 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2685
2686 return dd->rcv_err_status_cnt[2];
2687}
2688
2689static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2690 void *context, int vl, int mode,
2691 u64 data)
2692{
2693 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2694
2695 return dd->rcv_err_status_cnt[1];
2696}
2697
2698static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2699 void *context, int vl, int mode,
2700 u64 data)
2701{
2702 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2703
2704 return dd->rcv_err_status_cnt[0];
2705}
2706
2707/*
2708 * Software counters corresponding to each of the
2709 * error status bits within SendPioErrStatus
2710 */
2711static u64 access_pio_pec_sop_head_parity_err_cnt(
2712 const struct cntr_entry *entry,
2713 void *context, int vl, int mode, u64 data)
2714{
2715 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2716
2717 return dd->send_pio_err_status_cnt[35];
2718}
2719
2720static u64 access_pio_pcc_sop_head_parity_err_cnt(
2721 const struct cntr_entry *entry,
2722 void *context, int vl, int mode, u64 data)
2723{
2724 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2725
2726 return dd->send_pio_err_status_cnt[34];
2727}
2728
2729static u64 access_pio_last_returned_cnt_parity_err_cnt(
2730 const struct cntr_entry *entry,
2731 void *context, int vl, int mode, u64 data)
2732{
2733 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2734
2735 return dd->send_pio_err_status_cnt[33];
2736}
2737
2738static u64 access_pio_current_free_cnt_parity_err_cnt(
2739 const struct cntr_entry *entry,
2740 void *context, int vl, int mode, u64 data)
2741{
2742 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2743
2744 return dd->send_pio_err_status_cnt[32];
2745}
2746
2747static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2748 void *context, int vl, int mode,
2749 u64 data)
2750{
2751 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2752
2753 return dd->send_pio_err_status_cnt[31];
2754}
2755
2756static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2757 void *context, int vl, int mode,
2758 u64 data)
2759{
2760 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2761
2762 return dd->send_pio_err_status_cnt[30];
2763}
2764
2765static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2766 void *context, int vl, int mode,
2767 u64 data)
2768{
2769 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2770
2771 return dd->send_pio_err_status_cnt[29];
2772}
2773
2774static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2775 const struct cntr_entry *entry,
2776 void *context, int vl, int mode, u64 data)
2777{
2778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2779
2780 return dd->send_pio_err_status_cnt[28];
2781}
2782
2783static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2784 void *context, int vl, int mode,
2785 u64 data)
2786{
2787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2788
2789 return dd->send_pio_err_status_cnt[27];
2790}
2791
2792static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2793 void *context, int vl, int mode,
2794 u64 data)
2795{
2796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2797
2798 return dd->send_pio_err_status_cnt[26];
2799}
2800
2801static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2802 void *context, int vl,
2803 int mode, u64 data)
2804{
2805 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2806
2807 return dd->send_pio_err_status_cnt[25];
2808}
2809
2810static u64 access_pio_block_qw_count_parity_err_cnt(
2811 const struct cntr_entry *entry,
2812 void *context, int vl, int mode, u64 data)
2813{
2814 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2815
2816 return dd->send_pio_err_status_cnt[24];
2817}
2818
2819static u64 access_pio_write_qw_valid_parity_err_cnt(
2820 const struct cntr_entry *entry,
2821 void *context, int vl, int mode, u64 data)
2822{
2823 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2824
2825 return dd->send_pio_err_status_cnt[23];
2826}
2827
2828static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2829 void *context, int vl, int mode,
2830 u64 data)
2831{
2832 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2833
2834 return dd->send_pio_err_status_cnt[22];
2835}
2836
2837static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2838 void *context, int vl,
2839 int mode, u64 data)
2840{
2841 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2842
2843 return dd->send_pio_err_status_cnt[21];
2844}
2845
2846static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2847 void *context, int vl,
2848 int mode, u64 data)
2849{
2850 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2851
2852 return dd->send_pio_err_status_cnt[20];
2853}
2854
2855static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2856 void *context, int vl,
2857 int mode, u64 data)
2858{
2859 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2860
2861 return dd->send_pio_err_status_cnt[19];
2862}
2863
2864static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2865 const struct cntr_entry *entry,
2866 void *context, int vl, int mode, u64 data)
2867{
2868 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2869
2870 return dd->send_pio_err_status_cnt[18];
2871}
2872
2873static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2874 void *context, int vl, int mode,
2875 u64 data)
2876{
2877 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2878
2879 return dd->send_pio_err_status_cnt[17];
2880}
2881
2882static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2883 void *context, int vl, int mode,
2884 u64 data)
2885{
2886 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2887
2888 return dd->send_pio_err_status_cnt[16];
2889}
2890
2891static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2892 const struct cntr_entry *entry,
2893 void *context, int vl, int mode, u64 data)
2894{
2895 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2896
2897 return dd->send_pio_err_status_cnt[15];
2898}
2899
2900static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2901 const struct cntr_entry *entry,
2902 void *context, int vl, int mode, u64 data)
2903{
2904 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2905
2906 return dd->send_pio_err_status_cnt[14];
2907}
2908
2909static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2910 const struct cntr_entry *entry,
2911 void *context, int vl, int mode, u64 data)
2912{
2913 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2914
2915 return dd->send_pio_err_status_cnt[13];
2916}
2917
2918static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2919 const struct cntr_entry *entry,
2920 void *context, int vl, int mode, u64 data)
2921{
2922 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2923
2924 return dd->send_pio_err_status_cnt[12];
2925}
2926
2927static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2928 const struct cntr_entry *entry,
2929 void *context, int vl, int mode, u64 data)
2930{
2931 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2932
2933 return dd->send_pio_err_status_cnt[11];
2934}
2935
2936static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2937 const struct cntr_entry *entry,
2938 void *context, int vl, int mode, u64 data)
2939{
2940 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2941
2942 return dd->send_pio_err_status_cnt[10];
2943}
2944
2945static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2946 const struct cntr_entry *entry,
2947 void *context, int vl, int mode, u64 data)
2948{
2949 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2950
2951 return dd->send_pio_err_status_cnt[9];
2952}
2953
2954static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2955 const struct cntr_entry *entry,
2956 void *context, int vl, int mode, u64 data)
2957{
2958 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2959
2960 return dd->send_pio_err_status_cnt[8];
2961}
2962
2963static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2964 const struct cntr_entry *entry,
2965 void *context, int vl, int mode, u64 data)
2966{
2967 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2968
2969 return dd->send_pio_err_status_cnt[7];
2970}
2971
2972static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2973 void *context, int vl, int mode,
2974 u64 data)
2975{
2976 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2977
2978 return dd->send_pio_err_status_cnt[6];
2979}
2980
2981static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2982 void *context, int vl, int mode,
2983 u64 data)
2984{
2985 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2986
2987 return dd->send_pio_err_status_cnt[5];
2988}
2989
2990static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2991 void *context, int vl, int mode,
2992 u64 data)
2993{
2994 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2995
2996 return dd->send_pio_err_status_cnt[4];
2997}
2998
2999static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3000 void *context, int vl, int mode,
3001 u64 data)
3002{
3003 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3004
3005 return dd->send_pio_err_status_cnt[3];
3006}
3007
3008static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3009 void *context, int vl, int mode,
3010 u64 data)
3011{
3012 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3013
3014 return dd->send_pio_err_status_cnt[2];
3015}
3016
3017static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3018 void *context, int vl,
3019 int mode, u64 data)
3020{
3021 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3022
3023 return dd->send_pio_err_status_cnt[1];
3024}
3025
3026static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3027 void *context, int vl, int mode,
3028 u64 data)
3029{
3030 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3031
3032 return dd->send_pio_err_status_cnt[0];
3033}
3034
3035/*
3036 * Software counters corresponding to each of the
3037 * error status bits within SendDmaErrStatus
3038 */
3039static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3040 const struct cntr_entry *entry,
3041 void *context, int vl, int mode, u64 data)
3042{
3043 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3044
3045 return dd->send_dma_err_status_cnt[3];
3046}
3047
3048static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3049 const struct cntr_entry *entry,
3050 void *context, int vl, int mode, u64 data)
3051{
3052 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3053
3054 return dd->send_dma_err_status_cnt[2];
3055}
3056
3057static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3058 void *context, int vl, int mode,
3059 u64 data)
3060{
3061 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3062
3063 return dd->send_dma_err_status_cnt[1];
3064}
3065
3066static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3067 void *context, int vl, int mode,
3068 u64 data)
3069{
3070 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3071
3072 return dd->send_dma_err_status_cnt[0];
3073}
3074
3075/*
3076 * Software counters corresponding to each of the
3077 * error status bits within SendEgressErrStatus
3078 */
3079static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3080 const struct cntr_entry *entry,
3081 void *context, int vl, int mode, u64 data)
3082{
3083 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3084
3085 return dd->send_egress_err_status_cnt[63];
3086}
3087
3088static u64 access_tx_read_sdma_memory_csr_err_cnt(
3089 const struct cntr_entry *entry,
3090 void *context, int vl, int mode, u64 data)
3091{
3092 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3093
3094 return dd->send_egress_err_status_cnt[62];
3095}
3096
3097static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3098 void *context, int vl, int mode,
3099 u64 data)
3100{
3101 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3102
3103 return dd->send_egress_err_status_cnt[61];
3104}
3105
3106static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3107 void *context, int vl,
3108 int mode, u64 data)
3109{
3110 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3111
3112 return dd->send_egress_err_status_cnt[60];
3113}
3114
3115static u64 access_tx_read_sdma_memory_cor_err_cnt(
3116 const struct cntr_entry *entry,
3117 void *context, int vl, int mode, u64 data)
3118{
3119 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3120
3121 return dd->send_egress_err_status_cnt[59];
3122}
3123
3124static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3125 void *context, int vl, int mode,
3126 u64 data)
3127{
3128 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3129
3130 return dd->send_egress_err_status_cnt[58];
3131}
3132
3133static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3134 void *context, int vl, int mode,
3135 u64 data)
3136{
3137 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3138
3139 return dd->send_egress_err_status_cnt[57];
3140}
3141
3142static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3143 void *context, int vl, int mode,
3144 u64 data)
3145{
3146 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3147
3148 return dd->send_egress_err_status_cnt[56];
3149}
3150
3151static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3152 void *context, int vl, int mode,
3153 u64 data)
3154{
3155 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3156
3157 return dd->send_egress_err_status_cnt[55];
3158}
3159
3160static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3161 void *context, int vl, int mode,
3162 u64 data)
3163{
3164 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3165
3166 return dd->send_egress_err_status_cnt[54];
3167}
3168
3169static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3170 void *context, int vl, int mode,
3171 u64 data)
3172{
3173 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3174
3175 return dd->send_egress_err_status_cnt[53];
3176}
3177
3178static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3179 void *context, int vl, int mode,
3180 u64 data)
3181{
3182 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3183
3184 return dd->send_egress_err_status_cnt[52];
3185}
3186
3187static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3188 void *context, int vl, int mode,
3189 u64 data)
3190{
3191 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3192
3193 return dd->send_egress_err_status_cnt[51];
3194}
3195
3196static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3197 void *context, int vl, int mode,
3198 u64 data)
3199{
3200 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3201
3202 return dd->send_egress_err_status_cnt[50];
3203}
3204
3205static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3206 void *context, int vl, int mode,
3207 u64 data)
3208{
3209 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3210
3211 return dd->send_egress_err_status_cnt[49];
3212}
3213
3214static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3215 void *context, int vl, int mode,
3216 u64 data)
3217{
3218 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3219
3220 return dd->send_egress_err_status_cnt[48];
3221}
3222
3223static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3224 void *context, int vl, int mode,
3225 u64 data)
3226{
3227 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3228
3229 return dd->send_egress_err_status_cnt[47];
3230}
3231
3232static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3233 void *context, int vl, int mode,
3234 u64 data)
3235{
3236 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3237
3238 return dd->send_egress_err_status_cnt[46];
3239}
3240
3241static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3242 void *context, int vl, int mode,
3243 u64 data)
3244{
3245 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3246
3247 return dd->send_egress_err_status_cnt[45];
3248}
3249
3250static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3251 void *context, int vl,
3252 int mode, u64 data)
3253{
3254 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3255
3256 return dd->send_egress_err_status_cnt[44];
3257}
3258
3259static u64 access_tx_read_sdma_memory_unc_err_cnt(
3260 const struct cntr_entry *entry,
3261 void *context, int vl, int mode, u64 data)
3262{
3263 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3264
3265 return dd->send_egress_err_status_cnt[43];
3266}
3267
3268static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3269 void *context, int vl, int mode,
3270 u64 data)
3271{
3272 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3273
3274 return dd->send_egress_err_status_cnt[42];
3275}
3276
3277static u64 access_tx_credit_return_partiy_err_cnt(
3278 const struct cntr_entry *entry,
3279 void *context, int vl, int mode, u64 data)
3280{
3281 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3282
3283 return dd->send_egress_err_status_cnt[41];
3284}
3285
3286static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3287 const struct cntr_entry *entry,
3288 void *context, int vl, int mode, u64 data)
3289{
3290 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3291
3292 return dd->send_egress_err_status_cnt[40];
3293}
3294
3295static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3296 const struct cntr_entry *entry,
3297 void *context, int vl, int mode, u64 data)
3298{
3299 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3300
3301 return dd->send_egress_err_status_cnt[39];
3302}
3303
3304static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3305 const struct cntr_entry *entry,
3306 void *context, int vl, int mode, u64 data)
3307{
3308 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3309
3310 return dd->send_egress_err_status_cnt[38];
3311}
3312
3313static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3314 const struct cntr_entry *entry,
3315 void *context, int vl, int mode, u64 data)
3316{
3317 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3318
3319 return dd->send_egress_err_status_cnt[37];
3320}
3321
3322static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3323 const struct cntr_entry *entry,
3324 void *context, int vl, int mode, u64 data)
3325{
3326 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3327
3328 return dd->send_egress_err_status_cnt[36];
3329}
3330
3331static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3332 const struct cntr_entry *entry,
3333 void *context, int vl, int mode, u64 data)
3334{
3335 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3336
3337 return dd->send_egress_err_status_cnt[35];
3338}
3339
3340static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3341 const struct cntr_entry *entry,
3342 void *context, int vl, int mode, u64 data)
3343{
3344 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3345
3346 return dd->send_egress_err_status_cnt[34];
3347}
3348
3349static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3350 const struct cntr_entry *entry,
3351 void *context, int vl, int mode, u64 data)
3352{
3353 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3354
3355 return dd->send_egress_err_status_cnt[33];
3356}
3357
3358static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3359 const struct cntr_entry *entry,
3360 void *context, int vl, int mode, u64 data)
3361{
3362 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3363
3364 return dd->send_egress_err_status_cnt[32];
3365}
3366
3367static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3368 const struct cntr_entry *entry,
3369 void *context, int vl, int mode, u64 data)
3370{
3371 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3372
3373 return dd->send_egress_err_status_cnt[31];
3374}
3375
3376static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3377 const struct cntr_entry *entry,
3378 void *context, int vl, int mode, u64 data)
3379{
3380 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3381
3382 return dd->send_egress_err_status_cnt[30];
3383}
3384
3385static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3386 const struct cntr_entry *entry,
3387 void *context, int vl, int mode, u64 data)
3388{
3389 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3390
3391 return dd->send_egress_err_status_cnt[29];
3392}
3393
3394static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3395 const struct cntr_entry *entry,
3396 void *context, int vl, int mode, u64 data)
3397{
3398 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3399
3400 return dd->send_egress_err_status_cnt[28];
3401}
3402
3403static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3404 const struct cntr_entry *entry,
3405 void *context, int vl, int mode, u64 data)
3406{
3407 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3408
3409 return dd->send_egress_err_status_cnt[27];
3410}
3411
3412static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3413 const struct cntr_entry *entry,
3414 void *context, int vl, int mode, u64 data)
3415{
3416 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3417
3418 return dd->send_egress_err_status_cnt[26];
3419}
3420
3421static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3422 const struct cntr_entry *entry,
3423 void *context, int vl, int mode, u64 data)
3424{
3425 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3426
3427 return dd->send_egress_err_status_cnt[25];
3428}
3429
3430static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3431 const struct cntr_entry *entry,
3432 void *context, int vl, int mode, u64 data)
3433{
3434 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3435
3436 return dd->send_egress_err_status_cnt[24];
3437}
3438
3439static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3440 const struct cntr_entry *entry,
3441 void *context, int vl, int mode, u64 data)
3442{
3443 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3444
3445 return dd->send_egress_err_status_cnt[23];
3446}
3447
3448static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3449 const struct cntr_entry *entry,
3450 void *context, int vl, int mode, u64 data)
3451{
3452 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3453
3454 return dd->send_egress_err_status_cnt[22];
3455}
3456
3457static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3458 const struct cntr_entry *entry,
3459 void *context, int vl, int mode, u64 data)
3460{
3461 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3462
3463 return dd->send_egress_err_status_cnt[21];
3464}
3465
3466static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3467 const struct cntr_entry *entry,
3468 void *context, int vl, int mode, u64 data)
3469{
3470 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3471
3472 return dd->send_egress_err_status_cnt[20];
3473}
3474
3475static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3476 const struct cntr_entry *entry,
3477 void *context, int vl, int mode, u64 data)
3478{
3479 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3480
3481 return dd->send_egress_err_status_cnt[19];
3482}
3483
3484static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3485 const struct cntr_entry *entry,
3486 void *context, int vl, int mode, u64 data)
3487{
3488 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3489
3490 return dd->send_egress_err_status_cnt[18];
3491}
3492
3493static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3494 const struct cntr_entry *entry,
3495 void *context, int vl, int mode, u64 data)
3496{
3497 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3498
3499 return dd->send_egress_err_status_cnt[17];
3500}
3501
3502static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3503 const struct cntr_entry *entry,
3504 void *context, int vl, int mode, u64 data)
3505{
3506 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3507
3508 return dd->send_egress_err_status_cnt[16];
3509}
3510
3511static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3512 void *context, int vl, int mode,
3513 u64 data)
3514{
3515 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3516
3517 return dd->send_egress_err_status_cnt[15];
3518}
3519
3520static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3521 void *context, int vl,
3522 int mode, u64 data)
3523{
3524 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3525
3526 return dd->send_egress_err_status_cnt[14];
3527}
3528
3529static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3530 void *context, int vl, int mode,
3531 u64 data)
3532{
3533 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3534
3535 return dd->send_egress_err_status_cnt[13];
3536}
3537
3538static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3539 void *context, int vl, int mode,
3540 u64 data)
3541{
3542 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3543
3544 return dd->send_egress_err_status_cnt[12];
3545}
3546
3547static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3548 const struct cntr_entry *entry,
3549 void *context, int vl, int mode, u64 data)
3550{
3551 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3552
3553 return dd->send_egress_err_status_cnt[11];
3554}
3555
3556static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3557 void *context, int vl, int mode,
3558 u64 data)
3559{
3560 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3561
3562 return dd->send_egress_err_status_cnt[10];
3563}
3564
3565static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3566 void *context, int vl, int mode,
3567 u64 data)
3568{
3569 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3570
3571 return dd->send_egress_err_status_cnt[9];
3572}
3573
3574static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3575 const struct cntr_entry *entry,
3576 void *context, int vl, int mode, u64 data)
3577{
3578 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3579
3580 return dd->send_egress_err_status_cnt[8];
3581}
3582
3583static u64 access_tx_pio_launch_intf_parity_err_cnt(
3584 const struct cntr_entry *entry,
3585 void *context, int vl, int mode, u64 data)
3586{
3587 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3588
3589 return dd->send_egress_err_status_cnt[7];
3590}
3591
3592static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3593 void *context, int vl, int mode,
3594 u64 data)
3595{
3596 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3597
3598 return dd->send_egress_err_status_cnt[6];
3599}
3600
3601static u64 access_tx_incorrect_link_state_err_cnt(
3602 const struct cntr_entry *entry,
3603 void *context, int vl, int mode, u64 data)
3604{
3605 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3606
3607 return dd->send_egress_err_status_cnt[5];
3608}
3609
3610static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3611 void *context, int vl, int mode,
3612 u64 data)
3613{
3614 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3615
3616 return dd->send_egress_err_status_cnt[4];
3617}
3618
3619static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3620 const struct cntr_entry *entry,
3621 void *context, int vl, int mode, u64 data)
3622{
3623 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3624
3625 return dd->send_egress_err_status_cnt[3];
3626}
3627
3628static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3629 void *context, int vl, int mode,
3630 u64 data)
3631{
3632 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3633
3634 return dd->send_egress_err_status_cnt[2];
3635}
3636
3637static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3638 const struct cntr_entry *entry,
3639 void *context, int vl, int mode, u64 data)
3640{
3641 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3642
3643 return dd->send_egress_err_status_cnt[1];
3644}
3645
3646static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3647 const struct cntr_entry *entry,
3648 void *context, int vl, int mode, u64 data)
3649{
3650 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3651
3652 return dd->send_egress_err_status_cnt[0];
3653}
3654
3655/*
3656 * Software counters corresponding to each of the
3657 * error status bits within SendErrStatus
3658 */
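/*
 * The three accessors below cover SendErrStatus bits 2..0: CSR write bad
 * address, CSR read bad address, and CSR parity errors.
 */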
3659static u64 access_send_csr_write_bad_addr_err_cnt(
3660 const struct cntr_entry *entry,
3661 void *context, int vl, int mode, u64 data)
3662{
3663 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3664
3665 return dd->send_err_status_cnt[2];
3666}
3667
3668static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3669 void *context, int vl,
3670 int mode, u64 data)
3671{
3672 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3673
3674 return dd->send_err_status_cnt[1];
3675}
3676
3677static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3678 void *context, int vl, int mode,
3679 u64 data)
3680{
3681 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3682
3683 return dd->send_err_status_cnt[0];
3684}
3685
3686/*
3687 * Software counters corresponding to each of the
3688 * error status bits within SendCtxtErrStatus
3689 */
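/*
 * The five accessors below cover SendCtxtErrStatus bits 4..0, kept per
 * device in dd->sw_ctxt_err_status_cnt[].
 */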
3690static u64 access_pio_write_out_of_bounds_err_cnt(
3691 const struct cntr_entry *entry,
3692 void *context, int vl, int mode, u64 data)
3693{
3694 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3695
3696 return dd->sw_ctxt_err_status_cnt[4];
3697}
3698
3699static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3700 void *context, int vl, int mode,
3701 u64 data)
3702{
3703 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3704
3705 return dd->sw_ctxt_err_status_cnt[3];
3706}
3707
3708static u64 access_pio_write_crosses_boundary_err_cnt(
3709 const struct cntr_entry *entry,
3710 void *context, int vl, int mode, u64 data)
3711{
3712 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3713
3714 return dd->sw_ctxt_err_status_cnt[2];
3715}
3716
3717static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3718 void *context, int vl,
3719 int mode, u64 data)
3720{
3721 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3722
3723 return dd->sw_ctxt_err_status_cnt[1];
3724}
3725
3726static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3727 void *context, int vl, int mode,
3728 u64 data)
3729{
3730 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3731
3732 return dd->sw_ctxt_err_status_cnt[0];
3733}
3734
3735/*
3736 * Software counters corresponding to each of the
3737 * error status bits within SendDmaEngErrStatus
3738 */
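/*
 * The accessors below cover SendDmaEngErrStatus bits 23..0, kept per
 * device in dd->sw_send_dma_eng_err_status_cnt[].
 */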
3739static u64 access_sdma_header_request_fifo_cor_err_cnt(
3740 const struct cntr_entry *entry,
3741 void *context, int vl, int mode, u64 data)
3742{
3743 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3744
3745 return dd->sw_send_dma_eng_err_status_cnt[23];
3746}
3747
3748static u64 access_sdma_header_storage_cor_err_cnt(
3749 const struct cntr_entry *entry,
3750 void *context, int vl, int mode, u64 data)
3751{
3752 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3753
3754 return dd->sw_send_dma_eng_err_status_cnt[22];
3755}
3756
3757static u64 access_sdma_packet_tracking_cor_err_cnt(
3758 const struct cntr_entry *entry,
3759 void *context, int vl, int mode, u64 data)
3760{
3761 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3762
3763 return dd->sw_send_dma_eng_err_status_cnt[21];
3764}
3765
3766static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3767 void *context, int vl, int mode,
3768 u64 data)
3769{
3770 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3771
3772 return dd->sw_send_dma_eng_err_status_cnt[20];
3773}
3774
3775static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3776 void *context, int vl, int mode,
3777 u64 data)
3778{
3779 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3780
3781 return dd->sw_send_dma_eng_err_status_cnt[19];
3782}
3783
3784static u64 access_sdma_header_request_fifo_unc_err_cnt(
3785 const struct cntr_entry *entry,
3786 void *context, int vl, int mode, u64 data)
3787{
3788 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3789
3790 return dd->sw_send_dma_eng_err_status_cnt[18];
3791}
3792
3793static u64 access_sdma_header_storage_unc_err_cnt(
3794 const struct cntr_entry *entry,
3795 void *context, int vl, int mode, u64 data)
3796{
3797 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3798
3799 return dd->sw_send_dma_eng_err_status_cnt[17];
3800}
3801
3802static u64 access_sdma_packet_tracking_unc_err_cnt(
3803 const struct cntr_entry *entry,
3804 void *context, int vl, int mode, u64 data)
3805{
3806 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3807
3808 return dd->sw_send_dma_eng_err_status_cnt[16];
3809}
3810
3811static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3812 void *context, int vl, int mode,
3813 u64 data)
3814{
3815 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3816
3817 return dd->sw_send_dma_eng_err_status_cnt[15];
3818}
3819
3820static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3821 void *context, int vl, int mode,
3822 u64 data)
3823{
3824 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3825
3826 return dd->sw_send_dma_eng_err_status_cnt[14];
3827}
3828
3829static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3830 void *context, int vl, int mode,
3831 u64 data)
3832{
3833 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3834
3835 return dd->sw_send_dma_eng_err_status_cnt[13];
3836}
3837
3838static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3839 void *context, int vl, int mode,
3840 u64 data)
3841{
3842 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3843
3844 return dd->sw_send_dma_eng_err_status_cnt[12];
3845}
3846
3847static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3848 void *context, int vl, int mode,
3849 u64 data)
3850{
3851 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3852
3853 return dd->sw_send_dma_eng_err_status_cnt[11];
3854}
3855
3856static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3857 void *context, int vl, int mode,
3858 u64 data)
3859{
3860 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3861
3862 return dd->sw_send_dma_eng_err_status_cnt[10];
3863}
3864
3865static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3866 void *context, int vl, int mode,
3867 u64 data)
3868{
3869 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3870
3871 return dd->sw_send_dma_eng_err_status_cnt[9];
3872}
3873
3874static u64 access_sdma_packet_desc_overflow_err_cnt(
3875 const struct cntr_entry *entry,
3876 void *context, int vl, int mode, u64 data)
3877{
3878 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3879
3880 return dd->sw_send_dma_eng_err_status_cnt[8];
3881}
3882
3883static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3884 void *context, int vl,
3885 int mode, u64 data)
3886{
3887 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3888
3889 return dd->sw_send_dma_eng_err_status_cnt[7];
3890}
3891
3892static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3893 void *context, int vl, int mode, u64 data)
3894{
3895 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3896
3897 return dd->sw_send_dma_eng_err_status_cnt[6];
3898}
3899
3900static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3901 void *context, int vl, int mode,
3902 u64 data)
3903{
3904 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3905
3906 return dd->sw_send_dma_eng_err_status_cnt[5];
3907}
3908
3909static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3910 void *context, int vl, int mode,
3911 u64 data)
3912{
3913 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3914
3915 return dd->sw_send_dma_eng_err_status_cnt[4];
3916}
3917
3918static u64 access_sdma_tail_out_of_bounds_err_cnt(
3919 const struct cntr_entry *entry,
3920 void *context, int vl, int mode, u64 data)
3921{
3922 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3923
3924 return dd->sw_send_dma_eng_err_status_cnt[3];
3925}
3926
3927static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3928 void *context, int vl, int mode,
3929 u64 data)
3930{
3931 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3932
3933 return dd->sw_send_dma_eng_err_status_cnt[2];
3934}
3935
3936static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3937 void *context, int vl, int mode,
3938 u64 data)
3939{
3940 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3941
3942 return dd->sw_send_dma_eng_err_status_cnt[1];
3943}
3944
3945static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3946 void *context, int vl, int mode,
3947 u64 data)
3948{
3949 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3950
3951 return dd->sw_send_dma_eng_err_status_cnt[0];
3952}
3953
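/*
 * DcRecvErr is synthesized from two sources: on a read, the value of the
 * DCC_ERR_PORTRCV_ERR_CNT CSR is combined with
 * dd->sw_rcv_bypass_packet_errors (bypass packet errors counted in
 * software), saturating at CNTR_MAX; on a write, the software portion is
 * reset to zero.  Any other access mode is reported as an error.
 */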
3954static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
3955 void *context, int vl, int mode,
3956 u64 data)
3957{
3958 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3959
3960 u64 val = 0;
3961 u64 csr = entry->csr;
3962
3963 val = read_write_csr(dd, csr, mode, data);
3964 if (mode == CNTR_MODE_R) {
3965 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
3966 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
3967 } else if (mode == CNTR_MODE_W) {
3968 dd->sw_rcv_bypass_packet_errors = 0;
3969 } else {
3970 dd_dev_err(dd, "Invalid cntr register access mode");
3971 return 0;
3972 }
3973 return val;
3974}
3975
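/*
 * def_access_sw_cpu(cntr) generates an accessor for a per-CPU software
 * counter in the port's ibport_data; read_write_cpu() combines the
 * per-CPU values in rvp.<cntr>, with rvp.z_<cntr> presumably acting as
 * the baseline recorded when the counter was last cleared.
 */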
3976#define def_access_sw_cpu(cntr) \
3977static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3978 void *context, int vl, int mode, u64 data) \
3979{ \
3980 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3981 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3982 ppd->ibport_data.rvp.cntr, vl, \
3983 mode, data); \
3984}
3985
3986def_access_sw_cpu(rc_acks);
3987def_access_sw_cpu(rc_qacks);
3988def_access_sw_cpu(rc_delayed_comp);
3989
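/*
 * def_access_ibp_counter(cntr) generates an accessor for an IB-protocol
 * software counter, ppd->ibport_data.rvp.n_<cntr>.  These counters are
 * per-port only, so a request for a specific VL returns 0.
 */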
3990#define def_access_ibp_counter(cntr) \
3991static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3992 void *context, int vl, int mode, u64 data) \
3993{ \
3994 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3995 \
3996 if (vl != CNTR_INVALID_VL) \
3997 return 0; \
3998 \
 3999 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
4000 mode, data); \
4001}
4002
4003def_access_ibp_counter(loop_pkts);
4004def_access_ibp_counter(rc_resends);
4005def_access_ibp_counter(rnr_naks);
4006def_access_ibp_counter(other_naks);
4007def_access_ibp_counter(rc_timeouts);
4008def_access_ibp_counter(pkt_drops);
4009def_access_ibp_counter(dmawait);
4010def_access_ibp_counter(rc_seqnak);
4011def_access_ibp_counter(rc_dupreq);
4012def_access_ibp_counter(rdma_seq);
4013def_access_ibp_counter(unaligned);
4014def_access_ibp_counter(seq_naks);
4015
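/*
 * Device counter table.  Each entry pairs a counter name with either a
 * hardware CSR (via the RXE32/CCE/DC_PERF element macros) or a CSR value
 * of 0 plus a dedicated accessor, along with CNTR_* flags (normal,
 * synthesized, 32-bit, per-VL, per-SDMA) describing how it is accessed.
 */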
4016static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4017[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4018[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4019 CNTR_NORMAL),
4020[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4021 CNTR_NORMAL),
4022[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4023 RCV_TID_FLOW_GEN_MISMATCH_CNT,
4024 CNTR_NORMAL),
4025[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4026 CNTR_NORMAL),
4027[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4028 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4029[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4030 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4031[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4032 CNTR_NORMAL),
4033[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4034 CNTR_NORMAL),
4035[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4036 CNTR_NORMAL),
4037[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4038 CNTR_NORMAL),
4039[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4040 CNTR_NORMAL),
4041[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4042 CNTR_NORMAL),
4043[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4044 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4045[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4046 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4047[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4048 CNTR_SYNTH),
4049[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4050 access_dc_rcv_err_cnt),
4051[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4052 CNTR_SYNTH),
4053[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4054 CNTR_SYNTH),
4055[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4056 CNTR_SYNTH),
4057[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4058 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4059[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4060 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4061 CNTR_SYNTH),
4062[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4063 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4064[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4065 CNTR_SYNTH),
4066[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4067 CNTR_SYNTH),
4068[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4069 CNTR_SYNTH),
4070[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4071 CNTR_SYNTH),
4072[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4073 CNTR_SYNTH),
4074[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4075 CNTR_SYNTH),
4076[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4077 CNTR_SYNTH),
4078[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4079 CNTR_SYNTH | CNTR_VL),
4080[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4081 CNTR_SYNTH | CNTR_VL),
4082[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4083[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4084 CNTR_SYNTH | CNTR_VL),
4085[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4086[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4087 CNTR_SYNTH | CNTR_VL),
4088[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4089 CNTR_SYNTH),
4090[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4091 CNTR_SYNTH | CNTR_VL),
4092[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4093 CNTR_SYNTH),
4094[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4095 CNTR_SYNTH | CNTR_VL),
4096[C_DC_TOTAL_CRC] =
4097 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4098 CNTR_SYNTH),
4099[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4100 CNTR_SYNTH),
4101[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4102 CNTR_SYNTH),
4103[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4104 CNTR_SYNTH),
4105[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4106 CNTR_SYNTH),
4107[C_DC_CRC_MULT_LN] =
4108 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4109 CNTR_SYNTH),
4110[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4111 CNTR_SYNTH),
4112[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4113 CNTR_SYNTH),
4114[C_DC_SEQ_CRC_CNT] =
4115 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4116 CNTR_SYNTH),
4117[C_DC_ESC0_ONLY_CNT] =
4118 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4119 CNTR_SYNTH),
4120[C_DC_ESC0_PLUS1_CNT] =
4121 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4122 CNTR_SYNTH),
4123[C_DC_ESC0_PLUS2_CNT] =
4124 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4125 CNTR_SYNTH),
4126[C_DC_REINIT_FROM_PEER_CNT] =
4127 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4128 CNTR_SYNTH),
4129[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4130 CNTR_SYNTH),
4131[C_DC_MISC_FLG_CNT] =
4132 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4133 CNTR_SYNTH),
4134[C_DC_PRF_GOOD_LTP_CNT] =
4135 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4136[C_DC_PRF_ACCEPTED_LTP_CNT] =
4137 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4138 CNTR_SYNTH),
4139[C_DC_PRF_RX_FLIT_CNT] =
4140 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4141[C_DC_PRF_TX_FLIT_CNT] =
4142 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4143[C_DC_PRF_CLK_CNTR] =
4144 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4145[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4146 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4147[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4148 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4149 CNTR_SYNTH),
4150[C_DC_PG_STS_TX_SBE_CNT] =
4151 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4152[C_DC_PG_STS_TX_MBE_CNT] =
4153 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4154 CNTR_SYNTH),
4155[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4156 access_sw_cpu_intr),
4157[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4158 access_sw_cpu_rcv_limit),
4159[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4160 access_sw_vtx_wait),
4161[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4162 access_sw_pio_wait),
4163[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4164 access_sw_pio_drain),
4165[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4166 access_sw_kmem_wait),
4167[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4168 access_sw_send_schedule),
4169[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4170 SEND_DMA_DESC_FETCHED_CNT, 0,
4171 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4172 dev_access_u32_csr),
4173[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4174 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4175 access_sde_int_cnt),
4176[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4177 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4178 access_sde_err_cnt),
4179[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4180 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4181 access_sde_idle_int_cnt),
4182[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4183 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4184 access_sde_progress_int_cnt),
4185/* MISC_ERR_STATUS */
4186[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4187 CNTR_NORMAL,
4188 access_misc_pll_lock_fail_err_cnt),
4189[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4190 CNTR_NORMAL,
4191 access_misc_mbist_fail_err_cnt),
4192[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4193 CNTR_NORMAL,
4194 access_misc_invalid_eep_cmd_err_cnt),
4195[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4196 CNTR_NORMAL,
4197 access_misc_efuse_done_parity_err_cnt),
4198[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4199 CNTR_NORMAL,
4200 access_misc_efuse_write_err_cnt),
4201[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4202 0, CNTR_NORMAL,
4203 access_misc_efuse_read_bad_addr_err_cnt),
4204[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4205 CNTR_NORMAL,
4206 access_misc_efuse_csr_parity_err_cnt),
4207[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4208 CNTR_NORMAL,
4209 access_misc_fw_auth_failed_err_cnt),
4210[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4211 CNTR_NORMAL,
4212 access_misc_key_mismatch_err_cnt),
4213[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4214 CNTR_NORMAL,
4215 access_misc_sbus_write_failed_err_cnt),
4216[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4217 CNTR_NORMAL,
4218 access_misc_csr_write_bad_addr_err_cnt),
4219[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4220 CNTR_NORMAL,
4221 access_misc_csr_read_bad_addr_err_cnt),
4222[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4223 CNTR_NORMAL,
4224 access_misc_csr_parity_err_cnt),
4225/* CceErrStatus */
4226[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4227 CNTR_NORMAL,
4228 access_sw_cce_err_status_aggregated_cnt),
4229[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4230 CNTR_NORMAL,
4231 access_cce_msix_csr_parity_err_cnt),
4232[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4233 CNTR_NORMAL,
4234 access_cce_int_map_unc_err_cnt),
4235[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4236 CNTR_NORMAL,
4237 access_cce_int_map_cor_err_cnt),
4238[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4239 CNTR_NORMAL,
4240 access_cce_msix_table_unc_err_cnt),
4241[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4242 CNTR_NORMAL,
4243 access_cce_msix_table_cor_err_cnt),
4244[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4245 0, CNTR_NORMAL,
4246 access_cce_rxdma_conv_fifo_parity_err_cnt),
4247[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4248 0, CNTR_NORMAL,
4249 access_cce_rcpl_async_fifo_parity_err_cnt),
4250[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4251 CNTR_NORMAL,
4252 access_cce_seg_write_bad_addr_err_cnt),
4253[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4254 CNTR_NORMAL,
4255 access_cce_seg_read_bad_addr_err_cnt),
4256[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4257 CNTR_NORMAL,
4258 access_la_triggered_cnt),
4259[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4260 CNTR_NORMAL,
4261 access_cce_trgt_cpl_timeout_err_cnt),
4262[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4263 CNTR_NORMAL,
4264 access_pcic_receive_parity_err_cnt),
4265[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4266 CNTR_NORMAL,
4267 access_pcic_transmit_back_parity_err_cnt),
4268[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4269 0, CNTR_NORMAL,
4270 access_pcic_transmit_front_parity_err_cnt),
4271[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4272 CNTR_NORMAL,
4273 access_pcic_cpl_dat_q_unc_err_cnt),
4274[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4275 CNTR_NORMAL,
4276 access_pcic_cpl_hd_q_unc_err_cnt),
4277[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4278 CNTR_NORMAL,
4279 access_pcic_post_dat_q_unc_err_cnt),
4280[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4281 CNTR_NORMAL,
4282 access_pcic_post_hd_q_unc_err_cnt),
4283[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4284 CNTR_NORMAL,
4285 access_pcic_retry_sot_mem_unc_err_cnt),
4286[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4287 CNTR_NORMAL,
4288 access_pcic_retry_mem_unc_err),
4289[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4290 CNTR_NORMAL,
4291 access_pcic_n_post_dat_q_parity_err_cnt),
4292[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4293 CNTR_NORMAL,
4294 access_pcic_n_post_h_q_parity_err_cnt),
4295[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4296 CNTR_NORMAL,
4297 access_pcic_cpl_dat_q_cor_err_cnt),
4298[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4299 CNTR_NORMAL,
4300 access_pcic_cpl_hd_q_cor_err_cnt),
4301[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4302 CNTR_NORMAL,
4303 access_pcic_post_dat_q_cor_err_cnt),
4304[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4305 CNTR_NORMAL,
4306 access_pcic_post_hd_q_cor_err_cnt),
4307[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4308 CNTR_NORMAL,
4309 access_pcic_retry_sot_mem_cor_err_cnt),
4310[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4311 CNTR_NORMAL,
4312 access_pcic_retry_mem_cor_err_cnt),
4313[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4314 "CceCli1AsyncFifoDbgParityError", 0, 0,
4315 CNTR_NORMAL,
4316 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4317[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4318 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4319 CNTR_NORMAL,
4320 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4321 ),
4322[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4323 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4324 CNTR_NORMAL,
4325 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4326[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4327 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4328 CNTR_NORMAL,
4329 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4330[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4331 0, CNTR_NORMAL,
4332 access_cce_cli2_async_fifo_parity_err_cnt),
4333[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4334 CNTR_NORMAL,
4335 access_cce_csr_cfg_bus_parity_err_cnt),
4336[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4337 0, CNTR_NORMAL,
4338 access_cce_cli0_async_fifo_parity_err_cnt),
4339[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4340 CNTR_NORMAL,
4341 access_cce_rspd_data_parity_err_cnt),
4342[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4343 CNTR_NORMAL,
4344 access_cce_trgt_access_err_cnt),
4345[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4346 0, CNTR_NORMAL,
4347 access_cce_trgt_async_fifo_parity_err_cnt),
4348[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4349 CNTR_NORMAL,
4350 access_cce_csr_write_bad_addr_err_cnt),
4351[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4352 CNTR_NORMAL,
4353 access_cce_csr_read_bad_addr_err_cnt),
4354[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4355 CNTR_NORMAL,
4356 access_ccs_csr_parity_err_cnt),
4357
4358/* RcvErrStatus */
4359[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4360 CNTR_NORMAL,
4361 access_rx_csr_parity_err_cnt),
4362[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4363 CNTR_NORMAL,
4364 access_rx_csr_write_bad_addr_err_cnt),
4365[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4366 CNTR_NORMAL,
4367 access_rx_csr_read_bad_addr_err_cnt),
4368[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4369 CNTR_NORMAL,
4370 access_rx_dma_csr_unc_err_cnt),
4371[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4372 CNTR_NORMAL,
4373 access_rx_dma_dq_fsm_encoding_err_cnt),
4374[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4375 CNTR_NORMAL,
4376 access_rx_dma_eq_fsm_encoding_err_cnt),
4377[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4378 CNTR_NORMAL,
4379 access_rx_dma_csr_parity_err_cnt),
4380[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4381 CNTR_NORMAL,
4382 access_rx_rbuf_data_cor_err_cnt),
4383[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4384 CNTR_NORMAL,
4385 access_rx_rbuf_data_unc_err_cnt),
4386[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4387 CNTR_NORMAL,
4388 access_rx_dma_data_fifo_rd_cor_err_cnt),
4389[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4390 CNTR_NORMAL,
4391 access_rx_dma_data_fifo_rd_unc_err_cnt),
4392[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4393 CNTR_NORMAL,
4394 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4395[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4396 CNTR_NORMAL,
4397 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4398[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4399 CNTR_NORMAL,
4400 access_rx_rbuf_desc_part2_cor_err_cnt),
4401[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4402 CNTR_NORMAL,
4403 access_rx_rbuf_desc_part2_unc_err_cnt),
4404[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4405 CNTR_NORMAL,
4406 access_rx_rbuf_desc_part1_cor_err_cnt),
4407[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4408 CNTR_NORMAL,
4409 access_rx_rbuf_desc_part1_unc_err_cnt),
4410[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4411 CNTR_NORMAL,
4412 access_rx_hq_intr_fsm_err_cnt),
4413[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4414 CNTR_NORMAL,
4415 access_rx_hq_intr_csr_parity_err_cnt),
4416[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4417 CNTR_NORMAL,
4418 access_rx_lookup_csr_parity_err_cnt),
4419[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4420 CNTR_NORMAL,
4421 access_rx_lookup_rcv_array_cor_err_cnt),
4422[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4423 CNTR_NORMAL,
4424 access_rx_lookup_rcv_array_unc_err_cnt),
4425[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4426 0, CNTR_NORMAL,
4427 access_rx_lookup_des_part2_parity_err_cnt),
4428[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4429 0, CNTR_NORMAL,
4430 access_rx_lookup_des_part1_unc_cor_err_cnt),
4431[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4432 CNTR_NORMAL,
4433 access_rx_lookup_des_part1_unc_err_cnt),
4434[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4435 CNTR_NORMAL,
4436 access_rx_rbuf_next_free_buf_cor_err_cnt),
4437[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4438 CNTR_NORMAL,
4439 access_rx_rbuf_next_free_buf_unc_err_cnt),
4440[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4441 "RxRbufFlInitWrAddrParityErr", 0, 0,
4442 CNTR_NORMAL,
4443 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4444[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4445 0, CNTR_NORMAL,
4446 access_rx_rbuf_fl_initdone_parity_err_cnt),
4447[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4448 0, CNTR_NORMAL,
4449 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4450[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4451 CNTR_NORMAL,
4452 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4453[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4454 CNTR_NORMAL,
4455 access_rx_rbuf_empty_err_cnt),
4456[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4457 CNTR_NORMAL,
4458 access_rx_rbuf_full_err_cnt),
4459[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4460 CNTR_NORMAL,
4461 access_rbuf_bad_lookup_err_cnt),
4462[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4463 CNTR_NORMAL,
4464 access_rbuf_ctx_id_parity_err_cnt),
4465[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4466 CNTR_NORMAL,
4467 access_rbuf_csr_qeopdw_parity_err_cnt),
4468[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4469 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4470 CNTR_NORMAL,
4471 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4472[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4473 "RxRbufCsrQTlPtrParityErr", 0, 0,
4474 CNTR_NORMAL,
4475 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4476[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4477 0, CNTR_NORMAL,
4478 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4479[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4480 0, CNTR_NORMAL,
4481 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4482[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4483 0, 0, CNTR_NORMAL,
4484 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4485[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4486 0, CNTR_NORMAL,
4487 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4488[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4489 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4490 CNTR_NORMAL,
4491 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4492[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4493 0, CNTR_NORMAL,
4494 access_rx_rbuf_block_list_read_cor_err_cnt),
4495[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4496 0, CNTR_NORMAL,
4497 access_rx_rbuf_block_list_read_unc_err_cnt),
4498[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4499 CNTR_NORMAL,
4500 access_rx_rbuf_lookup_des_cor_err_cnt),
4501[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4502 CNTR_NORMAL,
4503 access_rx_rbuf_lookup_des_unc_err_cnt),
4504[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4505 "RxRbufLookupDesRegUncCorErr", 0, 0,
4506 CNTR_NORMAL,
4507 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4508[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4509 CNTR_NORMAL,
4510 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4511[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4512 CNTR_NORMAL,
4513 access_rx_rbuf_free_list_cor_err_cnt),
4514[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4515 CNTR_NORMAL,
4516 access_rx_rbuf_free_list_unc_err_cnt),
4517[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4518 CNTR_NORMAL,
4519 access_rx_rcv_fsm_encoding_err_cnt),
4520[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4521 CNTR_NORMAL,
4522 access_rx_dma_flag_cor_err_cnt),
4523[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4524 CNTR_NORMAL,
4525 access_rx_dma_flag_unc_err_cnt),
4526[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4527 CNTR_NORMAL,
4528 access_rx_dc_sop_eop_parity_err_cnt),
4529[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4530 CNTR_NORMAL,
4531 access_rx_rcv_csr_parity_err_cnt),
4532[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4533 CNTR_NORMAL,
4534 access_rx_rcv_qp_map_table_cor_err_cnt),
4535[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4536 CNTR_NORMAL,
4537 access_rx_rcv_qp_map_table_unc_err_cnt),
4538[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4539 CNTR_NORMAL,
4540 access_rx_rcv_data_cor_err_cnt),
4541[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4542 CNTR_NORMAL,
4543 access_rx_rcv_data_unc_err_cnt),
4544[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4545 CNTR_NORMAL,
4546 access_rx_rcv_hdr_cor_err_cnt),
4547[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4548 CNTR_NORMAL,
4549 access_rx_rcv_hdr_unc_err_cnt),
4550[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4551 CNTR_NORMAL,
4552 access_rx_dc_intf_parity_err_cnt),
4553[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4554 CNTR_NORMAL,
4555 access_rx_dma_csr_cor_err_cnt),
4556/* SendPioErrStatus */
4557[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4558 CNTR_NORMAL,
4559 access_pio_pec_sop_head_parity_err_cnt),
4560[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4561 CNTR_NORMAL,
4562 access_pio_pcc_sop_head_parity_err_cnt),
4563[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4564 0, 0, CNTR_NORMAL,
4565 access_pio_last_returned_cnt_parity_err_cnt),
4566[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4567 0, CNTR_NORMAL,
4568 access_pio_current_free_cnt_parity_err_cnt),
4569[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4570 CNTR_NORMAL,
4571 access_pio_reserved_31_err_cnt),
4572[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4573 CNTR_NORMAL,
4574 access_pio_reserved_30_err_cnt),
4575[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4576 CNTR_NORMAL,
4577 access_pio_ppmc_sop_len_err_cnt),
4578[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4579 CNTR_NORMAL,
4580 access_pio_ppmc_bqc_mem_parity_err_cnt),
4581[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4582 CNTR_NORMAL,
4583 access_pio_vl_fifo_parity_err_cnt),
4584[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4585 CNTR_NORMAL,
4586 access_pio_vlf_sop_parity_err_cnt),
4587[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4588 CNTR_NORMAL,
4589 access_pio_vlf_v1_len_parity_err_cnt),
4590[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4591 CNTR_NORMAL,
4592 access_pio_block_qw_count_parity_err_cnt),
4593[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4594 CNTR_NORMAL,
4595 access_pio_write_qw_valid_parity_err_cnt),
4596[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4597 CNTR_NORMAL,
4598 access_pio_state_machine_err_cnt),
4599[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4600 CNTR_NORMAL,
4601 access_pio_write_data_parity_err_cnt),
4602[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4603 CNTR_NORMAL,
4604 access_pio_host_addr_mem_cor_err_cnt),
4605[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4606 CNTR_NORMAL,
4607 access_pio_host_addr_mem_unc_err_cnt),
4608[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4609 CNTR_NORMAL,
4610 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4611[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4612 CNTR_NORMAL,
4613 access_pio_init_sm_in_err_cnt),
4614[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4615 CNTR_NORMAL,
4616 access_pio_ppmc_pbl_fifo_err_cnt),
4617[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4618 0, CNTR_NORMAL,
4619 access_pio_credit_ret_fifo_parity_err_cnt),
4620[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4621 CNTR_NORMAL,
4622 access_pio_v1_len_mem_bank1_cor_err_cnt),
4623[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4624 CNTR_NORMAL,
4625 access_pio_v1_len_mem_bank0_cor_err_cnt),
4626[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4627 CNTR_NORMAL,
4628 access_pio_v1_len_mem_bank1_unc_err_cnt),
4629[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4630 CNTR_NORMAL,
4631 access_pio_v1_len_mem_bank0_unc_err_cnt),
4632[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4633 CNTR_NORMAL,
4634 access_pio_sm_pkt_reset_parity_err_cnt),
4635[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4636 CNTR_NORMAL,
4637 access_pio_pkt_evict_fifo_parity_err_cnt),
4638[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4639 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4640 CNTR_NORMAL,
4641 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4642[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4643 CNTR_NORMAL,
4644 access_pio_sbrdctl_crrel_parity_err_cnt),
4645[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4646 CNTR_NORMAL,
4647 access_pio_pec_fifo_parity_err_cnt),
4648[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4649 CNTR_NORMAL,
4650 access_pio_pcc_fifo_parity_err_cnt),
4651[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4652 CNTR_NORMAL,
4653 access_pio_sb_mem_fifo1_err_cnt),
4654[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4655 CNTR_NORMAL,
4656 access_pio_sb_mem_fifo0_err_cnt),
4657[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4658 CNTR_NORMAL,
4659 access_pio_csr_parity_err_cnt),
4660[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4661 CNTR_NORMAL,
4662 access_pio_write_addr_parity_err_cnt),
4663[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4664 CNTR_NORMAL,
4665 access_pio_write_bad_ctxt_err_cnt),
4666/* SendDmaErrStatus */
4667[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4668 0, CNTR_NORMAL,
4669 access_sdma_pcie_req_tracking_cor_err_cnt),
4670[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4671 0, CNTR_NORMAL,
4672 access_sdma_pcie_req_tracking_unc_err_cnt),
4673[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4674 CNTR_NORMAL,
4675 access_sdma_csr_parity_err_cnt),
4676[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4677 CNTR_NORMAL,
4678 access_sdma_rpy_tag_err_cnt),
4679/* SendEgressErrStatus */
4680[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4681 CNTR_NORMAL,
4682 access_tx_read_pio_memory_csr_unc_err_cnt),
4683[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4684 0, CNTR_NORMAL,
4685 access_tx_read_sdma_memory_csr_err_cnt),
4686[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4687 CNTR_NORMAL,
4688 access_tx_egress_fifo_cor_err_cnt),
4689[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4690 CNTR_NORMAL,
4691 access_tx_read_pio_memory_cor_err_cnt),
4692[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4693 CNTR_NORMAL,
4694 access_tx_read_sdma_memory_cor_err_cnt),
4695[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4696 CNTR_NORMAL,
4697 access_tx_sb_hdr_cor_err_cnt),
4698[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4699 CNTR_NORMAL,
4700 access_tx_credit_overrun_err_cnt),
4701[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4702 CNTR_NORMAL,
4703 access_tx_launch_fifo8_cor_err_cnt),
4704[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4705 CNTR_NORMAL,
4706 access_tx_launch_fifo7_cor_err_cnt),
4707[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4708 CNTR_NORMAL,
4709 access_tx_launch_fifo6_cor_err_cnt),
4710[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4711 CNTR_NORMAL,
4712 access_tx_launch_fifo5_cor_err_cnt),
4713[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4714 CNTR_NORMAL,
4715 access_tx_launch_fifo4_cor_err_cnt),
4716[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4717 CNTR_NORMAL,
4718 access_tx_launch_fifo3_cor_err_cnt),
4719[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4720 CNTR_NORMAL,
4721 access_tx_launch_fifo2_cor_err_cnt),
4722[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4723 CNTR_NORMAL,
4724 access_tx_launch_fifo1_cor_err_cnt),
4725[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4726 CNTR_NORMAL,
4727 access_tx_launch_fifo0_cor_err_cnt),
4728[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4729 CNTR_NORMAL,
4730 access_tx_credit_return_vl_err_cnt),
4731[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4732 CNTR_NORMAL,
4733 access_tx_hcrc_insertion_err_cnt),
4734[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4735 CNTR_NORMAL,
4736 access_tx_egress_fifo_unc_err_cnt),
4737[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4738 CNTR_NORMAL,
4739 access_tx_read_pio_memory_unc_err_cnt),
4740[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4741 CNTR_NORMAL,
4742 access_tx_read_sdma_memory_unc_err_cnt),
4743[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4744 CNTR_NORMAL,
4745 access_tx_sb_hdr_unc_err_cnt),
4746[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4747 CNTR_NORMAL,
4748 access_tx_credit_return_partiy_err_cnt),
4749[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4750 0, 0, CNTR_NORMAL,
4751 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4752[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4753 0, 0, CNTR_NORMAL,
4754 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4755[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4756 0, 0, CNTR_NORMAL,
4757 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4758[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4759 0, 0, CNTR_NORMAL,
4760 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4761[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4762 0, 0, CNTR_NORMAL,
4763 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4764[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4765 0, 0, CNTR_NORMAL,
4766 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4767[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4768 0, 0, CNTR_NORMAL,
4769 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4770[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4771 0, 0, CNTR_NORMAL,
4772 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4773[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4774 0, 0, CNTR_NORMAL,
4775 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4776[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4777 0, 0, CNTR_NORMAL,
4778 access_tx_sdma15_disallowed_packet_err_cnt),
4779[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4780 0, 0, CNTR_NORMAL,
4781 access_tx_sdma14_disallowed_packet_err_cnt),
4782[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4783 0, 0, CNTR_NORMAL,
4784 access_tx_sdma13_disallowed_packet_err_cnt),
4785[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4786 0, 0, CNTR_NORMAL,
4787 access_tx_sdma12_disallowed_packet_err_cnt),
4788[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4789 0, 0, CNTR_NORMAL,
4790 access_tx_sdma11_disallowed_packet_err_cnt),
4791[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4792 0, 0, CNTR_NORMAL,
4793 access_tx_sdma10_disallowed_packet_err_cnt),
4794[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4795 0, 0, CNTR_NORMAL,
4796 access_tx_sdma9_disallowed_packet_err_cnt),
4797[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4798 0, 0, CNTR_NORMAL,
4799 access_tx_sdma8_disallowed_packet_err_cnt),
4800[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4801 0, 0, CNTR_NORMAL,
4802 access_tx_sdma7_disallowed_packet_err_cnt),
4803[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4804 0, 0, CNTR_NORMAL,
4805 access_tx_sdma6_disallowed_packet_err_cnt),
4806[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4807 0, 0, CNTR_NORMAL,
4808 access_tx_sdma5_disallowed_packet_err_cnt),
4809[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4810 0, 0, CNTR_NORMAL,
4811 access_tx_sdma4_disallowed_packet_err_cnt),
4812[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4813 0, 0, CNTR_NORMAL,
4814 access_tx_sdma3_disallowed_packet_err_cnt),
4815[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4816 0, 0, CNTR_NORMAL,
4817 access_tx_sdma2_disallowed_packet_err_cnt),
4818[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4819 0, 0, CNTR_NORMAL,
4820 access_tx_sdma1_disallowed_packet_err_cnt),
4821[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4822 0, 0, CNTR_NORMAL,
4823 access_tx_sdma0_disallowed_packet_err_cnt),
4824[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4825 CNTR_NORMAL,
4826 access_tx_config_parity_err_cnt),
4827[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4828 CNTR_NORMAL,
4829 access_tx_sbrd_ctl_csr_parity_err_cnt),
4830[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4831 CNTR_NORMAL,
4832 access_tx_launch_csr_parity_err_cnt),
4833[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4834 CNTR_NORMAL,
4835 access_tx_illegal_vl_err_cnt),
4836[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4837 "TxSbrdCtlStateMachineParityErr", 0, 0,
4838 CNTR_NORMAL,
4839 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4840[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4841 CNTR_NORMAL,
4842 access_egress_reserved_10_err_cnt),
4843[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4844 CNTR_NORMAL,
4845 access_egress_reserved_9_err_cnt),
4846[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4847 0, 0, CNTR_NORMAL,
4848 access_tx_sdma_launch_intf_parity_err_cnt),
4849[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4850 CNTR_NORMAL,
4851 access_tx_pio_launch_intf_parity_err_cnt),
4852[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4853 CNTR_NORMAL,
4854 access_egress_reserved_6_err_cnt),
4855[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4856 CNTR_NORMAL,
4857 access_tx_incorrect_link_state_err_cnt),
4858[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4859 CNTR_NORMAL,
4860 access_tx_linkdown_err_cnt),
4861[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4862 "EgressFifoUnderrunOrParityErr", 0, 0,
4863 CNTR_NORMAL,
4864 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4865[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4866 CNTR_NORMAL,
4867 access_egress_reserved_2_err_cnt),
4868[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4869 CNTR_NORMAL,
4870 access_tx_pkt_integrity_mem_unc_err_cnt),
4871[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4872 CNTR_NORMAL,
4873 access_tx_pkt_integrity_mem_cor_err_cnt),
4874/* SendErrStatus */
4875[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4876 CNTR_NORMAL,
4877 access_send_csr_write_bad_addr_err_cnt),
4878[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4879 CNTR_NORMAL,
4880 access_send_csr_read_bad_addr_err_cnt),
4881[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4882 CNTR_NORMAL,
4883 access_send_csr_parity_cnt),
4884/* SendCtxtErrStatus */
4885[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4886 CNTR_NORMAL,
4887 access_pio_write_out_of_bounds_err_cnt),
4888[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4889 CNTR_NORMAL,
4890 access_pio_write_overflow_err_cnt),
4891[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4892 0, 0, CNTR_NORMAL,
4893 access_pio_write_crosses_boundary_err_cnt),
4894[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4895 CNTR_NORMAL,
4896 access_pio_disallowed_packet_err_cnt),
4897[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4898 CNTR_NORMAL,
4899 access_pio_inconsistent_sop_err_cnt),
4900/* SendDmaEngErrStatus */
4901[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4902 0, 0, CNTR_NORMAL,
4903 access_sdma_header_request_fifo_cor_err_cnt),
4904[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4905 CNTR_NORMAL,
4906 access_sdma_header_storage_cor_err_cnt),
4907[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4908 CNTR_NORMAL,
4909 access_sdma_packet_tracking_cor_err_cnt),
4910[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4911 CNTR_NORMAL,
4912 access_sdma_assembly_cor_err_cnt),
4913[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4914 CNTR_NORMAL,
4915 access_sdma_desc_table_cor_err_cnt),
4916[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4917 0, 0, CNTR_NORMAL,
4918 access_sdma_header_request_fifo_unc_err_cnt),
4919[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4920 CNTR_NORMAL,
4921 access_sdma_header_storage_unc_err_cnt),
4922[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4923 CNTR_NORMAL,
4924 access_sdma_packet_tracking_unc_err_cnt),
4925[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4926 CNTR_NORMAL,
4927 access_sdma_assembly_unc_err_cnt),
4928[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4929 CNTR_NORMAL,
4930 access_sdma_desc_table_unc_err_cnt),
4931[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4932 CNTR_NORMAL,
4933 access_sdma_timeout_err_cnt),
4934[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4935 CNTR_NORMAL,
4936 access_sdma_header_length_err_cnt),
4937[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4938 CNTR_NORMAL,
4939 access_sdma_header_address_err_cnt),
4940[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4941 CNTR_NORMAL,
4942 access_sdma_header_select_err_cnt),
4943[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4944 CNTR_NORMAL,
4945 access_sdma_reserved_9_err_cnt),
4946[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4947 CNTR_NORMAL,
4948 access_sdma_packet_desc_overflow_err_cnt),
4949[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4950 CNTR_NORMAL,
4951 access_sdma_length_mismatch_err_cnt),
4952[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4953 CNTR_NORMAL,
4954 access_sdma_halt_err_cnt),
4955[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4956 CNTR_NORMAL,
4957 access_sdma_mem_read_err_cnt),
4958[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4959 CNTR_NORMAL,
4960 access_sdma_first_desc_err_cnt),
4961[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4962 CNTR_NORMAL,
4963 access_sdma_tail_out_of_bounds_err_cnt),
4964[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4965 CNTR_NORMAL,
4966 access_sdma_too_long_err_cnt),
4967[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4968 CNTR_NORMAL,
4969 access_sdma_gen_mismatch_err_cnt),
4970[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4971 CNTR_NORMAL,
4972 access_sdma_wrong_dw_err_cnt),
77241056
MM
4973};
4974
4975static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4976[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4977 CNTR_NORMAL),
4978[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4979 CNTR_NORMAL),
4980[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4981 CNTR_NORMAL),
4982[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4983 CNTR_NORMAL),
4984[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4985 CNTR_NORMAL),
4986[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4987 CNTR_NORMAL),
4988[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4989 CNTR_NORMAL),
4990[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4991[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4992[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4993[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
17fb4f29 4994 CNTR_SYNTH | CNTR_VL),
77241056 4995[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
17fb4f29 4996 CNTR_SYNTH | CNTR_VL),
77241056 4997[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
17fb4f29 4998 CNTR_SYNTH | CNTR_VL),
77241056
MM
4999[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5000[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5001[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
17fb4f29 5002 access_sw_link_dn_cnt),
77241056 5003[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
17fb4f29 5004 access_sw_link_up_cnt),
6d014530
DL
5005[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5006 access_sw_unknown_frame_cnt),
77241056 5007[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
17fb4f29 5008 access_sw_xmit_discards),
77241056 5009[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
17fb4f29
JJ
5010 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5011 access_sw_xmit_discards),
77241056 5012[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
17fb4f29 5013 access_xmit_constraint_errs),
77241056 5014[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
17fb4f29 5015 access_rcv_constraint_errs),
77241056
MM
5016[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5017[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5018[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5019[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5020[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5021[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5022[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5023[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5024[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5025[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5026[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5027[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5028[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5029 access_sw_cpu_rc_acks),
5030[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
17fb4f29 5031 access_sw_cpu_rc_qacks),
77241056 5032[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
17fb4f29 5033 access_sw_cpu_rc_delayed_comp),
77241056
MM
5034[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5035[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5036[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5037[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5038[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5039[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5040[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5041[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5042[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5043[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5044[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5045[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5046[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5047[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5048[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5049[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5050[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5051[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5052[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5053[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5054[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5055[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5056[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5057[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5058[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5059[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5060[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5061[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5062[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5063[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5064[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5065[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5066[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5067[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5068[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5069[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5070[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5071[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5072[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5073[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5074[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5075[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5076[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5077[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5078[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5079[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5080[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5081[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5082[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5083[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5084[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5085[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5086[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5087[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5088[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5089[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5090[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5091[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5092[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5093[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5094[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5095[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5096[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5097[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5098[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5099[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5100[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5101[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5102[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5103[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5104[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5105[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5106[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5107[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5108[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5109[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5110[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5111[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5112[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5113[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5114};
5115
5116/* ======================================================================== */
5117
77241056
MM
5118/* return true if this is chip revision a */
5119int is_ax(struct hfi1_devdata *dd)
5120{
5121 u8 chip_rev_minor =
5122 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5123 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5124 return (chip_rev_minor & 0xf0) == 0;
5125}
5126
5127/* return true if this is chip revision b */
5128int is_bx(struct hfi1_devdata *dd)
5129{
5130 u8 chip_rev_minor =
5131 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5132 & CCE_REVISION_CHIP_REV_MINOR_MASK;
995deafa 5133 return (chip_rev_minor & 0xF0) == 0x10;
77241056
MM
5134}
5135
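/*
 * Note: is_ax() and is_bx() look only at the upper nibble of ChipRevMinor,
 * so any A-step (0x0n) or B-step (0x1n) sub-revision matches.
 */
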
5136/*
5137 * Append string s to buffer buf. Arguments curp and lenp point to the
5138 * current position and remaining length, respectively; both are updated.
5139 *
5140 * return 0 on success, 1 on out of room
5141 */
5142static int append_str(char *buf, char **curp, int *lenp, const char *s)
5143{
5144 char *p = *curp;
5145 int len = *lenp;
5146 int result = 0; /* success */
5147 char c;
5148
5149 /* add a comma, if not first in the buffer */
5150 if (p != buf) {
5151 if (len == 0) {
5152 result = 1; /* out of room */
5153 goto done;
5154 }
5155 *p++ = ',';
5156 len--;
5157 }
5158
5159 /* copy the string */
5160 while ((c = *s++) != 0) {
5161 if (len == 0) {
5162 result = 1; /* out of room */
5163 goto done;
5164 }
5165 *p++ = c;
5166 len--;
5167 }
5168
5169done:
5170 /* write return values */
5171 *curp = p;
5172 *lenp = len;
5173
5174 return result;
5175}
5176
5177/*
5178 * Using the given flag table, print a comma separated string into
5179 * the buffer. End in '*' if the buffer is too short.
5180 */
5181static char *flag_string(char *buf, int buf_len, u64 flags,
17fb4f29 5182 struct flag_table *table, int table_size)
77241056
MM
5183{
5184 char extra[32];
5185 char *p = buf;
5186 int len = buf_len;
5187 int no_room = 0;
5188 int i;
5189
5190 /* make sure there are at least 2 bytes so we can form "*" */
5191 if (len < 2)
5192 return "";
5193
5194 len--; /* leave room for a nul */
5195 for (i = 0; i < table_size; i++) {
5196 if (flags & table[i].flag) {
5197 no_room = append_str(buf, &p, &len, table[i].str);
5198 if (no_room)
5199 break;
5200 flags &= ~table[i].flag;
5201 }
5202 }
5203
5204 /* any undocumented bits left? */
5205 if (!no_room && flags) {
5206 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5207 no_room = append_str(buf, &p, &len, extra);
5208 }
5209
5210 /* add * if ran out of room */
5211 if (no_room) {
5212 /* may need to back up to add space for a '*' */
5213 if (len == 0)
5214 --p;
5215 *p++ = '*';
5216 }
5217
5218 /* add final nul - space already allocated above */
5219 *p = 0;
5220 return buf;
5221}
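
/*
 * Usage sketch (hypothetical table, assuming struct flag_table exposes the
 * .flag and .str members used above):
 *
 *	static struct flag_table tbl[] = {
 *		{ .flag = 0x1, .str = "BitA" },
 *		{ .flag = 0x2, .str = "BitB" },
 *	};
 *
 * flag_string(buf, sizeof(buf), 0x7, tbl, ARRAY_SIZE(tbl)) would yield
 * "BitA,BitB,bits 0x4"; a too-small buffer is terminated with '*'.
 */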
5222
5223/* first 8 CCE error interrupt source names */
5224static const char * const cce_misc_names[] = {
5225 "CceErrInt", /* 0 */
5226 "RxeErrInt", /* 1 */
5227 "MiscErrInt", /* 2 */
5228 "Reserved3", /* 3 */
5229 "PioErrInt", /* 4 */
5230 "SDmaErrInt", /* 5 */
5231 "EgressErrInt", /* 6 */
5232 "TxeErrInt" /* 7 */
5233};
5234
5235/*
5236 * Return the miscellaneous error interrupt name.
5237 */
5238static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5239{
5240 if (source < ARRAY_SIZE(cce_misc_names))
5241 strncpy(buf, cce_misc_names[source], bsize);
5242 else
17fb4f29
JJ
5243 snprintf(buf, bsize, "Reserved%u",
5244 source + IS_GENERAL_ERR_START);
77241056
MM
5245
5246 return buf;
5247}
5248
5249/*
5250 * Return the SDMA engine error interrupt name.
5251 */
5252static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5253{
5254 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5255 return buf;
5256}
5257
5258/*
5259 * Return the send context error interrupt name.
5260 */
5261static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5262{
5263 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5264 return buf;
5265}
5266
5267static const char * const various_names[] = {
5268 "PbcInt",
5269 "GpioAssertInt",
5270 "Qsfp1Int",
5271 "Qsfp2Int",
5272 "TCritInt"
5273};
5274
5275/*
5276 * Return the various interrupt name.
5277 */
5278static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5279{
5280 if (source < ARRAY_SIZE(various_names))
5281 strncpy(buf, various_names[source], bsize);
5282 else
8638b77f 5283 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
77241056
MM
5284 return buf;
5285}
5286
5287/*
5288 * Return the DC interrupt name.
5289 */
5290static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5291{
5292 static const char * const dc_int_names[] = {
5293 "common",
5294 "lcb",
5295 "8051",
5296 "lbm" /* local block merge */
5297 };
5298
5299 if (source < ARRAY_SIZE(dc_int_names))
5300 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5301 else
5302 snprintf(buf, bsize, "DCInt%u", source);
5303 return buf;
5304}
5305
5306static const char * const sdma_int_names[] = {
5307 "SDmaInt",
5308 "SdmaIdleInt",
5309 "SdmaProgressInt",
5310};
5311
5312/*
5313 * Return the SDMA engine interrupt name.
5314 */
5315static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5316{
5317 /* what interrupt */
5318 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5319 /* which engine */
5320 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5321
5322 if (likely(what < 3))
5323 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5324 else
5325 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5326 return buf;
5327}
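
/*
 * Example (assuming TXE_NUM_SDMA_ENGINES == 16): source 17 decodes to
 * what == 1, which == 1, i.e. "SdmaIdleInt1".
 */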
5328
5329/*
5330 * Return the receive available interrupt name.
5331 */
5332static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5333{
5334 snprintf(buf, bsize, "RcvAvailInt%u", source);
5335 return buf;
5336}
5337
5338/*
5339 * Return the receive urgent interrupt name.
5340 */
5341static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5342{
5343 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5344 return buf;
5345}
5346
5347/*
5348 * Return the send credit interrupt name.
5349 */
5350static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5351{
5352 snprintf(buf, bsize, "SendCreditInt%u", source);
5353 return buf;
5354}
5355
5356/*
5357 * Return the reserved interrupt name.
5358 */
5359static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5360{
5361 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5362 return buf;
5363}
5364
5365static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5366{
5367 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5368 cce_err_status_flags,
5369 ARRAY_SIZE(cce_err_status_flags));
77241056
MM
5370}
5371
5372static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5373{
5374 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5375 rxe_err_status_flags,
5376 ARRAY_SIZE(rxe_err_status_flags));
77241056
MM
5377}
5378
5379static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5380{
5381 return flag_string(buf, buf_len, flags, misc_err_status_flags,
17fb4f29 5382 ARRAY_SIZE(misc_err_status_flags));
77241056
MM
5383}
5384
5385static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5386{
5387 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5388 pio_err_status_flags,
5389 ARRAY_SIZE(pio_err_status_flags));
77241056
MM
5390}
5391
5392static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5393{
5394 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5395 sdma_err_status_flags,
5396 ARRAY_SIZE(sdma_err_status_flags));
77241056
MM
5397}
5398
5399static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5400{
5401 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5402 egress_err_status_flags,
5403 ARRAY_SIZE(egress_err_status_flags));
77241056
MM
5404}
5405
5406static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5407{
5408 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5409 egress_err_info_flags,
5410 ARRAY_SIZE(egress_err_info_flags));
77241056
MM
5411}
5412
5413static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5414{
5415 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5416 send_err_status_flags,
5417 ARRAY_SIZE(send_err_status_flags));
77241056
MM
5418}
5419
5420static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5421{
5422 char buf[96];
2c5b521a 5423 int i = 0;
77241056
MM
5424
5425 /*
5426 * For most of these errors, there is nothing that can be done except
5427 * report or record them.
5428 */
5429 dd_dev_info(dd, "CCE Error: %s\n",
17fb4f29 5430 cce_err_status_string(buf, sizeof(buf), reg));
77241056 5431
995deafa
MM
5432 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5433 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
77241056
MM
5434 /* this error requires a manual drop into SPC freeze mode */
5435 /* then a fix up */
5436 start_freeze_handling(dd->pport, FREEZE_SELF);
5437 }
2c5b521a
JR
5438
5439 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5440 if (reg & (1ull << i)) {
5441 incr_cntr64(&dd->cce_err_status_cnt[i]);
5442 /* maintain a counter over all cce_err_status errors */
5443 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5444 }
5445 }
77241056
MM
5446}
5447
5448/*
5449 * Check counters for receive errors that do not have an interrupt
5450 * associated with them.
5451 */
5452#define RCVERR_CHECK_TIME 10
5453static void update_rcverr_timer(unsigned long opaque)
5454{
5455 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5456 struct hfi1_pportdata *ppd = dd->pport;
5457 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5458
5459 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
17fb4f29 5460 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
77241056 5461 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
17fb4f29
JJ
5462 set_link_down_reason(
5463 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5464 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
77241056
MM
5465 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5466 }
50e5dcbe 5467 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
77241056
MM
5468
5469 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5470}
5471
5472static int init_rcverr(struct hfi1_devdata *dd)
5473{
24523a94 5474 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
77241056
MM
5475 /* Assume the hardware counter has been reset */
5476 dd->rcv_ovfl_cnt = 0;
5477 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5478}
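
/*
 * Note: update_rcverr_timer() re-arms itself, so once init_rcverr() starts
 * the timer the overflow check repeats every RCVERR_CHECK_TIME seconds
 * until free_rcverr() deletes it.
 */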
5479
5480static void free_rcverr(struct hfi1_devdata *dd)
5481{
5482 if (dd->rcverr_timer.data)
5483 del_timer_sync(&dd->rcverr_timer);
5484 dd->rcverr_timer.data = 0;
5485}
5486
5487static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5488{
5489 char buf[96];
2c5b521a 5490 int i = 0;
77241056
MM
5491
5492 dd_dev_info(dd, "Receive Error: %s\n",
17fb4f29 5493 rxe_err_status_string(buf, sizeof(buf), reg));
77241056
MM
5494
5495 if (reg & ALL_RXE_FREEZE_ERR) {
5496 int flags = 0;
5497
5498 /*
5499 * Freeze mode recovery is disabled for the errors
5500 * in RXE_FREEZE_ABORT_MASK
5501 */
995deafa 5502 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
77241056
MM
5503 flags = FREEZE_ABORT;
5504
5505 start_freeze_handling(dd->pport, flags);
5506 }
2c5b521a
JR
5507
5508 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5509 if (reg & (1ull << i))
5510 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5511 }
77241056
MM
5512}
5513
5514static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5515{
5516 char buf[96];
2c5b521a 5517 int i = 0;
77241056
MM
5518
5519 dd_dev_info(dd, "Misc Error: %s",
17fb4f29 5520 misc_err_status_string(buf, sizeof(buf), reg));
2c5b521a
JR
5521 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5522 if (reg & (1ull << i))
5523 incr_cntr64(&dd->misc_err_status_cnt[i]);
5524 }
77241056
MM
5525}
5526
5527static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5528{
5529 char buf[96];
2c5b521a 5530 int i = 0;
77241056
MM
5531
5532 dd_dev_info(dd, "PIO Error: %s\n",
17fb4f29 5533 pio_err_status_string(buf, sizeof(buf), reg));
77241056
MM
5534
5535 if (reg & ALL_PIO_FREEZE_ERR)
5536 start_freeze_handling(dd->pport, 0);
2c5b521a
JR
5537
5538 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5539 if (reg & (1ull << i))
5540 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5541 }
77241056
MM
5542}
5543
5544static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5545{
5546 char buf[96];
2c5b521a 5547 int i = 0;
77241056
MM
5548
5549 dd_dev_info(dd, "SDMA Error: %s\n",
17fb4f29 5550 sdma_err_status_string(buf, sizeof(buf), reg));
77241056
MM
5551
5552 if (reg & ALL_SDMA_FREEZE_ERR)
5553 start_freeze_handling(dd->pport, 0);
2c5b521a
JR
5554
5555 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5556 if (reg & (1ull << i))
5557 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5558 }
77241056
MM
5559}
5560
69a00b8e 5561static inline void __count_port_discards(struct hfi1_pportdata *ppd)
77241056 5562{
69a00b8e
MM
5563 incr_cntr64(&ppd->port_xmit_discards);
5564}
77241056 5565
69a00b8e
MM
5566static void count_port_inactive(struct hfi1_devdata *dd)
5567{
5568 __count_port_discards(dd->pport);
77241056
MM
5569}
5570
5571/*
5572 * We have had a "disallowed packet" error during egress. Determine the
5573 * integrity check which failed, and update relevant error counter, etc.
5574 *
5575 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5576 * bit of state per integrity check, and so we can miss the reason for an
5577 * egress error if more than one packet fails the same integrity check
5578 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5579 */
69a00b8e
MM
5580static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5581 int vl)
77241056
MM
5582{
5583 struct hfi1_pportdata *ppd = dd->pport;
5584 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5585 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5586 char buf[96];
5587
5588 /* clear down all observed info as quickly as possible after read */
5589 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5590
5591 dd_dev_info(dd,
17fb4f29
JJ
5592 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5593 info, egress_err_info_string(buf, sizeof(buf), info), src);
77241056
MM
5594
5595 /* Eventually add other counters for each bit */
69a00b8e
MM
5596 if (info & PORT_DISCARD_EGRESS_ERRS) {
5597 int weight, i;
77241056 5598
69a00b8e 5599 /*
4c9e7aac
DL
5600 * Count all applicable bits as individual errors and
5601 * attribute them to the packet that triggered this handler.
5602 * This may not be completely accurate due to limitations
5603 * on the available hardware error information. There is
5604 * a single information register and any number of error
5605 * packets may have occurred and contributed to it before
5606 * this routine is called. This means that:
5607 * a) If multiple packets with the same error occur before
5608 * this routine is called, earlier packets are missed.
5609 * There is only a single bit for each error type.
5610 * b) Errors may not be attributed to the correct VL.
5611 * The driver is attributing all bits in the info register
5612 * to the packet that triggered this call, but bits
5613 * could be an accumulation of different packets with
5614 * different VLs.
5615 * c) A single error packet may have multiple counts attached
5616 * to it. There is no way for the driver to know if
5617 * multiple bits set in the info register are due to a
5618 * single packet or multiple packets. The driver assumes
5619 * multiple packets.
69a00b8e 5620 */
4c9e7aac 5621 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
69a00b8e
MM
5622 for (i = 0; i < weight; i++) {
5623 __count_port_discards(ppd);
5624 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5625 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5626 else if (vl == 15)
5627 incr_cntr64(&ppd->port_xmit_discards_vl
5628 [C_VL_15]);
5629 }
77241056
MM
5630 }
5631}
5632
5633/*
5634 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5635 * register. Does it represent a 'port inactive' error?
5636 */
5637static inline int port_inactive_err(u64 posn)
5638{
5639 return (posn >= SEES(TX_LINKDOWN) &&
5640 posn <= SEES(TX_INCORRECT_LINK_STATE));
5641}
5642
5643/*
5644 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5645 * register. Does it represent a 'disallowed packet' error?
5646 */
69a00b8e 5647static inline int disallowed_pkt_err(int posn)
77241056
MM
5648{
5649 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5650 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5651}
5652
69a00b8e
MM
5653/*
5654 * Input value is a bit position of one of the SDMA engine disallowed
5655 * packet errors. Return which engine. Use of this must be guarded by
5656 * disallowed_pkt_err().
5657 */
5658static inline int disallowed_pkt_engine(int posn)
5659{
5660 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5661}
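
/*
 * Example: posn == SEES(TX_SDMA3_DISALLOWED_PACKET) yields engine 3, which
 * engine_to_vl() below maps to a VL (or -1 if no mapping exists).
 */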
5662
5663/*
5664 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5665 * be done.
5666 */
5667static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5668{
5669 struct sdma_vl_map *m;
5670 int vl;
5671
5672 /* range check */
5673 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5674 return -1;
5675
5676 rcu_read_lock();
5677 m = rcu_dereference(dd->sdma_map);
5678 vl = m->engine_to_vl[engine];
5679 rcu_read_unlock();
5680
5681 return vl;
5682}
5683
5684/*
5685 * Translate the send context (software index) into a VL. Return -1 if the
5686 * translation cannot be done.
5687 */
5688static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5689{
5690 struct send_context_info *sci;
5691 struct send_context *sc;
5692 int i;
5693
5694 sci = &dd->send_contexts[sw_index];
5695
5696 /* there is no information for user (PSM) and ack contexts */
44306f15 5697 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
69a00b8e
MM
5698 return -1;
5699
5700 sc = sci->sc;
5701 if (!sc)
5702 return -1;
5703 if (dd->vld[15].sc == sc)
5704 return 15;
5705 for (i = 0; i < num_vls; i++)
5706 if (dd->vld[i].sc == sc)
5707 return i;
5708
5709 return -1;
5710}
5711
77241056
MM
5712static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5713{
5714 u64 reg_copy = reg, handled = 0;
5715 char buf[96];
2c5b521a 5716 int i = 0;
77241056
MM
5717
5718 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5719 start_freeze_handling(dd->pport, 0);
69a00b8e
MM
5720 else if (is_ax(dd) &&
5721 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5722 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
77241056
MM
5723 start_freeze_handling(dd->pport, 0);
5724
5725 while (reg_copy) {
5726 int posn = fls64(reg_copy);
69a00b8e 5727 /* fls64() returns a 1-based offset; we want it zero-based */
77241056 5728 int shift = posn - 1;
69a00b8e 5729 u64 mask = 1ULL << shift;
77241056
MM
5730
5731 if (port_inactive_err(shift)) {
5732 count_port_inactive(dd);
69a00b8e 5733 handled |= mask;
77241056 5734 } else if (disallowed_pkt_err(shift)) {
69a00b8e
MM
5735 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5736
5737 handle_send_egress_err_info(dd, vl);
5738 handled |= mask;
77241056 5739 }
69a00b8e 5740 reg_copy &= ~mask;
77241056
MM
5741 }
5742
5743 reg &= ~handled;
5744
5745 if (reg)
5746 dd_dev_info(dd, "Egress Error: %s\n",
17fb4f29 5747 egress_err_status_string(buf, sizeof(buf), reg));
2c5b521a
JR
5748
5749 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5750 if (reg & (1ull << i))
5751 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5752 }
77241056
MM
5753}
5754
5755static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5756{
5757 char buf[96];
2c5b521a 5758 int i = 0;
77241056
MM
5759
5760 dd_dev_info(dd, "Send Error: %s\n",
17fb4f29 5761 send_err_status_string(buf, sizeof(buf), reg));
77241056 5762
2c5b521a
JR
5763 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5764 if (reg & (1ull << i))
5765 incr_cntr64(&dd->send_err_status_cnt[i]);
5766 }
77241056
MM
5767}
5768
5769/*
5770 * The maximum number of times the error clear down will loop before
5771 * blocking a repeating error. This value is arbitrary.
5772 */
5773#define MAX_CLEAR_COUNT 20
5774
5775/*
5776 * Clear and handle an error register. All error interrupts are funneled
5777 * through here to have a central location to correctly handle single-
5778 * or multi-shot errors.
5779 *
5780 * For non per-context registers, call this routine with a context value
5781 * of 0 so the per-context offset is zero.
5782 *
5783 * If the handler loops too many times, assume that something is wrong
5784 * and can't be fixed, so mask the error bits.
5785 */
5786static void interrupt_clear_down(struct hfi1_devdata *dd,
5787 u32 context,
5788 const struct err_reg_info *eri)
5789{
5790 u64 reg;
5791 u32 count;
5792
5793 /* read in a loop until no more errors are seen */
5794 count = 0;
5795 while (1) {
5796 reg = read_kctxt_csr(dd, context, eri->status);
5797 if (reg == 0)
5798 break;
5799 write_kctxt_csr(dd, context, eri->clear, reg);
5800 if (likely(eri->handler))
5801 eri->handler(dd, context, reg);
5802 count++;
5803 if (count > MAX_CLEAR_COUNT) {
5804 u64 mask;
5805
5806 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
17fb4f29 5807 eri->desc, reg);
77241056
MM
5808 /*
5809 * Read-modify-write so any other masked bits
5810 * remain masked.
5811 */
5812 mask = read_kctxt_csr(dd, context, eri->mask);
5813 mask &= ~reg;
5814 write_kctxt_csr(dd, context, eri->mask, mask);
5815 break;
5816 }
5817 }
5818}
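
/*
 * Example: a status bit that keeps re-asserting after being cleared is
 * masked once the loop exceeds MAX_CLEAR_COUNT passes, so a stuck error
 * cannot storm the interrupt handler.
 */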
5819
5820/*
5821 * CCE block "misc" interrupt. Source is < 16.
5822 */
5823static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5824{
5825 const struct err_reg_info *eri = &misc_errs[source];
5826
5827 if (eri->handler) {
5828 interrupt_clear_down(dd, 0, eri);
5829 } else {
5830 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
17fb4f29 5831 source);
77241056
MM
5832 }
5833}
5834
5835static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5836{
5837 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5838 sc_err_status_flags,
5839 ARRAY_SIZE(sc_err_status_flags));
77241056
MM
5840}
5841
5842/*
5843 * Send context error interrupt. Source (hw_context) is < 160.
5844 *
5845 * All send context errors cause the send context to halt. The normal
5846 * clear-down mechanism cannot be used because we cannot clear the
5847 * error bits until several other long-running items are done first.
5848 * This is OK because with the context halted, nothing else is going
5849 * to happen on it anyway.
5850 */
5851static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5852 unsigned int hw_context)
5853{
5854 struct send_context_info *sci;
5855 struct send_context *sc;
5856 char flags[96];
5857 u64 status;
5858 u32 sw_index;
2c5b521a 5859 int i = 0;
77241056
MM
5860
5861 sw_index = dd->hw_to_sw[hw_context];
5862 if (sw_index >= dd->num_send_contexts) {
5863 dd_dev_err(dd,
17fb4f29
JJ
5864 "out of range sw index %u for send context %u\n",
5865 sw_index, hw_context);
77241056
MM
5866 return;
5867 }
5868 sci = &dd->send_contexts[sw_index];
5869 sc = sci->sc;
5870 if (!sc) {
5871 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
17fb4f29 5872 sw_index, hw_context);
77241056
MM
5873 return;
5874 }
5875
5876 /* tell the software that a halt has begun */
5877 sc_stop(sc, SCF_HALTED);
5878
5879 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5880
5881 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
17fb4f29
JJ
5882 send_context_err_status_string(flags, sizeof(flags),
5883 status));
77241056
MM
5884
5885 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
69a00b8e 5886 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
77241056
MM
5887
5888 /*
5889 * Automatically restart halted kernel contexts out of interrupt
5890 * context. User contexts must ask the driver to restart the context.
5891 */
5892 if (sc->type != SC_USER)
5893 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
2c5b521a
JR
5894
5895 /*
5896 * Update the counters for the corresponding status bits.
5897 * Note that these particular counters are aggregated over all
5898 * 160 contexts.
5899 */
5900 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5901 if (status & (1ull << i))
5902 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5903 }
77241056
MM
5904}
5905
5906static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5907 unsigned int source, u64 status)
5908{
5909 struct sdma_engine *sde;
2c5b521a 5910 int i = 0;
77241056
MM
5911
5912 sde = &dd->per_sdma[source];
5913#ifdef CONFIG_SDMA_VERBOSITY
5914 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5915 slashstrip(__FILE__), __LINE__, __func__);
5916 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5917 sde->this_idx, source, (unsigned long long)status);
5918#endif
a699c6c2 5919 sde->err_cnt++;
77241056 5920 sdma_engine_error(sde, status);
2c5b521a
JR
5921
5922 /*
5923 * Update the counters for the corresponding status bits.
5924 * Note that these particular counters are aggregated over
5925 * all 16 DMA engines.
5926 */
5927 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5928 if (status & (1ull << i))
5929 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5930 }
77241056
MM
5931}
5932
5933/*
5934 * CCE block SDMA error interrupt. Source is < 16.
5935 */
5936static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5937{
5938#ifdef CONFIG_SDMA_VERBOSITY
5939 struct sdma_engine *sde = &dd->per_sdma[source];
5940
5941 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5942 slashstrip(__FILE__), __LINE__, __func__);
5943 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5944 source);
5945 sdma_dumpstate(sde);
5946#endif
5947 interrupt_clear_down(dd, source, &sdma_eng_err);
5948}
5949
5950/*
5951 * CCE block "various" interrupt. Source is < 8.
5952 */
5953static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5954{
5955 const struct err_reg_info *eri = &various_err[source];
5956
5957 /*
5958 * TCritInt cannot go through interrupt_clear_down()
5959 * because it is not a second tier interrupt. The handler
5960 * should be called directly.
5961 */
5962 if (source == TCRIT_INT_SOURCE)
5963 handle_temp_err(dd);
5964 else if (eri->handler)
5965 interrupt_clear_down(dd, 0, eri);
5966 else
5967 dd_dev_info(dd,
17fb4f29
JJ
5968 "%s: Unimplemented/reserved interrupt %d\n",
5969 __func__, source);
77241056
MM
5970}
5971
5972static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5973{
8ebd4cf1 5974 /* src_ctx is always zero */
77241056
MM
5975 struct hfi1_pportdata *ppd = dd->pport;
5976 unsigned long flags;
5977 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5978
5979 if (reg & QSFP_HFI0_MODPRST_N) {
77241056 5980 if (!qsfp_mod_present(ppd)) {
e8aa284b
EH
5981 dd_dev_info(dd, "%s: QSFP module removed\n",
5982 __func__);
5983
77241056
MM
5984 ppd->driver_link_ready = 0;
5985 /*
5986 * Cable removed, reset all our information about the
5987 * cache and cable capabilities
5988 */
5989
5990 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5991 /*
5992 * We don't set cache_refresh_required here as we expect
5993 * an interrupt when a cable is inserted
5994 */
5995 ppd->qsfp_info.cache_valid = 0;
8ebd4cf1
EH
5996 ppd->qsfp_info.reset_needed = 0;
5997 ppd->qsfp_info.limiting_active = 0;
77241056 5998 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
17fb4f29 5999 flags);
8ebd4cf1
EH
6000 /* Invert the ModPresent pin now to detect plug-in */
6001 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6002 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
a9c05e35
BM
6003
6004 if ((ppd->offline_disabled_reason >
6005 HFI1_ODR_MASK(
e1bf0d5e 6006 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
a9c05e35
BM
6007 (ppd->offline_disabled_reason ==
6008 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6009 ppd->offline_disabled_reason =
6010 HFI1_ODR_MASK(
e1bf0d5e 6011 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
a9c05e35 6012
77241056
MM
6013 if (ppd->host_link_state == HLS_DN_POLL) {
6014 /*
6015 * The link is still in POLL. This means
6016 * that the normal link down processing
6017 * will not happen. We have to do it here
6018 * before turning the DC off.
6019 */
6020 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
6021 }
6022 } else {
e8aa284b
EH
6023 dd_dev_info(dd, "%s: QSFP module inserted\n",
6024 __func__);
6025
77241056
MM
6026 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6027 ppd->qsfp_info.cache_valid = 0;
6028 ppd->qsfp_info.cache_refresh_required = 1;
6029 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
17fb4f29 6030 flags);
77241056 6031
8ebd4cf1
EH
6032 /*
6033 * Stop inversion of ModPresent pin to detect
6034 * removal of the cable
6035 */
77241056 6036 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
8ebd4cf1
EH
6037 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6038 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6039
6040 ppd->offline_disabled_reason =
6041 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
77241056
MM
6042 }
6043 }
6044
6045 if (reg & QSFP_HFI0_INT_N) {
e8aa284b 6046 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
17fb4f29 6047 __func__);
77241056
MM
6048 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6049 ppd->qsfp_info.check_interrupt_flags = 1;
77241056
MM
6050 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6051 }
6052
6053 /* Schedule the QSFP work only if there is a cable attached. */
6054 if (qsfp_mod_present(ppd))
6055 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6056}
6057
6058static int request_host_lcb_access(struct hfi1_devdata *dd)
6059{
6060 int ret;
6061
6062 ret = do_8051_command(dd, HCMD_MISC,
17fb4f29
JJ
6063 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6064 LOAD_DATA_FIELD_ID_SHIFT, NULL);
77241056
MM
6065 if (ret != HCMD_SUCCESS) {
6066 dd_dev_err(dd, "%s: command failed with error %d\n",
17fb4f29 6067 __func__, ret);
77241056
MM
6068 }
6069 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6070}
6071
6072static int request_8051_lcb_access(struct hfi1_devdata *dd)
6073{
6074 int ret;
6075
6076 ret = do_8051_command(dd, HCMD_MISC,
17fb4f29
JJ
6077 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6078 LOAD_DATA_FIELD_ID_SHIFT, NULL);
77241056
MM
6079 if (ret != HCMD_SUCCESS) {
6080 dd_dev_err(dd, "%s: command failed with error %d\n",
17fb4f29 6081 __func__, ret);
77241056
MM
6082 }
6083 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6084}
6085
6086/*
6087 * Set the LCB selector - allow host access. The DCC selector always
6088 * points to the host.
6089 */
6090static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6091{
6092 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
17fb4f29
JJ
6093 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6094 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
77241056
MM
6095}
6096
6097/*
6098 * Clear the LCB selector - allow 8051 access. The DCC selector always
6099 * points to the host.
6100 */
6101static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6102{
6103 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
17fb4f29 6104 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
77241056
MM
6105}
6106
6107/*
6108 * Acquire LCB access from the 8051. If the host already has access,
6109 * just increment a counter. Otherwise, inform the 8051 that the
6110 * host is taking access.
6111 *
6112 * Returns:
6113 * 0 on success
6114 * -EBUSY if the 8051 has control and cannot be disturbed
6115 * -errno if unable to acquire access from the 8051
6116 */
6117int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6118{
6119 struct hfi1_pportdata *ppd = dd->pport;
6120 int ret = 0;
6121
6122 /*
6123 * Use the host link state lock so the operation of this routine
6124 * { link state check, selector change, count increment } can occur
6125 * as a unit against a link state change. Otherwise there is a
6126 * race between the state change and the count increment.
6127 */
6128 if (sleep_ok) {
6129 mutex_lock(&ppd->hls_lock);
6130 } else {
951842b0 6131 while (!mutex_trylock(&ppd->hls_lock))
77241056
MM
6132 udelay(1);
6133 }
6134
6135 /* this access is valid only when the link is up */
0c7f77af 6136 if (ppd->host_link_state & HLS_DOWN) {
77241056 6137 dd_dev_info(dd, "%s: link state %s not up\n",
17fb4f29 6138 __func__, link_state_name(ppd->host_link_state));
77241056
MM
6139 ret = -EBUSY;
6140 goto done;
6141 }
6142
6143 if (dd->lcb_access_count == 0) {
6144 ret = request_host_lcb_access(dd);
6145 if (ret) {
6146 dd_dev_err(dd,
17fb4f29
JJ
6147 "%s: unable to acquire LCB access, err %d\n",
6148 __func__, ret);
77241056
MM
6149 goto done;
6150 }
6151 set_host_lcb_access(dd);
6152 }
6153 dd->lcb_access_count++;
6154done:
6155 mutex_unlock(&ppd->hls_lock);
6156 return ret;
6157}
6158
6159/*
6160 * Release LCB access by decrementing the use count. If the count is moving
6161 * from 1 to 0, inform 8051 that it has control back.
6162 *
6163 * Returns:
6164 * 0 on success
6165 * -errno if unable to release access to the 8051
6166 */
6167int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6168{
6169 int ret = 0;
6170
6171 /*
6172 * Use the host link state lock because the acquire needed it.
6173 * Here, we only need to keep { selector change, count decrement }
6174 * as a unit.
6175 */
6176 if (sleep_ok) {
6177 mutex_lock(&dd->pport->hls_lock);
6178 } else {
951842b0 6179 while (!mutex_trylock(&dd->pport->hls_lock))
77241056
MM
6180 udelay(1);
6181 }
6182
6183 if (dd->lcb_access_count == 0) {
6184 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
17fb4f29 6185 __func__);
77241056
MM
6186 goto done;
6187 }
6188
6189 if (dd->lcb_access_count == 1) {
6190 set_8051_lcb_access(dd);
6191 ret = request_8051_lcb_access(dd);
6192 if (ret) {
6193 dd_dev_err(dd,
17fb4f29
JJ
6194 "%s: unable to release LCB access, err %d\n",
6195 __func__, ret);
77241056
MM
6196 /* restore host access if the grant didn't work */
6197 set_host_lcb_access(dd);
6198 goto done;
6199 }
6200 }
6201 dd->lcb_access_count--;
6202done:
6203 mutex_unlock(&dd->pport->hls_lock);
6204 return ret;
6205}
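
/*
 * Usage sketch (hypothetical caller): bracket direct host LCB CSR access
 * so ownership is handed back to the 8051 when done.
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		reg = read_csr(dd, DC_LCB_CFG_RUN);
 *		release_lcb_access(dd, 1);
 *	}
 */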
6206
6207/*
6208 * Initialize LCB access variables and state. Called during driver load,
6209 * after most of the initialization is finished.
6210 *
6211 * The DC default is LCB access on for the host. The driver defaults to
6212 * leaving access to the 8051. Assign access now - this constrains the call
6213 * to this routine to be after all LCB set-up is done. In particular, after
6214 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6215 */
6216static void init_lcb_access(struct hfi1_devdata *dd)
6217{
6218 dd->lcb_access_count = 0;
6219}
6220
6221/*
6222 * Write a response back to an 8051 request.
6223 */
6224static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6225{
6226 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
17fb4f29
JJ
6227 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6228 (u64)return_code <<
6229 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6230 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
77241056
MM
6231}
6232
6233/*
cbac386a 6234 * Handle host requests from the 8051.
77241056 6235 */
145dd2b3 6236static void handle_8051_request(struct hfi1_pportdata *ppd)
77241056 6237{
cbac386a 6238 struct hfi1_devdata *dd = ppd->dd;
77241056 6239 u64 reg;
cbac386a 6240 u16 data = 0;
145dd2b3 6241 u8 type;
77241056
MM
6242
6243 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6244 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6245 return; /* no request */
6246
6247 /* zero out COMPLETED so the response is seen */
6248 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6249
6250 /* extract request details */
6251 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6252 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6253 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6254 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6255
6256 switch (type) {
6257 case HREQ_LOAD_CONFIG:
6258 case HREQ_SAVE_CONFIG:
6259 case HREQ_READ_CONFIG:
6260 case HREQ_SET_TX_EQ_ABS:
6261 case HREQ_SET_TX_EQ_REL:
145dd2b3 6262 case HREQ_ENABLE:
77241056 6263 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
17fb4f29 6264 type);
77241056
MM
6265 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6266 break;
77241056
MM
6267 case HREQ_CONFIG_DONE:
6268 hreq_response(dd, HREQ_SUCCESS, 0);
6269 break;
6270
6271 case HREQ_INTERFACE_TEST:
6272 hreq_response(dd, HREQ_SUCCESS, data);
6273 break;
77241056
MM
6274 default:
6275 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6276 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6277 break;
6278 }
6279}
6280
6281static void write_global_credit(struct hfi1_devdata *dd,
6282 u8 vau, u16 total, u16 shared)
6283{
6284 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
17fb4f29
JJ
6285 ((u64)total <<
6286 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6287 ((u64)shared <<
6288 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6289 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
77241056
MM
6290}
6291
6292/*
6293 * Set up initial VL15 credits of the remote. Assumes the rest of
6294 * the CM credit registers are zero from a previous global or credit reset.
6295 */
6296void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6297{
6298 /* leave shared count at zero for both global and VL15 */
6299 write_global_credit(dd, vau, vl15buf, 0);
6300
6301 /* We may need some credits for another VL when sending packets
6302 * with the snoop interface. Dividing it down the middle for VL15
6303 * and VL0 should suffice.
6304 */
6305 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6306 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6307 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6308 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6309 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6310 } else {
6311 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6312 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6313 }
6314}
6315
6316/*
6317 * Zero all credit details from the previous connection and
6318 * reset the CM manager's internal counters.
6319 */
6320void reset_link_credits(struct hfi1_devdata *dd)
6321{
6322 int i;
6323
6324 /* remove all previous VL credit limits */
6325 for (i = 0; i < TXE_NUM_DATA_VL; i++)
8638b77f 6326 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6327 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6328 write_global_credit(dd, 0, 0, 0);
6329 /* reset the CM block */
6330 pio_send_control(dd, PSC_CM_RESET);
6331}
6332
6333/* convert a vCU to a CU */
6334static u32 vcu_to_cu(u8 vcu)
6335{
6336 return 1 << vcu;
6337}
6338
6339/* convert a CU to a vCU */
6340static u8 cu_to_vcu(u32 cu)
6341{
6342 return ilog2(cu);
6343}
6344
6345/* convert a vAU to an AU */
6346static u32 vau_to_au(u8 vau)
6347{
6348 return 8 * (1 << vau);
6349}
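
The vAU/vCU encodings above are plain power-of-two scalings: AU = 8 * 2^vAU bytes and CU = 2^vCU credits. The following is a minimal userspace sketch (illustrative only, not driver code) that evaluates the same formulas for a few values:

/* Illustrative only: mirrors the math in vau_to_au() and vcu_to_cu(). */
#include <stdio.h>

static unsigned int example_vau_to_au(unsigned int vau)
{
	return 8 * (1u << vau);		/* AU = 8 * 2^vAU bytes */
}

static unsigned int example_vcu_to_cu(unsigned int vcu)
{
	return 1u << vcu;		/* CU = 2^vCU credits */
}

int main(void)
{
	unsigned int v;

	/* vAU 0..3 -> AU 8, 16, 32, 64 bytes */
	for (v = 0; v <= 3; v++)
		printf("vAU %u -> AU %u bytes\n", v, example_vau_to_au(v));
	/* vCU 0..2 -> CU 1, 2, 4 credits */
	for (v = 0; v <= 2; v++)
		printf("vCU %u -> CU %u credits\n", v, example_vcu_to_cu(v));
	return 0;
}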
6350
6351static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6352{
6353 ppd->sm_trap_qp = 0x0;
6354 ppd->sa_qp = 0x1;
6355}
6356
6357/*
6358 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6359 */
6360static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6361{
6362 u64 reg;
6363
6364 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6365 write_csr(dd, DC_LCB_CFG_RUN, 0);
6366 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6367 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
17fb4f29 6368 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6369 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6370 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6371 reg = read_csr(dd, DCC_CFG_RESET);
6372 write_csr(dd, DCC_CFG_RESET, reg |
6373 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6374 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
50e5dcbe 6375 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6376 if (!abort) {
6377 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6378 write_csr(dd, DCC_CFG_RESET, reg);
6379 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6380 }
6381}
6382
6383/*
6384 * This routine should be called after the link has been transitioned to
6385 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6386 * reset).
6387 *
6388 * The expectation is that the caller of this routine would have taken
6389 * care of properly transitioning the link into the correct state.
6390 */
6391static void dc_shutdown(struct hfi1_devdata *dd)
6392{
6393 unsigned long flags;
6394
6395 spin_lock_irqsave(&dd->dc8051_lock, flags);
6396 if (dd->dc_shutdown) {
6397 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6398 return;
6399 }
6400 dd->dc_shutdown = 1;
6401 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6402 /* Shutdown the LCB */
6403 lcb_shutdown(dd, 1);
6404 /*
 6405 * Going to OFFLINE would have caused the 8051 to put the
77241056 6406 * SerDes into reset already. Just need to shut down the 8051
 6407 * itself.
6408 */
6409 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6410}
6411
6412/*
6413 * Calling this after the DC has been brought out of reset should not
6414 * do any damage.
6415 */
6416static void dc_start(struct hfi1_devdata *dd)
6417{
6418 unsigned long flags;
6419 int ret;
6420
6421 spin_lock_irqsave(&dd->dc8051_lock, flags);
6422 if (!dd->dc_shutdown)
6423 goto done;
6424 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6425 /* Take the 8051 out of reset */
6426 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6427 /* Wait until 8051 is ready */
6428 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6429 if (ret) {
6430 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
17fb4f29 6431 __func__);
6432 }
6433 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6434 write_csr(dd, DCC_CFG_RESET, 0x10);
6435 /* lcb_shutdown() with abort=1 does not restore these */
6436 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6437 spin_lock_irqsave(&dd->dc8051_lock, flags);
6438 dd->dc_shutdown = 0;
6439done:
6440 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6441}
6442
6443/*
6444 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6445 */
6446static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6447{
6448 u64 rx_radr, tx_radr;
6449 u32 version;
6450
6451 if (dd->icode != ICODE_FPGA_EMULATION)
6452 return;
6453
6454 /*
6455 * These LCB defaults on emulator _s are good, nothing to do here:
6456 * LCB_CFG_TX_FIFOS_RADR
6457 * LCB_CFG_RX_FIFOS_RADR
6458 * LCB_CFG_LN_DCLK
6459 * LCB_CFG_IGNORE_LOST_RCLK
6460 */
6461 if (is_emulator_s(dd))
6462 return;
6463 /* else this is _p */
6464
6465 version = emulator_rev(dd);
995deafa 6466 if (!is_ax(dd))
6467 version = 0x2d; /* all B0 use 0x2d or higher settings */
6468
6469 if (version <= 0x12) {
6470 /* release 0x12 and below */
6471
6472 /*
6473 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6474 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6475 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6476 */
6477 rx_radr =
6478 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6479 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6480 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6481 /*
6482 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6483 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6484 */
6485 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6486 } else if (version <= 0x18) {
6487 /* release 0x13 up to 0x18 */
6488 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6489 rx_radr =
6490 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6491 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6492 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6493 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6494 } else if (version == 0x19) {
6495 /* release 0x19 */
6496 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6497 rx_radr =
6498 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6499 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6500 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6501 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6502 } else if (version == 0x1a) {
6503 /* release 0x1a */
6504 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6505 rx_radr =
6506 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6507 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6508 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6509 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6510 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6511 } else {
6512 /* release 0x1b and higher */
6513 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6514 rx_radr =
6515 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6516 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6517 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6518 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6519 }
6520
6521 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6522 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6523 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
17fb4f29 6524 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6525 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6526}
6527
6528/*
6529 * Handle a SMA idle message
6530 *
6531 * This is a work-queue function outside of the interrupt.
6532 */
6533void handle_sma_message(struct work_struct *work)
6534{
6535 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6536 sma_message_work);
6537 struct hfi1_devdata *dd = ppd->dd;
6538 u64 msg;
6539 int ret;
6540
6541 /*
6542 * msg is bytes 1-4 of the 40-bit idle message - the command code
6543 * is stripped off
6544 */
6545 ret = read_idle_sma(dd, &msg);
6546 if (ret)
6547 return;
6548 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6549 /*
6550 * React to the SMA message. Byte[1] (0 for us) is the command.
6551 */
6552 switch (msg & 0xff) {
6553 case SMA_IDLE_ARM:
6554 /*
6555 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6556 * State Transitions
6557 *
6558 * Only expected in INIT or ARMED, discard otherwise.
6559 */
6560 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6561 ppd->neighbor_normal = 1;
6562 break;
6563 case SMA_IDLE_ACTIVE:
6564 /*
6565 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6566 * State Transitions
6567 *
6568 * Can activate the node. Discard otherwise.
6569 */
6570 if (ppd->host_link_state == HLS_UP_ARMED &&
6571 ppd->is_active_optimize_enabled) {
6572 ppd->neighbor_normal = 1;
6573 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6574 if (ret)
6575 dd_dev_err(
6576 dd,
6577 "%s: received Active SMA idle message, couldn't set link to Active\n",
6578 __func__);
6579 }
6580 break;
6581 default:
6582 dd_dev_err(dd,
6583 "%s: received unexpected SMA idle message 0x%llx\n",
6584 __func__, msg);
6585 break;
6586 }
6587}
6588
6589static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6590{
6591 u64 rcvctrl;
6592 unsigned long flags;
6593
6594 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6595 rcvctrl = read_csr(dd, RCV_CTRL);
6596 rcvctrl |= add;
6597 rcvctrl &= ~clear;
6598 write_csr(dd, RCV_CTRL, rcvctrl);
6599 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6600}
6601
6602static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6603{
6604 adjust_rcvctrl(dd, add, 0);
6605}
6606
6607static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6608{
6609 adjust_rcvctrl(dd, 0, clear);
6610}
6611
6612/*
6613 * Called from all interrupt handlers to start handling an SPC freeze.
6614 */
6615void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6616{
6617 struct hfi1_devdata *dd = ppd->dd;
6618 struct send_context *sc;
6619 int i;
6620
6621 if (flags & FREEZE_SELF)
6622 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6623
6624 /* enter frozen mode */
6625 dd->flags |= HFI1_FROZEN;
6626
6627 /* notify all SDMA engines that they are going into a freeze */
6628 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6629
6630 /* do halt pre-handling on all enabled send contexts */
6631 for (i = 0; i < dd->num_send_contexts; i++) {
6632 sc = dd->send_contexts[i].sc;
6633 if (sc && (sc->flags & SCF_ENABLED))
6634 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6635 }
6636
 6637 /* Send contexts are frozen. Notify user space */
6638 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6639
6640 if (flags & FREEZE_ABORT) {
6641 dd_dev_err(dd,
6642 "Aborted freeze recovery. Please REBOOT system\n");
6643 return;
6644 }
6645 /* queue non-interrupt handler */
6646 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6647}
6648
6649/*
6650 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6651 * depending on the "freeze" parameter.
6652 *
6653 * No need to return an error if it times out, our only option
6654 * is to proceed anyway.
6655 */
6656static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6657{
6658 unsigned long timeout;
6659 u64 reg;
6660
6661 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6662 while (1) {
6663 reg = read_csr(dd, CCE_STATUS);
6664 if (freeze) {
6665 /* waiting until all indicators are set */
6666 if ((reg & ALL_FROZE) == ALL_FROZE)
6667 return; /* all done */
6668 } else {
6669 /* waiting until all indicators are clear */
6670 if ((reg & ALL_FROZE) == 0)
6671 return; /* all done */
6672 }
6673
6674 if (time_after(jiffies, timeout)) {
6675 dd_dev_err(dd,
6676 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6677 freeze ? "" : "un", reg & ALL_FROZE,
6678 freeze ? ALL_FROZE : 0ull);
6679 return;
6680 }
6681 usleep_range(80, 120);
6682 }
6683}
6684
6685/*
6686 * Do all freeze handling for the RXE block.
6687 */
6688static void rxe_freeze(struct hfi1_devdata *dd)
6689{
6690 int i;
6691
6692 /* disable port */
6693 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6694
6695 /* disable all receive contexts */
6696 for (i = 0; i < dd->num_rcv_contexts; i++)
6697 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6698}
6699
6700/*
6701 * Unfreeze handling for the RXE block - kernel contexts only.
6702 * This will also enable the port. User contexts will do unfreeze
6703 * handling on a per-context basis as they call into the driver.
6704 *
6705 */
6706static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6707{
566c157c 6708 u32 rcvmask;
6709 int i;
6710
6711 /* enable all kernel contexts */
6712 for (i = 0; i < dd->n_krcv_queues; i++) {
6713 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6714 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6715 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6716 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6717 hfi1_rcvctrl(dd, rcvmask, i);
6718 }
6719
6720 /* enable port */
6721 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6722}
6723
6724/*
6725 * Non-interrupt SPC freeze handling.
6726 *
6727 * This is a work-queue function outside of the triggering interrupt.
6728 */
6729void handle_freeze(struct work_struct *work)
6730{
6731 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6732 freeze_work);
6733 struct hfi1_devdata *dd = ppd->dd;
6734
6735 /* wait for freeze indicators on all affected blocks */
6736 wait_for_freeze_status(dd, 1);
6737
6738 /* SPC is now frozen */
6739
6740 /* do send PIO freeze steps */
6741 pio_freeze(dd);
6742
6743 /* do send DMA freeze steps */
6744 sdma_freeze(dd);
6745
6746 /* do send egress freeze steps - nothing to do */
6747
6748 /* do receive freeze steps */
6749 rxe_freeze(dd);
6750
6751 /*
6752 * Unfreeze the hardware - clear the freeze, wait for each
6753 * block's frozen bit to clear, then clear the frozen flag.
6754 */
6755 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6756 wait_for_freeze_status(dd, 0);
6757
995deafa 6758 if (is_ax(dd)) {
6759 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6760 wait_for_freeze_status(dd, 1);
6761 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6762 wait_for_freeze_status(dd, 0);
6763 }
6764
6765 /* do send PIO unfreeze steps for kernel contexts */
6766 pio_kernel_unfreeze(dd);
6767
6768 /* do send DMA unfreeze steps */
6769 sdma_unfreeze(dd);
6770
6771 /* do send egress unfreeze steps - nothing to do */
6772
6773 /* do receive unfreeze steps for kernel contexts */
6774 rxe_kernel_unfreeze(dd);
6775
6776 /*
6777 * The unfreeze procedure touches global device registers when
6778 * it disables and re-enables RXE. Mark the device unfrozen
6779 * after all that is done so other parts of the driver waiting
6780 * for the device to unfreeze don't do things out of order.
6781 *
6782 * The above implies that the meaning of HFI1_FROZEN flag is
6783 * "Device has gone into freeze mode and freeze mode handling
6784 * is still in progress."
6785 *
6786 * The flag will be removed when freeze mode processing has
6787 * completed.
6788 */
6789 dd->flags &= ~HFI1_FROZEN;
6790 wake_up(&dd->event_queue);
6791
6792 /* no longer frozen */
6793}
6794
6795/*
6796 * Handle a link up interrupt from the 8051.
6797 *
6798 * This is a work-queue function outside of the interrupt.
6799 */
6800void handle_link_up(struct work_struct *work)
6801{
6802 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
17fb4f29 6803 link_up_work);
6804 set_link_state(ppd, HLS_UP_INIT);
6805
6806 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6807 read_ltp_rtt(ppd->dd);
6808 /*
6809 * OPA specifies that certain counters are cleared on a transition
6810 * to link up, so do that.
6811 */
6812 clear_linkup_counters(ppd->dd);
6813 /*
6814 * And (re)set link up default values.
6815 */
6816 set_linkup_defaults(ppd);
6817
6818 /* enforce link speed enabled */
6819 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6820 /* oops - current speed is not enabled, bounce */
6821 dd_dev_err(ppd->dd,
6822 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6823 ppd->link_speed_active, ppd->link_speed_enabled);
77241056 6824 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
17fb4f29 6825 OPA_LINKDOWN_REASON_SPEED_POLICY);
77241056 6826 set_link_state(ppd, HLS_DN_OFFLINE);
8ebd4cf1 6827 tune_serdes(ppd);
6828 start_link(ppd);
6829 }
6830}
6831
6832/*
6833 * Several pieces of LNI information were cached for SMA in ppd.
6834 * Reset these on link down
6835 */
6836static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6837{
6838 ppd->neighbor_guid = 0;
6839 ppd->neighbor_port_number = 0;
6840 ppd->neighbor_type = 0;
6841 ppd->neighbor_fm_security = 0;
6842}
6843
6844static const char * const link_down_reason_strs[] = {
6845 [OPA_LINKDOWN_REASON_NONE] = "None",
 6846 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6847 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6848 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6849 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6850 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6851 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6852 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6853 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6854 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6855 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6856 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6857 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6858 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6859 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6860 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6861 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6862 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6863 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6864 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6865 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6866 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6867 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6868 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6869 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6870 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6871 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6872 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6873 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6874 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6875 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6876 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6877 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6878 "Excessive buffer overrun",
6879 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6880 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6881 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6882 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6883 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6884 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6885 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6886 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6887 "Local media not installed",
6888 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6889 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6890 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6891 "End to end not installed",
6892 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6893 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6894 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6895 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6896 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6897 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6898};
6899
6900/* return the neighbor link down reason string */
6901static const char *link_down_reason_str(u8 reason)
6902{
6903 const char *str = NULL;
6904
6905 if (reason < ARRAY_SIZE(link_down_reason_strs))
6906 str = link_down_reason_strs[reason];
6907 if (!str)
6908 str = "(invalid)";
6909
6910 return str;
6911}
6912
6913/*
6914 * Handle a link down interrupt from the 8051.
6915 *
6916 * This is a work-queue function outside of the interrupt.
6917 */
6918void handle_link_down(struct work_struct *work)
6919{
6920 u8 lcl_reason, neigh_reason = 0;
feb831dd 6921 u8 link_down_reason;
77241056 6922 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6923 link_down_work);
6924 int was_up;
6925 static const char ldr_str[] = "Link down reason: ";
77241056 6926
6927 if ((ppd->host_link_state &
6928 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6929 ppd->port_type == PORT_TYPE_FIXED)
6930 ppd->offline_disabled_reason =
6931 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6932
6933 /* Go offline first, then deal with reading/writing through 8051 */
feb831dd 6934 was_up = !!(ppd->host_link_state & HLS_UP);
6935 set_link_state(ppd, HLS_DN_OFFLINE);
6936
6937 if (was_up) {
6938 lcl_reason = 0;
6939 /* link down reason is only valid if the link was up */
6940 read_link_down_reason(ppd->dd, &link_down_reason);
6941 switch (link_down_reason) {
6942 case LDR_LINK_TRANSFER_ACTIVE_LOW:
6943 /* the link went down, no idle message reason */
6944 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6945 ldr_str);
6946 break;
6947 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6948 /*
6949 * The neighbor reason is only valid if an idle message
6950 * was received for it.
6951 */
6952 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6953 dd_dev_info(ppd->dd,
6954 "%sNeighbor link down message %d, %s\n",
6955 ldr_str, neigh_reason,
6956 link_down_reason_str(neigh_reason));
6957 break;
6958 case LDR_RECEIVED_HOST_OFFLINE_REQ:
6959 dd_dev_info(ppd->dd,
6960 "%sHost requested link to go offline\n",
6961 ldr_str);
6962 break;
6963 default:
6964 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6965 ldr_str, link_down_reason);
6966 break;
6967 }
77241056 6968
6969 /*
6970 * If no reason, assume peer-initiated but missed
6971 * LinkGoingDown idle flits.
6972 */
6973 if (neigh_reason == 0)
6974 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6975 } else {
6976 /* went down while polling or going up */
6977 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6978 }
6979
6980 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6981
6982 /* inform the SMA when the link transitions from up to down */
6983 if (was_up && ppd->local_link_down_reason.sma == 0 &&
6984 ppd->neigh_link_down_reason.sma == 0) {
6985 ppd->local_link_down_reason.sma =
6986 ppd->local_link_down_reason.latest;
6987 ppd->neigh_link_down_reason.sma =
6988 ppd->neigh_link_down_reason.latest;
6989 }
6990
6991 reset_neighbor_info(ppd);
6992
6993 /* disable the port */
6994 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6995
6996 /*
6997 * If there is no cable attached, turn the DC off. Otherwise,
6998 * start the link bring up.
6999 */
623bba2d 7000 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) {
77241056 7001 dc_shutdown(ppd->dd);
7002 } else {
7003 tune_serdes(ppd);
77241056 7004 start_link(ppd);
8ebd4cf1 7005 }
7006}
7007
7008void handle_link_bounce(struct work_struct *work)
7009{
7010 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7011 link_bounce_work);
7012
7013 /*
7014 * Only do something if the link is currently up.
7015 */
7016 if (ppd->host_link_state & HLS_UP) {
7017 set_link_state(ppd, HLS_DN_OFFLINE);
8ebd4cf1 7018 tune_serdes(ppd);
7019 start_link(ppd);
7020 } else {
7021 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
17fb4f29 7022 __func__, link_state_name(ppd->host_link_state));
7023 }
7024}
7025
7026/*
7027 * Mask conversion: Capability exchange to Port LTP. The capability
7028 * exchange has an implicit 16b CRC that is mandatory.
7029 */
7030static int cap_to_port_ltp(int cap)
7031{
7032 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7033
7034 if (cap & CAP_CRC_14B)
7035 port_ltp |= PORT_LTP_CRC_MODE_14;
7036 if (cap & CAP_CRC_48B)
7037 port_ltp |= PORT_LTP_CRC_MODE_48;
7038 if (cap & CAP_CRC_12B_16B_PER_LANE)
7039 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7040
7041 return port_ltp;
7042}
7043
7044/*
7045 * Convert an OPA Port LTP mask to capability mask
7046 */
7047int port_ltp_to_cap(int port_ltp)
7048{
7049 int cap_mask = 0;
7050
7051 if (port_ltp & PORT_LTP_CRC_MODE_14)
7052 cap_mask |= CAP_CRC_14B;
7053 if (port_ltp & PORT_LTP_CRC_MODE_48)
7054 cap_mask |= CAP_CRC_48B;
7055 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7056 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7057
7058 return cap_mask;
7059}
7060
7061/*
7062 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7063 */
7064static int lcb_to_port_ltp(int lcb_crc)
7065{
7066 int port_ltp = 0;
7067
7068 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7069 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7070 else if (lcb_crc == LCB_CRC_48B)
7071 port_ltp = PORT_LTP_CRC_MODE_48;
7072 else if (lcb_crc == LCB_CRC_14B)
7073 port_ltp = PORT_LTP_CRC_MODE_14;
7074 else
7075 port_ltp = PORT_LTP_CRC_MODE_16;
7076
7077 return port_ltp;
7078}
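
The three converters above map between capability bits, OPA Port LTP mask bits, and the single LCB CRC mode, with the 16b mode always implied on the LTP side. The following is a small, self-contained sketch of that pairing; the EX_ constant values are assumptions chosen only to make the sketch compile, not the driver's definitions:

/* Illustrative sketch of the cap_to_port_ltp()/port_ltp_to_cap() pairing.
 * The EX_ constants are assumed values; the real ones come from the hfi1
 * headers.
 */
#include <assert.h>

#define EX_CAP_CRC_14B               0x1
#define EX_CAP_CRC_48B               0x2
#define EX_CAP_CRC_12B_16B_PER_LANE  0x4

#define EX_PORT_LTP_CRC_MODE_14        0x1
#define EX_PORT_LTP_CRC_MODE_16        0x2	/* mandatory, always set */
#define EX_PORT_LTP_CRC_MODE_48        0x4
#define EX_PORT_LTP_CRC_MODE_PER_LANE  0x8

static int ex_cap_to_port_ltp(int cap)
{
	int port_ltp = EX_PORT_LTP_CRC_MODE_16;

	if (cap & EX_CAP_CRC_14B)
		port_ltp |= EX_PORT_LTP_CRC_MODE_14;
	if (cap & EX_CAP_CRC_48B)
		port_ltp |= EX_PORT_LTP_CRC_MODE_48;
	if (cap & EX_CAP_CRC_12B_16B_PER_LANE)
		port_ltp |= EX_PORT_LTP_CRC_MODE_PER_LANE;
	return port_ltp;
}

static int ex_port_ltp_to_cap(int port_ltp)
{
	int cap = 0;

	if (port_ltp & EX_PORT_LTP_CRC_MODE_14)
		cap |= EX_CAP_CRC_14B;
	if (port_ltp & EX_PORT_LTP_CRC_MODE_48)
		cap |= EX_CAP_CRC_48B;
	if (port_ltp & EX_PORT_LTP_CRC_MODE_PER_LANE)
		cap |= EX_CAP_CRC_12B_16B_PER_LANE;
	return cap;
}

int main(void)
{
	int cap = EX_CAP_CRC_14B | EX_CAP_CRC_48B;
	int ltp = ex_cap_to_port_ltp(cap);

	/* 16b is implied in the LTP mask but drops out of the capability mask */
	assert(ltp & EX_PORT_LTP_CRC_MODE_16);
	assert(ex_port_ltp_to_cap(ltp) == cap);
	return 0;
}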
7079
7080/*
7081 * Our neighbor has indicated that we are allowed to act as a fabric
7082 * manager, so place the full management partition key in the second
7083 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7084 * that we should already have the limited management partition key in
7085 * array element 1, and also that the port is not yet up when
7086 * add_full_mgmt_pkey() is invoked.
7087 */
7088static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7089{
7090 struct hfi1_devdata *dd = ppd->dd;
7091
 7092 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7093 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7094 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7095 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7096 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7097 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
34d351f8 7098 hfi1_event_pkey_change(ppd->dd, ppd->port);
7099}
7100
3ec5fa28 7101static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
ce8b2fd0 7102{
7103 if (ppd->pkeys[2] != 0) {
7104 ppd->pkeys[2] = 0;
7105 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
34d351f8 7106 hfi1_event_pkey_change(ppd->dd, ppd->port);
3ec5fa28 7107 }
7108}
7109
7110/*
7111 * Convert the given link width to the OPA link width bitmask.
7112 */
7113static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7114{
7115 switch (width) {
7116 case 0:
7117 /*
7118 * Simulator and quick linkup do not set the width.
7119 * Just set it to 4x without complaint.
7120 */
7121 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7122 return OPA_LINK_WIDTH_4X;
7123 return 0; /* no lanes up */
7124 case 1: return OPA_LINK_WIDTH_1X;
7125 case 2: return OPA_LINK_WIDTH_2X;
7126 case 3: return OPA_LINK_WIDTH_3X;
7127 default:
7128 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
17fb4f29 7129 __func__, width);
7130 /* fall through */
7131 case 4: return OPA_LINK_WIDTH_4X;
7132 }
7133}
7134
7135/*
7136 * Do a population count on the bottom nibble.
7137 */
7138static const u8 bit_counts[16] = {
7139 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7140};
f4d507cd 7141
7142static inline u8 nibble_to_count(u8 nibble)
7143{
7144 return bit_counts[nibble & 0xf];
7145}
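
For example, reading enable_lane_tx = 0xd (lanes 0, 2, and 3 enabled) yields a count of 3. A userspace sketch of the same table lookup, illustrative only:

/* Same popcount-by-table idea as bit_counts[]/nibble_to_count() above. */
#include <assert.h>

static const unsigned char example_bit_counts[16] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
};

static unsigned char example_nibble_to_count(unsigned char nibble)
{
	return example_bit_counts[nibble & 0xf];
}

int main(void)
{
	assert(example_nibble_to_count(0xd) == 3);	/* lanes 0, 2, 3 */
	assert(example_nibble_to_count(0xf) == 4);	/* all four lanes */
	return 0;
}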
7146
7147/*
7148 * Read the active lane information from the 8051 registers and return
7149 * their widths.
7150 *
7151 * Active lane information is found in these 8051 registers:
7152 * enable_lane_tx
7153 * enable_lane_rx
7154 */
7155static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7156 u16 *rx_width)
7157{
7158 u16 tx, rx;
7159 u8 enable_lane_rx;
7160 u8 enable_lane_tx;
7161 u8 tx_polarity_inversion;
7162 u8 rx_polarity_inversion;
7163 u8 max_rate;
7164
7165 /* read the active lanes */
7166 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
17fb4f29 7167 &rx_polarity_inversion, &max_rate);
7168 read_local_lni(dd, &enable_lane_rx);
7169
7170 /* convert to counts */
7171 tx = nibble_to_count(enable_lane_tx);
7172 rx = nibble_to_count(enable_lane_rx);
7173
7174 /*
7175 * Set link_speed_active here, overriding what was set in
7176 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7177 * set the max_rate field in handle_verify_cap until v0.19.
7178 */
7179 if ((dd->icode == ICODE_RTL_SILICON) &&
7180 (dd->dc8051_ver < dc8051_ver(0, 19))) {
7181 /* max_rate: 0 = 12.5G, 1 = 25G */
7182 switch (max_rate) {
7183 case 0:
7184 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7185 break;
7186 default:
7187 dd_dev_err(dd,
7188 "%s: unexpected max rate %d, using 25Gb\n",
7189 __func__, (int)max_rate);
7190 /* fall through */
7191 case 1:
7192 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7193 break;
7194 }
7195 }
7196
7197 dd_dev_info(dd,
7198 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7199 enable_lane_tx, tx, enable_lane_rx, rx);
7200 *tx_width = link_width_to_bits(dd, tx);
7201 *rx_width = link_width_to_bits(dd, rx);
7202}
7203
7204/*
7205 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7206 * Valid after the end of VerifyCap and during LinkUp. Does not change
7207 * after link up. I.e. look elsewhere for downgrade information.
7208 *
7209 * Bits are:
7210 * + bits [7:4] contain the number of active transmitters
7211 * + bits [3:0] contain the number of active receivers
7212 * These are numbers 1 through 4 and can be different values if the
7213 * link is asymmetric.
7214 *
7215 * verify_cap_local_fm_link_width[0] retains its original value.
7216 */
7217static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7218 u16 *rx_width)
7219{
7220 u16 widths, tx, rx;
7221 u8 misc_bits, local_flags;
7222 u16 active_tx, active_rx;
7223
7224 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7225 tx = widths >> 12;
7226 rx = (widths >> 8) & 0xf;
7227
7228 *tx_width = link_width_to_bits(dd, tx);
7229 *rx_width = link_width_to_bits(dd, rx);
7230
7231 /* print the active widths */
7232 get_link_widths(dd, &active_tx, &active_rx);
7233}
7234
7235/*
7236 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7237 * hardware information when the link first comes up.
7238 *
7239 * The link width is not available until after VerifyCap.AllFramesReceived
7240 * (the trigger for handle_verify_cap), so this is outside that routine
7241 * and should be called when the 8051 signals linkup.
7242 */
7243void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7244{
7245 u16 tx_width, rx_width;
7246
7247 /* get end-of-LNI link widths */
7248 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7249
7250 /* use tx_width as the link is supposed to be symmetric on link up */
7251 ppd->link_width_active = tx_width;
7252 /* link width downgrade active (LWD.A) starts out matching LW.A */
7253 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7254 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7255 /* per OPA spec, on link up LWD.E resets to LWD.S */
7256 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
 7257 /* cache the active egress rate (units: 10^6 bits/sec) */
7258 ppd->current_egress_rate = active_egress_rate(ppd);
7259}
7260
7261/*
7262 * Handle a verify capabilities interrupt from the 8051.
7263 *
7264 * This is a work-queue function outside of the interrupt.
7265 */
7266void handle_verify_cap(struct work_struct *work)
7267{
7268 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7269 link_vc_work);
7270 struct hfi1_devdata *dd = ppd->dd;
7271 u64 reg;
7272 u8 power_management;
7273 u8 continious;
7274 u8 vcu;
7275 u8 vau;
7276 u8 z;
7277 u16 vl15buf;
7278 u16 link_widths;
7279 u16 crc_mask;
7280 u16 crc_val;
7281 u16 device_id;
7282 u16 active_tx, active_rx;
7283 u8 partner_supported_crc;
7284 u8 remote_tx_rate;
7285 u8 device_rev;
7286
7287 set_link_state(ppd, HLS_VERIFY_CAP);
7288
7289 lcb_shutdown(dd, 0);
7290 adjust_lcb_for_fpga_serdes(dd);
7291
7292 /*
7293 * These are now valid:
7294 * remote VerifyCap fields in the general LNI config
7295 * CSR DC8051_STS_REMOTE_GUID
7296 * CSR DC8051_STS_REMOTE_NODE_TYPE
7297 * CSR DC8051_STS_REMOTE_FM_SECURITY
7298 * CSR DC8051_STS_REMOTE_PORT_NO
7299 */
7300
7301 read_vc_remote_phy(dd, &power_management, &continious);
7302 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7303 &partner_supported_crc);
7304 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7305 read_remote_device_id(dd, &device_id, &device_rev);
7306 /*
7307 * And the 'MgmtAllowed' information, which is exchanged during
 7308 * LNI, is also available at this point.
7309 */
7310 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7311 /* print the active widths */
7312 get_link_widths(dd, &active_tx, &active_rx);
7313 dd_dev_info(dd,
7314 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7315 (int)power_management, (int)continious);
77241056 7316 dd_dev_info(dd,
7317 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7318 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7319 (int)partner_supported_crc);
77241056 7320 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
17fb4f29 7321 (u32)remote_tx_rate, (u32)link_widths);
77241056 7322 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
17fb4f29 7323 (u32)device_id, (u32)device_rev);
7324 /*
7325 * The peer vAU value just read is the peer receiver value. HFI does
7326 * not support a transmit vAU of 0 (AU == 8). We advertised that
7327 * with Z=1 in the fabric capabilities sent to the peer. The peer
7328 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7329 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7330 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7331 * subject to the Z value exception.
7332 */
7333 if (vau == 0)
7334 vau = 1;
7335 set_up_vl15(dd, vau, vl15buf);
7336
7337 /* set up the LCB CRC mode */
7338 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7339
7340 /* order is important: use the lowest bit in common */
7341 if (crc_mask & CAP_CRC_14B)
7342 crc_val = LCB_CRC_14B;
7343 else if (crc_mask & CAP_CRC_48B)
7344 crc_val = LCB_CRC_48B;
7345 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7346 crc_val = LCB_CRC_12B_16B_PER_LANE;
7347 else
7348 crc_val = LCB_CRC_16B;
7349
7350 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7351 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7352 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7353
7354 /* set (14b only) or clear sideband credit */
7355 reg = read_csr(dd, SEND_CM_CTRL);
7356 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7357 write_csr(dd, SEND_CM_CTRL,
17fb4f29 7358 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
77241056
MM
7359 } else {
7360 write_csr(dd, SEND_CM_CTRL,
17fb4f29 7361 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7362 }
7363
7364 ppd->link_speed_active = 0; /* invalid value */
7365 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7366 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7367 switch (remote_tx_rate) {
7368 case 0:
7369 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7370 break;
7371 case 1:
7372 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7373 break;
7374 }
7375 } else {
7376 /* actual rate is highest bit of the ANDed rates */
7377 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7378
7379 if (rate & 2)
7380 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7381 else if (rate & 1)
7382 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7383 }
7384 if (ppd->link_speed_active == 0) {
7385 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
17fb4f29 7386 __func__, (int)remote_tx_rate);
77241056
MM
7387 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7388 }
7389
7390 /*
7391 * Cache the values of the supported, enabled, and active
7392 * LTP CRC modes to return in 'portinfo' queries. But the bit
7393 * flags that are returned in the portinfo query differ from
7394 * what's in the link_crc_mask, crc_sizes, and crc_val
7395 * variables. Convert these here.
7396 */
7397 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7398 /* supported crc modes */
7399 ppd->port_ltp_crc_mode |=
7400 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7401 /* enabled crc modes */
7402 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7403 /* active crc mode */
7404
7405 /* set up the remote credit return table */
7406 assign_remote_cm_au_table(dd, vcu);
7407
7408 /*
7409 * The LCB is reset on entry to handle_verify_cap(), so this must
7410 * be applied on every link up.
7411 *
7412 * Adjust LCB error kill enable to kill the link if
7413 * these RBUF errors are seen:
7414 * REPLAY_BUF_MBE_SMASK
7415 * FLIT_INPUT_BUF_MBE_SMASK
7416 */
995deafa 7417 if (is_ax(dd)) { /* fixed in B0 */
7418 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7419 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7420 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7421 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7422 }
7423
7424 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7425 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7426
7427 /* give 8051 access to the LCB CSRs */
7428 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7429 set_8051_lcb_access(dd);
7430
7431 ppd->neighbor_guid =
7432 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7433 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7434 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7435 ppd->neighbor_type =
7436 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7437 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7438 ppd->neighbor_fm_security =
7439 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7440 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7441 dd_dev_info(dd,
7442 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7443 ppd->neighbor_guid, ppd->neighbor_type,
7444 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7445 if (ppd->mgmt_allowed)
7446 add_full_mgmt_pkey(ppd);
7447
7448 /* tell the 8051 to go to LinkUp */
7449 set_link_state(ppd, HLS_GOING_UP);
7450}
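
The CRC negotiation inside handle_verify_cap() picks the lowest common enabled mode and falls back to the mandatory 16b mode when nothing else matches. A compact, illustrative restatement of that priority order (the EX_ constant values are assumptions, not the driver's definitions):

/* Illustrative sketch of the "lowest bit in common" CRC selection above. */
#include <assert.h>

#define EX_CAP_CRC_14B              0x1
#define EX_CAP_CRC_48B              0x2
#define EX_CAP_CRC_12B_16B_PER_LANE 0x4

enum ex_lcb_crc { EX_LCB_CRC_16B, EX_LCB_CRC_14B, EX_LCB_CRC_48B, EX_LCB_CRC_PER_LANE };

static enum ex_lcb_crc ex_pick_crc(int local_enabled, int partner_supported)
{
	int crc_mask = local_enabled & partner_supported;

	if (crc_mask & EX_CAP_CRC_14B)
		return EX_LCB_CRC_14B;
	if (crc_mask & EX_CAP_CRC_48B)
		return EX_LCB_CRC_48B;
	if (crc_mask & EX_CAP_CRC_12B_16B_PER_LANE)
		return EX_LCB_CRC_PER_LANE;
	return EX_LCB_CRC_16B;		/* mandatory fallback */
}

int main(void)
{
	/* we enable 14B+48B, peer supports 48B+per-lane: 48B is the common mode */
	assert(ex_pick_crc(EX_CAP_CRC_14B | EX_CAP_CRC_48B,
			   EX_CAP_CRC_48B | EX_CAP_CRC_12B_16B_PER_LANE) ==
	       EX_LCB_CRC_48B);
	/* nothing in common: fall back to the mandatory 16B mode */
	assert(ex_pick_crc(EX_CAP_CRC_14B, EX_CAP_CRC_48B) == EX_LCB_CRC_16B);
	return 0;
}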
7451
7452/*
7453 * Apply the link width downgrade enabled policy against the current active
7454 * link widths.
7455 *
7456 * Called when the enabled policy changes or the active link widths change.
7457 */
7458void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7459{
77241056 7460 int do_bounce = 0;
7461 int tries;
7462 u16 lwde;
7463 u16 tx, rx;
7464
7465 /* use the hls lock to avoid a race with actual link up */
7466 tries = 0;
7467retry:
7468 mutex_lock(&ppd->hls_lock);
7469 /* only apply if the link is up */
0c7f77af 7470 if (ppd->host_link_state & HLS_DOWN) {
7471 /* still going up..wait and retry */
7472 if (ppd->host_link_state & HLS_GOING_UP) {
7473 if (++tries < 1000) {
7474 mutex_unlock(&ppd->hls_lock);
7475 usleep_range(100, 120); /* arbitrary */
7476 goto retry;
7477 }
7478 dd_dev_err(ppd->dd,
7479 "%s: giving up waiting for link state change\n",
7480 __func__);
7481 }
7482 goto done;
7483 }
7484
7485 lwde = ppd->link_width_downgrade_enabled;
7486
7487 if (refresh_widths) {
7488 get_link_widths(ppd->dd, &tx, &rx);
7489 ppd->link_width_downgrade_tx_active = tx;
7490 ppd->link_width_downgrade_rx_active = rx;
7491 }
7492
7493 if (ppd->link_width_downgrade_tx_active == 0 ||
7494 ppd->link_width_downgrade_rx_active == 0) {
7495 /* the 8051 reported a dead link as a downgrade */
7496 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7497 } else if (lwde == 0) {
7498 /* downgrade is disabled */
7499
7500 /* bounce if not at starting active width */
7501 if ((ppd->link_width_active !=
7502 ppd->link_width_downgrade_tx_active) ||
7503 (ppd->link_width_active !=
7504 ppd->link_width_downgrade_rx_active)) {
77241056 7505 dd_dev_err(ppd->dd,
17fb4f29 7506 "Link downgrade is disabled and link has downgraded, downing link\n");
77241056 7507 dd_dev_err(ppd->dd,
7508 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7509 ppd->link_width_active,
7510 ppd->link_width_downgrade_tx_active,
7511 ppd->link_width_downgrade_rx_active);
7512 do_bounce = 1;
7513 }
7514 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7515 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7516 /* Tx or Rx is outside the enabled policy */
7517 dd_dev_err(ppd->dd,
17fb4f29 7518 "Link is outside of downgrade allowed, downing link\n");
77241056 7519 dd_dev_err(ppd->dd,
7520 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7521 lwde, ppd->link_width_downgrade_tx_active,
7522 ppd->link_width_downgrade_rx_active);
7523 do_bounce = 1;
7524 }
7525
7526done:
7527 mutex_unlock(&ppd->hls_lock);
7528
7529 if (do_bounce) {
7530 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
17fb4f29 7531 OPA_LINKDOWN_REASON_WIDTH_POLICY);
77241056 7532 set_link_state(ppd, HLS_DN_OFFLINE);
8ebd4cf1 7533 tune_serdes(ppd);
7534 start_link(ppd);
7535 }
7536}
7537
7538/*
7539 * Handle a link downgrade interrupt from the 8051.
7540 *
7541 * This is a work-queue function outside of the interrupt.
7542 */
7543void handle_link_downgrade(struct work_struct *work)
7544{
7545 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7546 link_downgrade_work);
7547
7548 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7549 apply_link_downgrade_policy(ppd, 1);
7550}
7551
7552static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7553{
7554 return flag_string(buf, buf_len, flags, dcc_err_flags,
7555 ARRAY_SIZE(dcc_err_flags));
7556}
7557
7558static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7559{
7560 return flag_string(buf, buf_len, flags, lcb_err_flags,
7561 ARRAY_SIZE(lcb_err_flags));
7562}
7563
7564static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7565{
7566 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7567 ARRAY_SIZE(dc8051_err_flags));
7568}
7569
7570static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7571{
7572 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7573 ARRAY_SIZE(dc8051_info_err_flags));
7574}
7575
7576static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7577{
7578 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7579 ARRAY_SIZE(dc8051_info_host_msg_flags));
7580}
7581
7582static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7583{
7584 struct hfi1_pportdata *ppd = dd->pport;
7585 u64 info, err, host_msg;
7586 int queue_link_down = 0;
7587 char buf[96];
7588
7589 /* look at the flags */
7590 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7591 /* 8051 information set by firmware */
7592 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7593 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7594 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7595 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7596 host_msg = (info >>
7597 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7598 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7599
7600 /*
7601 * Handle error flags.
7602 */
7603 if (err & FAILED_LNI) {
7604 /*
7605 * LNI error indications are cleared by the 8051
7606 * only when starting polling. Only pay attention
7607 * to them when in the states that occur during
7608 * LNI.
7609 */
7610 if (ppd->host_link_state
7611 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7612 queue_link_down = 1;
7613 dd_dev_info(dd, "Link error: %s\n",
7614 dc8051_info_err_string(buf,
7615 sizeof(buf),
7616 err &
7617 FAILED_LNI));
7618 }
7619 err &= ~(u64)FAILED_LNI;
7620 }
 7621 /* unknown frames can happen during LNI, just count */
7622 if (err & UNKNOWN_FRAME) {
7623 ppd->unknown_frame_count++;
7624 err &= ~(u64)UNKNOWN_FRAME;
7625 }
7626 if (err) {
7627 /* report remaining errors, but do not do anything */
7628 dd_dev_err(dd, "8051 info error: %s\n",
7629 dc8051_info_err_string(buf, sizeof(buf),
7630 err));
7631 }
7632
7633 /*
7634 * Handle host message flags.
7635 */
7636 if (host_msg & HOST_REQ_DONE) {
7637 /*
7638 * Presently, the driver does a busy wait for
7639 * host requests to complete. This is only an
7640 * informational message.
7641 * NOTE: The 8051 clears the host message
7642 * information *on the next 8051 command*.
7643 * Therefore, when linkup is achieved,
7644 * this flag will still be set.
7645 */
7646 host_msg &= ~(u64)HOST_REQ_DONE;
7647 }
7648 if (host_msg & BC_SMA_MSG) {
7649 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7650 host_msg &= ~(u64)BC_SMA_MSG;
7651 }
7652 if (host_msg & LINKUP_ACHIEVED) {
7653 dd_dev_info(dd, "8051: Link up\n");
7654 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7655 host_msg &= ~(u64)LINKUP_ACHIEVED;
7656 }
7657 if (host_msg & EXT_DEVICE_CFG_REQ) {
145dd2b3 7658 handle_8051_request(ppd);
7659 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7660 }
7661 if (host_msg & VERIFY_CAP_FRAME) {
7662 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7663 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7664 }
7665 if (host_msg & LINK_GOING_DOWN) {
7666 const char *extra = "";
7667 /* no downgrade action needed if going down */
7668 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7669 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7670 extra = " (ignoring downgrade)";
7671 }
7672 dd_dev_info(dd, "8051: Link down%s\n", extra);
7673 queue_link_down = 1;
7674 host_msg &= ~(u64)LINK_GOING_DOWN;
7675 }
7676 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7677 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7678 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7679 }
7680 if (host_msg) {
7681 /* report remaining messages, but do not do anything */
7682 dd_dev_info(dd, "8051 info host message: %s\n",
7683 dc8051_info_host_msg_string(buf,
7684 sizeof(buf),
7685 host_msg));
7686 }
7687
7688 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7689 }
7690 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7691 /*
7692 * Lost the 8051 heartbeat. If this happens, we
7693 * receive constant interrupts about it. Disable
7694 * the interrupt after the first.
7695 */
7696 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7697 write_csr(dd, DC_DC8051_ERR_EN,
7698 read_csr(dd, DC_DC8051_ERR_EN) &
7699 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7700
7701 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7702 }
7703 if (reg) {
7704 /* report the error, but do not do anything */
7705 dd_dev_err(dd, "8051 error: %s\n",
17fb4f29 7706 dc8051_err_string(buf, sizeof(buf), reg));
7707 }
7708
7709 if (queue_link_down) {
7710 /*
7711 * if the link is already going down or disabled, do not
7712 * queue another
7713 */
7714 if ((ppd->host_link_state &
7715 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7716 ppd->link_enabled == 0) {
77241056 7717 dd_dev_info(dd, "%s: not queuing link down\n",
17fb4f29 7718 __func__);
7719 } else {
7720 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7721 }
7722 }
7723}
7724
7725static const char * const fm_config_txt[] = {
7726[0] =
7727 "BadHeadDist: Distance violation between two head flits",
7728[1] =
7729 "BadTailDist: Distance violation between two tail flits",
7730[2] =
7731 "BadCtrlDist: Distance violation between two credit control flits",
7732[3] =
7733 "BadCrdAck: Credits return for unsupported VL",
7734[4] =
7735 "UnsupportedVLMarker: Received VL Marker",
7736[5] =
7737 "BadPreempt: Exceeded the preemption nesting level",
7738[6] =
7739 "BadControlFlit: Received unsupported control flit",
7740/* no 7 */
7741[8] =
7742 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7743};
7744
7745static const char * const port_rcv_txt[] = {
7746[1] =
7747 "BadPktLen: Illegal PktLen",
7748[2] =
7749 "PktLenTooLong: Packet longer than PktLen",
7750[3] =
7751 "PktLenTooShort: Packet shorter than PktLen",
7752[4] =
7753 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7754[5] =
7755 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7756[6] =
7757 "BadL2: Illegal L2 opcode",
7758[7] =
7759 "BadSC: Unsupported SC",
7760[9] =
7761 "BadRC: Illegal RC",
7762[11] =
7763 "PreemptError: Preempting with same VL",
7764[12] =
7765 "PreemptVL15: Preempting a VL15 packet",
7766};
7767
7768#define OPA_LDR_FMCONFIG_OFFSET 16
7769#define OPA_LDR_PORTRCV_OFFSET 0
7770static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7771{
7772 u64 info, hdr0, hdr1;
7773 const char *extra;
7774 char buf[96];
7775 struct hfi1_pportdata *ppd = dd->pport;
7776 u8 lcl_reason = 0;
7777 int do_bounce = 0;
7778
7779 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7780 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7781 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7782 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7783 /* set status bit */
7784 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7785 }
7786 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7787 }
7788
7789 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7790 struct hfi1_pportdata *ppd = dd->pport;
7791 /* this counter saturates at (2^32) - 1 */
7792 if (ppd->link_downed < (u32)UINT_MAX)
7793 ppd->link_downed++;
7794 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7795 }
7796
7797 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7798 u8 reason_valid = 1;
7799
7800 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7801 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7802 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7803 /* set status bit */
7804 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7805 }
7806 switch (info) {
7807 case 0:
7808 case 1:
7809 case 2:
7810 case 3:
7811 case 4:
7812 case 5:
7813 case 6:
7814 extra = fm_config_txt[info];
7815 break;
7816 case 8:
7817 extra = fm_config_txt[info];
7818 if (ppd->port_error_action &
7819 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7820 do_bounce = 1;
7821 /*
7822 * lcl_reason cannot be derived from info
7823 * for this error
7824 */
7825 lcl_reason =
7826 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7827 }
7828 break;
7829 default:
7830 reason_valid = 0;
7831 snprintf(buf, sizeof(buf), "reserved%lld", info);
7832 extra = buf;
7833 break;
7834 }
7835
7836 if (reason_valid && !do_bounce) {
7837 do_bounce = ppd->port_error_action &
7838 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7839 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7840 }
7841
7842 /* just report this */
7843 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7844 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7845 }
7846
7847 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7848 u8 reason_valid = 1;
7849
7850 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7851 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7852 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7853 if (!(dd->err_info_rcvport.status_and_code &
7854 OPA_EI_STATUS_SMASK)) {
7855 dd->err_info_rcvport.status_and_code =
7856 info & OPA_EI_CODE_SMASK;
7857 /* set status bit */
7858 dd->err_info_rcvport.status_and_code |=
7859 OPA_EI_STATUS_SMASK;
7860 /*
7861 * save first 2 flits in the packet that caused
7862 * the error
7863 */
7864 dd->err_info_rcvport.packet_flit1 = hdr0;
7865 dd->err_info_rcvport.packet_flit2 = hdr1;
7866 }
7867 switch (info) {
7868 case 1:
7869 case 2:
7870 case 3:
7871 case 4:
7872 case 5:
7873 case 6:
7874 case 7:
7875 case 9:
7876 case 11:
7877 case 12:
7878 extra = port_rcv_txt[info];
7879 break;
7880 default:
7881 reason_valid = 0;
7882 snprintf(buf, sizeof(buf), "reserved%lld", info);
7883 extra = buf;
7884 break;
7885 }
7886
7887 if (reason_valid && !do_bounce) {
7888 do_bounce = ppd->port_error_action &
7889 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7890 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7891 }
7892
7893 /* just report this */
7894 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7895 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
17fb4f29 7896 hdr0, hdr1);
7897
7898 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7899 }
7900
7901 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7902 /* informative only */
7903 dd_dev_info(dd, "8051 access to LCB blocked\n");
7904 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7905 }
7906 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7907 /* informative only */
7908 dd_dev_info(dd, "host access to LCB blocked\n");
7909 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7910 }
7911
7912 /* report any remaining errors */
7913 if (reg)
7914 dd_dev_info(dd, "DCC Error: %s\n",
17fb4f29 7915 dcc_err_string(buf, sizeof(buf), reg));
7916
7917 if (lcl_reason == 0)
7918 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7919
7920 if (do_bounce) {
7921 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7922 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7923 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7924 }
7925}
7926
7927static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7928{
7929 char buf[96];
7930
7931 dd_dev_info(dd, "LCB Error: %s\n",
17fb4f29 7932 lcb_err_string(buf, sizeof(buf), reg));
7933}
7934
7935/*
7936 * CCE block DC interrupt. Source is < 8.
7937 */
7938static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7939{
7940 const struct err_reg_info *eri = &dc_errs[source];
7941
7942 if (eri->handler) {
7943 interrupt_clear_down(dd, 0, eri);
7944 } else if (source == 3 /* dc_lbm_int */) {
7945 /*
7946 * This indicates that a parity error has occurred on the
7947 * address/control lines presented to the LBM. The error
7948 * is a single pulse, there is no associated error flag,
7949 * and it is non-maskable. This is because if a parity
7950 * error occurs on the request the request is dropped.
7951 * This should never occur, but it is nice to know if it
7952 * ever does.
7953 */
7954 dd_dev_err(dd, "Parity error in DC LBM block\n");
7955 } else {
7956 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7957 }
7958}
7959
7960/*
7961 * TX block send credit interrupt. Source is < 160.
7962 */
7963static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7964{
7965 sc_group_release_update(dd, source);
7966}
7967
7968/*
7969 * TX block SDMA interrupt. Source is < 48.
7970 *
7971 * SDMA interrupts are grouped by type:
7972 *
7973 * 0 - N-1 = SDma
7974 * N - 2N-1 = SDmaProgress
7975 * 2N - 3N-1 = SDmaIdle
7976 */
7977static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7978{
7979 /* what interrupt */
7980 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7981 /* which engine */
7982 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7983
7984#ifdef CONFIG_SDMA_VERBOSITY
7985 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7986 slashstrip(__FILE__), __LINE__, __func__);
7987 sdma_dumpstate(&dd->per_sdma[which]);
7988#endif
7989
7990 if (likely(what < 3 && which < dd->num_sdma)) {
7991 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7992 } else {
7993 /* should not happen */
7994 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7995 }
7996}
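
Since the 48 SDMA sources are laid out as three groups of TXE_NUM_SDMA_ENGINES (16, per the comment above), the decode is a plain divide/modulo. A tiny illustrative sketch of that decomposition:

/* Decode an SDMA interrupt source the same way as is_sdma_eng_int() above. */
#include <stdio.h>

#define EXAMPLE_NUM_SDMA_ENGINES 16	/* 48 sources / 3 groups */

int main(void)
{
	unsigned int source = 37;	/* arbitrary example value */
	unsigned int what = source / EXAMPLE_NUM_SDMA_ENGINES;		/* 2 -> SDmaIdle */
	unsigned int which = source % EXAMPLE_NUM_SDMA_ENGINES;	/* engine 5 */

	printf("source %u -> group %u, engine %u\n", source, what, which);
	return 0;
}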
7997
7998/*
7999 * RX block receive available interrupt. Source is < 160.
8000 */
8001static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8002{
8003 struct hfi1_ctxtdata *rcd;
8004 char *err_detail;
8005
8006 if (likely(source < dd->num_rcv_contexts)) {
8007 rcd = dd->rcd[source];
8008 if (rcd) {
8009 if (source < dd->first_user_ctxt)
f4f30031 8010 rcd->do_interrupt(rcd, 0);
8011 else
8012 handle_user_interrupt(rcd);
8013 return; /* OK */
8014 }
8015 /* received an interrupt, but no rcd */
8016 err_detail = "dataless";
8017 } else {
8018 /* received an interrupt, but are not using that context */
8019 err_detail = "out of range";
8020 }
8021 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
17fb4f29 8022 err_detail, source);
77241056
MM
8023}
8024
8025/*
8026 * RX block receive urgent interrupt. Source is < 160.
8027 */
8028static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8029{
8030 struct hfi1_ctxtdata *rcd;
8031 char *err_detail;
8032
8033 if (likely(source < dd->num_rcv_contexts)) {
8034 rcd = dd->rcd[source];
8035 if (rcd) {
8036 /* only pay attention to user urgent interrupts */
8037 if (source >= dd->first_user_ctxt)
8038 handle_user_interrupt(rcd);
8039 return; /* OK */
8040 }
8041 /* received an interrupt, but no rcd */
8042 err_detail = "dataless";
8043 } else {
8044 /* received an interrupt, but are not using that context */
8045 err_detail = "out of range";
8046 }
8047 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
17fb4f29 8048 err_detail, source);
77241056
MM
8049}
8050
8051/*
8052 * Reserved range interrupt. Should not be called in normal operation.
8053 */
8054static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8055{
8056 char name[64];
8057
8058 dd_dev_err(dd, "unexpected %s interrupt\n",
17fb4f29 8059 is_reserved_name(name, sizeof(name), source));
77241056
MM
8060}
8061
8062static const struct is_table is_table[] = {
4d114fdd
JJ
8063/*
8064 * start end
8065 * name func interrupt func
8066 */
77241056
MM
8067{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8068 is_misc_err_name, is_misc_err_int },
8069{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8070 is_sdma_eng_err_name, is_sdma_eng_err_int },
8071{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8072 is_sendctxt_err_name, is_sendctxt_err_int },
8073{ IS_SDMA_START, IS_SDMA_END,
8074 is_sdma_eng_name, is_sdma_eng_int },
8075{ IS_VARIOUS_START, IS_VARIOUS_END,
8076 is_various_name, is_various_int },
8077{ IS_DC_START, IS_DC_END,
8078 is_dc_name, is_dc_int },
8079{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8080 is_rcv_avail_name, is_rcv_avail_int },
8081{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8082 is_rcv_urgent_name, is_rcv_urgent_int },
8083{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8084 is_send_credit_name, is_send_credit_int},
8085{ IS_RESERVED_START, IS_RESERVED_END,
8086 is_reserved_name, is_reserved_int},
8087};
8088
8089/*
8090 * Interrupt source interrupt - called when the given source has an interrupt.
8091 * Source is a bit index into an array of 64-bit integers.
8092 */
8093static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8094{
8095 const struct is_table *entry;
8096
8097 /* avoids a double compare by walking the table in-order */
8098 for (entry = &is_table[0]; entry->is_name; entry++) {
8099 if (source < entry->end) {
8100 trace_hfi1_interrupt(dd, entry, source);
8101 entry->is_int(dd, source - entry->start);
8102 return;
8103 }
8104 }
8105 /* fell off the end */
8106 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8107}
8108
8109/*
8110 * General interrupt handler. This is able to correctly handle
8111 * all interrupts in case INTx is used.
8112 */
8113static irqreturn_t general_interrupt(int irq, void *data)
8114{
8115 struct hfi1_devdata *dd = data;
8116 u64 regs[CCE_NUM_INT_CSRS];
8117 u32 bit;
8118 int i;
8119
8120 this_cpu_inc(*dd->int_counter);
8121
8122 /* phase 1: scan and clear all handled interrupts */
8123 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8124 if (dd->gi_mask[i] == 0) {
8125 regs[i] = 0; /* used later */
8126 continue;
8127 }
8128 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8129 dd->gi_mask[i];
8130 /* only clear if anything is set */
8131 if (regs[i])
8132 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8133 }
8134
8135 /* phase 2: call the appropriate handler */
8136 for_each_set_bit(bit, (unsigned long *)&regs[0],
17fb4f29 8137 CCE_NUM_INT_CSRS * 64) {
77241056
MM
8138 is_interrupt(dd, bit);
8139 }
8140
8141 return IRQ_HANDLED;
8142}
8143
8144static irqreturn_t sdma_interrupt(int irq, void *data)
8145{
8146 struct sdma_engine *sde = data;
8147 struct hfi1_devdata *dd = sde->dd;
8148 u64 status;
8149
8150#ifdef CONFIG_SDMA_VERBOSITY
8151 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8152 slashstrip(__FILE__), __LINE__, __func__);
8153 sdma_dumpstate(sde);
8154#endif
8155
8156 this_cpu_inc(*dd->int_counter);
8157
8158 /* This read_csr is really bad in the hot path */
8159 status = read_csr(dd,
17fb4f29
JJ
8160 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8161 & sde->imask;
77241056
MM
8162 if (likely(status)) {
8163 /* clear the interrupt(s) */
8164 write_csr(dd,
17fb4f29
JJ
8165 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8166 status);
77241056
MM
8167
8168 /* handle the interrupt(s) */
8169 sdma_engine_interrupt(sde, status);
8170 } else
8171 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
17fb4f29 8172 sde->this_idx);
77241056
MM
8173
8174 return IRQ_HANDLED;
8175}
8176
8177/*
ecd42f8d
DL
8178 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8179 * to ensure that the write completed. This does NOT guarantee that
8180 * queued DMA writes to memory from the chip are pushed.
f4f30031
DL
8181 */
8182static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8183{
8184 struct hfi1_devdata *dd = rcd->dd;
8185 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8186
8187 mmiowb(); /* make sure everything before is written */
8188 write_csr(dd, addr, rcd->imask);
8189 /* force the above write on the chip and get a value back */
8190 (void)read_csr(dd, addr);
8191}
8192
8193/* force the receive interrupt */
fb9036dd 8194void force_recv_intr(struct hfi1_ctxtdata *rcd)
f4f30031
DL
8195{
8196 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8197}
8198
ecd42f8d
DL
8199/*
8200 * Return non-zero if a packet is present.
8201 *
8202 * This routine is called when rechecking for packets after the RcvAvail
8203 * interrupt has been cleared down. First, do a quick check of memory for
8204 * a packet present. If not found, use an expensive CSR read of the context
8205 * tail to determine the actual tail. The CSR read is necessary because there
8206 * is no method to push pending DMAs to memory other than an interrupt and we
8207 * are trying to determine if we need to force an interrupt.
8208 */
f4f30031
DL
8209static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8210{
ecd42f8d
DL
8211 u32 tail;
8212 int present;
8213
f4f30031 8214 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
ecd42f8d 8215 present = (rcd->seq_cnt ==
f4f30031 8216 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
ecd42f8d
DL
8217 else /* is RDMA rtail */
8218 present = (rcd->head != get_rcvhdrtail(rcd));
8219
8220 if (present)
8221 return 1;
f4f30031 8222
ecd42f8d
DL
8223 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8224 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8225 return rcd->head != tail;
f4f30031
DL
8226}
8227
8228/*
8229 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8230 * This routine will try to handle packets immediately (latency), but if
8231 * it finds too many, it will invoke the thread handler (bandwidth). The
16733b88 8232 * chip receive interrupt is *not* cleared down until this or the thread (if
f4f30031
DL
8233 * invoked) is finished. The intent is to avoid extra interrupts while we
8234 * are processing packets anyway.
77241056
MM
8235 */
8236static irqreturn_t receive_context_interrupt(int irq, void *data)
8237{
8238 struct hfi1_ctxtdata *rcd = data;
8239 struct hfi1_devdata *dd = rcd->dd;
f4f30031
DL
8240 int disposition;
8241 int present;
77241056
MM
8242
8243 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8244 this_cpu_inc(*dd->int_counter);
affa48de 8245 aspm_ctx_disable(rcd);
77241056 8246
f4f30031
DL
8247 /* receive interrupt remains blocked while processing packets */
8248 disposition = rcd->do_interrupt(rcd, 0);
77241056 8249
f4f30031
DL
8250 /*
8251 * Too many packets were seen while processing packets in this
8252 * IRQ handler. Invoke the handler thread. The receive interrupt
8253 * remains blocked.
8254 */
8255 if (disposition == RCV_PKT_LIMIT)
8256 return IRQ_WAKE_THREAD;
8257
8258 /*
8259 * The packet processor detected no more packets. Clear the receive
8260 * interrupt and recheck for a packet that may have arrived
8261 * after the previous check and interrupt clear. If a packet arrived,
8262 * force another interrupt.
8263 */
8264 clear_recv_intr(rcd);
8265 present = check_packet_present(rcd);
8266 if (present)
8267 force_recv_intr(rcd);
8268
8269 return IRQ_HANDLED;
8270}
8271
8272/*
8273 * Receive packet thread handler. This expects to be invoked with the
8274 * receive interrupt still blocked.
8275 */
8276static irqreturn_t receive_context_thread(int irq, void *data)
8277{
8278 struct hfi1_ctxtdata *rcd = data;
8279 int present;
8280
8281 /* receive interrupt is still blocked from the IRQ handler */
8282 (void)rcd->do_interrupt(rcd, 1);
8283
8284 /*
8285 * The packet processor will only return if it detected no more
8286 * packets. Hold IRQs here so we can safely clear the interrupt and
8287 * recheck for a packet that may have arrived after the previous
8288 * check and the interrupt clear. If a packet arrived, force another
8289 * interrupt.
8290 */
8291 local_irq_disable();
8292 clear_recv_intr(rcd);
8293 present = check_packet_present(rcd);
8294 if (present)
8295 force_recv_intr(rcd);
8296 local_irq_enable();
77241056
MM
8297
8298 return IRQ_HANDLED;
8299}
8300
8301/* ========================================================================= */
8302
8303u32 read_physical_state(struct hfi1_devdata *dd)
8304{
8305 u64 reg;
8306
8307 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8308 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8309 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8310}
8311
fb9036dd 8312u32 read_logical_state(struct hfi1_devdata *dd)
77241056
MM
8313{
8314 u64 reg;
8315
8316 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8317 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8318 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8319}
8320
8321static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8322{
8323 u64 reg;
8324
8325 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8326 /* clear current state, set new state */
8327 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8328 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8329 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8330}
8331
8332/*
8333 * Use the 8051 to read a LCB CSR.
8334 */
8335static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8336{
8337 u32 regno;
8338 int ret;
8339
8340 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8341 if (acquire_lcb_access(dd, 0) == 0) {
8342 *data = read_csr(dd, addr);
8343 release_lcb_access(dd, 0);
8344 return 0;
8345 }
8346 return -EBUSY;
8347 }
8348
8349 /* register is an index of LCB registers: (offset - base) / 8 */
8350 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8351 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8352 if (ret != HCMD_SUCCESS)
8353 return -EBUSY;
8354 return 0;
8355}
8356
8357/*
8358 * Read an LCB CSR. Access may not be in host control, so check.
8359 * Return 0 on success, -EBUSY on failure.
8360 */
8361int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8362{
8363 struct hfi1_pportdata *ppd = dd->pport;
8364
8365 /* if up, go through the 8051 for the value */
8366 if (ppd->host_link_state & HLS_UP)
8367 return read_lcb_via_8051(dd, addr, data);
8368 /* if going up or down, no access */
8369 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8370 return -EBUSY;
8371 /* otherwise, host has access */
8372 *data = read_csr(dd, addr);
8373 return 0;
8374}
8375
8376/*
8377 * Use the 8051 to write a LCB CSR.
8378 */
8379static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8380{
3bf40d65
DL
8381 u32 regno;
8382 int ret;
77241056 8383
3bf40d65
DL
8384 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8385 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8386 if (acquire_lcb_access(dd, 0) == 0) {
8387 write_csr(dd, addr, data);
8388 release_lcb_access(dd, 0);
8389 return 0;
8390 }
8391 return -EBUSY;
77241056 8392 }
3bf40d65
DL
8393
8394 /* register is an index of LCB registers: (offset - base) / 8 */
8395 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8396 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8397 if (ret != HCMD_SUCCESS)
8398 return -EBUSY;
8399 return 0;
77241056
MM
8400}
8401
8402/*
8403 * Write an LCB CSR. Access may not be in host control, so check.
8404 * Return 0 on success, -EBUSY on failure.
8405 */
8406int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8407{
8408 struct hfi1_pportdata *ppd = dd->pport;
8409
8410 /* if up, go through the 8051 for the value */
8411 if (ppd->host_link_state & HLS_UP)
8412 return write_lcb_via_8051(dd, addr, data);
8413 /* if going up or down, no access */
8414 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8415 return -EBUSY;
8416 /* otherwise, host has access */
8417 write_csr(dd, addr, data);
8418 return 0;
8419}
8420
8421/*
8422 * Returns:
8423 * < 0 = Linux error, not able to get access
8424 * > 0 = 8051 command RETURN_CODE
8425 */
8426static int do_8051_command(
8427 struct hfi1_devdata *dd,
8428 u32 type,
8429 u64 in_data,
8430 u64 *out_data)
8431{
8432 u64 reg, completed;
8433 int return_code;
8434 unsigned long flags;
8435 unsigned long timeout;
8436
8437 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8438
8439 /*
8440 * Alternative to holding the lock for a long time:
8441 * - keep busy wait - have other users bounce off
8442 */
8443 spin_lock_irqsave(&dd->dc8051_lock, flags);
8444
8445 /* We can't send any commands to the 8051 if it's in reset */
8446 if (dd->dc_shutdown) {
8447 return_code = -ENODEV;
8448 goto fail;
8449 }
8450
8451 /*
8452 * If an 8051 host command timed out previously, then the 8051 is
8453 * stuck.
8454 *
8455 * On first timeout, attempt to reset and restart the entire DC
8456 * block (including 8051). (Is this too big of a hammer?)
8457 *
8458 * If the 8051 times out a second time, the reset did not bring it
8459 * back to healthy life. In that case, fail any subsequent commands.
8460 */
8461 if (dd->dc8051_timed_out) {
8462 if (dd->dc8051_timed_out > 1) {
8463 dd_dev_err(dd,
8464 "Previous 8051 host command timed out, skipping command %u\n",
8465 type);
8466 return_code = -ENXIO;
8467 goto fail;
8468 }
8469 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8470 dc_shutdown(dd);
8471 dc_start(dd);
8472 spin_lock_irqsave(&dd->dc8051_lock, flags);
8473 }
8474
8475 /*
8476 * If there is no timeout, then the 8051 command interface is
8477 * waiting for a command.
8478 */
8479
3bf40d65
DL
8480 /*
8481 * When writing an LCB CSR, out_data contains the full value
8482 * to be written, while in_data contains the relative LCB
8483 * address in 7:0. Do the work here, rather than the caller,
8484 * of distributing the write data to where it needs to go:
8485 *
8486 * Write data
8487 * 39:00 -> in_data[47:8]
8488 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8489 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8490 */
8491 if (type == HCMD_WRITE_LCB_CSR) {
8492 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8493 reg = ((((*out_data) >> 40) & 0xff) <<
8494 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8495 | ((((*out_data) >> 48) & 0xffff) <<
8496 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8497 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8498 }
8499
77241056
MM
8500 /*
8501 * Do two writes: the first to stabilize the type and req_data, the
8502 * second to activate.
8503 */
8504 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8505 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8506 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8507 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8508 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8509 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8510 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8511
8512 /* wait for completion, alternate: interrupt */
8513 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8514 while (1) {
8515 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8516 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8517 if (completed)
8518 break;
8519 if (time_after(jiffies, timeout)) {
8520 dd->dc8051_timed_out++;
8521 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8522 if (out_data)
8523 *out_data = 0;
8524 return_code = -ETIMEDOUT;
8525 goto fail;
8526 }
8527 udelay(2);
8528 }
8529
8530 if (out_data) {
8531 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8532 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8533 if (type == HCMD_READ_LCB_CSR) {
8534 /* top 16 bits are in a different register */
8535 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8536 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8537 << (48
8538 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8539 }
8540 }
8541 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8542 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8543 dd->dc8051_timed_out = 0;
8544 /*
8545 * Clear command for next user.
8546 */
8547 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8548
8549fail:
8550 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8551
8552 return return_code;
8553}
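/*
 * A worked example of the HCMD_WRITE_LCB_CSR split described above
 * (a sketch; the value is arbitrary): for a write value of
 * 0x0123456789abcdef,
 *	bits 39:0  (0x6789abcdef) land in in_data[47:8],
 *	bits 47:40 (0x45) land in DC8051_CFG_EXT_DEV_0.RETURN_CODE,
 *	bits 63:48 (0x0123) land in DC8051_CFG_EXT_DEV_0.RSP_DATA,
 * while in_data[7:0] still carries the relative LCB register index.
 */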
8554
8555static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8556{
8557 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8558}
8559
8ebd4cf1
EH
8560int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8561 u8 lane_id, u32 config_data)
77241056
MM
8562{
8563 u64 data;
8564 int ret;
8565
8566 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8567 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8568 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8569 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8570 if (ret != HCMD_SUCCESS) {
8571 dd_dev_err(dd,
17fb4f29
JJ
8572 "load 8051 config: field id %d, lane %d, err %d\n",
8573 (int)field_id, (int)lane_id, ret);
77241056
MM
8574 }
8575 return ret;
8576}
8577
8578/*
8579 * Read the 8051 firmware "registers". Use the RAM directly. Always
8580 * set the result, even on error.
8581 * Return 0 on success, -errno on failure
8582 */
8ebd4cf1
EH
8583int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8584 u32 *result)
77241056
MM
8585{
8586 u64 big_data;
8587 u32 addr;
8588 int ret;
8589
8590 /* address start depends on the lane_id */
8591 if (lane_id < 4)
8592 addr = (4 * NUM_GENERAL_FIELDS)
8593 + (lane_id * 4 * NUM_LANE_FIELDS);
8594 else
8595 addr = 0;
8596 addr += field_id * 4;
8597
8598 /* read is in 8-byte chunks, hardware will truncate the address down */
8599 ret = read_8051_data(dd, addr, 8, &big_data);
8600
8601 if (ret == 0) {
8602 /* extract the 4 bytes we want */
8603 if (addr & 0x4)
8604 *result = (u32)(big_data >> 32);
8605 else
8606 *result = (u32)big_data;
8607 } else {
8608 *result = 0;
8609 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
17fb4f29 8610 __func__, lane_id, field_id);
77241056
MM
8611 }
8612
8613 return ret;
8614}
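/*
 * Addressing sketch for the routine above: a general field (any
 * lane_id >= 4) with field_id 3 computes addr = 3 * 4 = 12.  The
 * 8-byte read is issued at the truncated address 8, and because
 * addr & 0x4 is set the requested field is taken from the upper
 * 32 bits of the returned chunk.
 */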
8615
8616static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8617 u8 continuous)
8618{
8619 u32 frame;
8620
8621 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8622 | power_management << POWER_MANAGEMENT_SHIFT;
8623 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8624 GENERAL_CONFIG, frame);
8625}
8626
8627static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8628 u16 vl15buf, u8 crc_sizes)
8629{
8630 u32 frame;
8631
8632 frame = (u32)vau << VAU_SHIFT
8633 | (u32)z << Z_SHIFT
8634 | (u32)vcu << VCU_SHIFT
8635 | (u32)vl15buf << VL15BUF_SHIFT
8636 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8637 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8638 GENERAL_CONFIG, frame);
8639}
8640
8641static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8642 u8 *flag_bits, u16 *link_widths)
8643{
8644 u32 frame;
8645
8646 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
17fb4f29 8647 &frame);
77241056
MM
8648 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8649 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8650 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8651}
8652
8653static int write_vc_local_link_width(struct hfi1_devdata *dd,
8654 u8 misc_bits,
8655 u8 flag_bits,
8656 u16 link_widths)
8657{
8658 u32 frame;
8659
8660 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8661 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8662 | (u32)link_widths << LINK_WIDTH_SHIFT;
8663 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8664 frame);
8665}
8666
8667static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8668 u8 device_rev)
8669{
8670 u32 frame;
8671
8672 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8673 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8674 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8675}
8676
8677static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8678 u8 *device_rev)
8679{
8680 u32 frame;
8681
8682 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8683 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8684 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8685 & REMOTE_DEVICE_REV_MASK;
8686}
8687
8688void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8689{
8690 u32 frame;
8691
8692 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8693 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8694 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8695}
8696
8697static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8698 u8 *continuous)
8699{
8700 u32 frame;
8701
8702 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8703 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8704 & POWER_MANAGEMENT_MASK;
8705 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8706 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8707}
8708
8709static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8710 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8711{
8712 u32 frame;
8713
8714 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8715 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8716 *z = (frame >> Z_SHIFT) & Z_MASK;
8717 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8718 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8719 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8720}
8721
8722static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8723 u8 *remote_tx_rate,
8724 u16 *link_widths)
8725{
8726 u32 frame;
8727
8728 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
17fb4f29 8729 &frame);
77241056
MM
8730 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8731 & REMOTE_TX_RATE_MASK;
8732 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8733}
8734
8735static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8736{
8737 u32 frame;
8738
8739 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8740 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8741}
8742
8743static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8744{
8745 u32 frame;
8746
8747 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8748 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8749}
8750
8751static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8752{
8753 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8754}
8755
8756static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8757{
8758 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8759}
8760
8761void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8762{
8763 u32 frame;
8764 int ret;
8765
8766 *link_quality = 0;
8767 if (dd->pport->host_link_state & HLS_UP) {
8768 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
17fb4f29 8769 &frame);
77241056
MM
8770 if (ret == 0)
8771 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8772 & LINK_QUALITY_MASK;
8773 }
8774}
8775
8776static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8777{
8778 u32 frame;
8779
8780 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8781 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8782}
8783
feb831dd
DL
8784static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8785{
8786 u32 frame;
8787
8788 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8789 *ldr = (frame & 0xff);
8790}
8791
77241056
MM
8792static int read_tx_settings(struct hfi1_devdata *dd,
8793 u8 *enable_lane_tx,
8794 u8 *tx_polarity_inversion,
8795 u8 *rx_polarity_inversion,
8796 u8 *max_rate)
8797{
8798 u32 frame;
8799 int ret;
8800
8801 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8802 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8803 & ENABLE_LANE_TX_MASK;
8804 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8805 & TX_POLARITY_INVERSION_MASK;
8806 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8807 & RX_POLARITY_INVERSION_MASK;
8808 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8809 return ret;
8810}
8811
8812static int write_tx_settings(struct hfi1_devdata *dd,
8813 u8 enable_lane_tx,
8814 u8 tx_polarity_inversion,
8815 u8 rx_polarity_inversion,
8816 u8 max_rate)
8817{
8818 u32 frame;
8819
8820 /* no need to mask, all variable sizes match field widths */
8821 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8822 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8823 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8824 | max_rate << MAX_RATE_SHIFT;
8825 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8826}
8827
8828static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8829{
8830 u32 frame, version, prod_id;
8831 int ret, lane;
8832
8833 /* 4 lanes */
8834 for (lane = 0; lane < 4; lane++) {
8835 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8836 if (ret) {
17fb4f29
JJ
8837 dd_dev_err(dd,
8838 "Unable to read lane %d firmware details\n",
8839 lane);
77241056
MM
8840 continue;
8841 }
8842 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8843 & SPICO_ROM_VERSION_MASK;
8844 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8845 & SPICO_ROM_PROD_ID_MASK;
8846 dd_dev_info(dd,
17fb4f29
JJ
8847 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8848 lane, version, prod_id);
77241056
MM
8849 }
8850}
8851
8852/*
8853 * Read an idle LCB message.
8854 *
8855 * Returns 0 on success, -EINVAL on error
8856 */
8857static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8858{
8859 int ret;
8860
17fb4f29 8861 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
77241056
MM
8862 if (ret != HCMD_SUCCESS) {
8863 dd_dev_err(dd, "read idle message: type %d, err %d\n",
17fb4f29 8864 (u32)type, ret);
77241056
MM
8865 return -EINVAL;
8866 }
8867 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8868 /* return only the payload as we already know the type */
8869 *data_out >>= IDLE_PAYLOAD_SHIFT;
8870 return 0;
8871}
8872
8873/*
8874 * Read an idle SMA message. To be done in response to a notification from
8875 * the 8051.
8876 *
8877 * Returns 0 on success, -EINVAL on error
8878 */
8879static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8880{
17fb4f29
JJ
8881 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8882 data);
77241056
MM
8883}
8884
8885/*
8886 * Send an idle LCB message.
8887 *
8888 * Returns 0 on success, -EINVAL on error
8889 */
8890static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8891{
8892 int ret;
8893
8894 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8895 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8896 if (ret != HCMD_SUCCESS) {
8897 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
17fb4f29 8898 data, ret);
77241056
MM
8899 return -EINVAL;
8900 }
8901 return 0;
8902}
8903
8904/*
8905 * Send an idle SMA message.
8906 *
8907 * Returns 0 on success, -EINVAL on error
8908 */
8909int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8910{
8911 u64 data;
8912
17fb4f29
JJ
8913 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8914 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
77241056
MM
8915 return send_idle_message(dd, data);
8916}
8917
8918/*
8919 * Initialize the LCB then do a quick link up. This may or may not be
8920 * in loopback.
8921 *
8922 * return 0 on success, -errno on error
8923 */
8924static int do_quick_linkup(struct hfi1_devdata *dd)
8925{
8926 u64 reg;
8927 unsigned long timeout;
8928 int ret;
8929
8930 lcb_shutdown(dd, 0);
8931
8932 if (loopback) {
8933 /* LCB_CFG_LOOPBACK.VAL = 2 */
8934 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8935 write_csr(dd, DC_LCB_CFG_LOOPBACK,
17fb4f29 8936 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
77241056
MM
8937 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8938 }
8939
8940 /* start the LCBs */
8941 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8942 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8943
8944 /* simulator only loopback steps */
8945 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8946 /* LCB_CFG_RUN.EN = 1 */
8947 write_csr(dd, DC_LCB_CFG_RUN,
17fb4f29 8948 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
77241056
MM
8949
8950 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8951 timeout = jiffies + msecs_to_jiffies(10);
8952 while (1) {
17fb4f29 8953 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
77241056
MM
8954 if (reg)
8955 break;
8956 if (time_after(jiffies, timeout)) {
8957 dd_dev_err(dd,
17fb4f29 8958 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
77241056
MM
8959 return -ETIMEDOUT;
8960 }
8961 udelay(2);
8962 }
8963
8964 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
17fb4f29 8965 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
77241056
MM
8966 }
8967
8968 if (!loopback) {
8969 /*
8970 * When doing quick linkup and not in loopback, both
8971 * sides must be done with LCB set-up before either
8972 * starts the quick linkup. Put a delay here so that
8973 * both sides can be started and have a chance to be
8974 * done with LCB set up before resuming.
8975 */
8976 dd_dev_err(dd,
17fb4f29 8977 "Pausing for peer to be finished with LCB set up\n");
77241056 8978 msleep(5000);
17fb4f29 8979 dd_dev_err(dd, "Continuing with quick linkup\n");
77241056
MM
8980 }
8981
8982 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8983 set_8051_lcb_access(dd);
8984
8985 /*
8986 * State "quick" LinkUp request sets the physical link state to
8987 * LinkUp without a verify capability sequence.
8988 * This state is in simulator v37 and later.
8989 */
8990 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8991 if (ret != HCMD_SUCCESS) {
8992 dd_dev_err(dd,
17fb4f29
JJ
8993 "%s: set physical link state to quick LinkUp failed with return %d\n",
8994 __func__, ret);
77241056
MM
8995
8996 set_host_lcb_access(dd);
8997 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8998
8999 if (ret >= 0)
9000 ret = -EINVAL;
9001 return ret;
9002 }
9003
9004 return 0; /* success */
9005}
9006
9007/*
9008 * Set the SerDes to internal loopback mode.
9009 * Returns 0 on success, -errno on error.
9010 */
9011static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9012{
9013 int ret;
9014
9015 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9016 if (ret == HCMD_SUCCESS)
9017 return 0;
9018 dd_dev_err(dd,
17fb4f29
JJ
9019 "Set physical link state to SerDes Loopback failed with return %d\n",
9020 ret);
77241056
MM
9021 if (ret >= 0)
9022 ret = -EINVAL;
9023 return ret;
9024}
9025
9026/*
9027 * Do all special steps to set up loopback.
9028 */
9029static int init_loopback(struct hfi1_devdata *dd)
9030{
9031 dd_dev_info(dd, "Entering loopback mode\n");
9032
9033 /* all loopbacks should disable self GUID check */
9034 write_csr(dd, DC_DC8051_CFG_MODE,
17fb4f29 9035 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
77241056
MM
9036
9037 /*
9038 * The simulator has only one loopback option - LCB. Switch
9039 * to that option, which includes quick link up.
9040 *
9041 * Accept all valid loopback values.
9042 */
d0d236ea
JJ
9043 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9044 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9045 loopback == LOOPBACK_CABLE)) {
77241056
MM
9046 loopback = LOOPBACK_LCB;
9047 quick_linkup = 1;
9048 return 0;
9049 }
9050
9051 /* handle serdes loopback */
9052 if (loopback == LOOPBACK_SERDES) {
9053 /* internal serdes loopback needs quick linkup on RTL */
9054 if (dd->icode == ICODE_RTL_SILICON)
9055 quick_linkup = 1;
9056 return set_serdes_loopback_mode(dd);
9057 }
9058
9059 /* LCB loopback - handled at poll time */
9060 if (loopback == LOOPBACK_LCB) {
9061 quick_linkup = 1; /* LCB is always quick linkup */
9062
9063 /* not supported in emulation due to emulation RTL changes */
9064 if (dd->icode == ICODE_FPGA_EMULATION) {
9065 dd_dev_err(dd,
17fb4f29 9066 "LCB loopback not supported in emulation\n");
77241056
MM
9067 return -EINVAL;
9068 }
9069 return 0;
9070 }
9071
9072 /* external cable loopback requires no extra steps */
9073 if (loopback == LOOPBACK_CABLE)
9074 return 0;
9075
9076 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9077 return -EINVAL;
9078}
9079
9080/*
9081 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9082 * used in the Verify Capability link width attribute.
9083 */
9084static u16 opa_to_vc_link_widths(u16 opa_widths)
9085{
9086 int i;
9087 u16 result = 0;
9088
9089 static const struct link_bits {
9090 u16 from;
9091 u16 to;
9092 } opa_link_xlate[] = {
8638b77f
JJ
9093 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9094 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9095 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9096 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
77241056
MM
9097 };
9098
9099 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9100 if (opa_widths & opa_link_xlate[i].from)
9101 result |= opa_link_xlate[i].to;
9102 }
9103 return result;
9104}
9105
9106/*
9107 * Set link attributes before moving to polling.
9108 */
9109static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9110{
9111 struct hfi1_devdata *dd = ppd->dd;
9112 u8 enable_lane_tx;
9113 u8 tx_polarity_inversion;
9114 u8 rx_polarity_inversion;
9115 int ret;
9116
9117 /* reset our fabric serdes to clear any lingering problems */
9118 fabric_serdes_reset(dd);
9119
9120 /* set the local tx rate - need to read-modify-write */
9121 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
17fb4f29 9122 &rx_polarity_inversion, &ppd->local_tx_rate);
77241056
MM
9123 if (ret)
9124 goto set_local_link_attributes_fail;
9125
9126 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9127 /* set the tx rate to the fastest enabled */
9128 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9129 ppd->local_tx_rate = 1;
9130 else
9131 ppd->local_tx_rate = 0;
9132 } else {
9133 /* set the tx rate to all enabled */
9134 ppd->local_tx_rate = 0;
9135 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9136 ppd->local_tx_rate |= 2;
9137 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9138 ppd->local_tx_rate |= 1;
9139 }
febffe2c
EH
9140
9141 enable_lane_tx = 0xF; /* enable all four lanes */
77241056 9142 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
17fb4f29 9143 rx_polarity_inversion, ppd->local_tx_rate);
77241056
MM
9144 if (ret != HCMD_SUCCESS)
9145 goto set_local_link_attributes_fail;
9146
9147 /*
9148 * DC supports continuous updates.
9149 */
17fb4f29
JJ
9150 ret = write_vc_local_phy(dd,
9151 0 /* no power management */,
9152 1 /* continuous updates */);
77241056
MM
9153 if (ret != HCMD_SUCCESS)
9154 goto set_local_link_attributes_fail;
9155
9156 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9157 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9158 ppd->port_crc_mode_enabled);
9159 if (ret != HCMD_SUCCESS)
9160 goto set_local_link_attributes_fail;
9161
9162 ret = write_vc_local_link_width(dd, 0, 0,
17fb4f29
JJ
9163 opa_to_vc_link_widths(
9164 ppd->link_width_enabled));
77241056
MM
9165 if (ret != HCMD_SUCCESS)
9166 goto set_local_link_attributes_fail;
9167
9168 /* let peer know who we are */
9169 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9170 if (ret == HCMD_SUCCESS)
9171 return 0;
9172
9173set_local_link_attributes_fail:
9174 dd_dev_err(dd,
17fb4f29
JJ
9175 "Failed to set local link attributes, return 0x%x\n",
9176 ret);
77241056
MM
9177 return ret;
9178}
9179
9180/*
623bba2d
EH
9181 * Call this to start the link.
9182 * Do not do anything if the link is disabled.
9183 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
77241056
MM
9184 */
9185int start_link(struct hfi1_pportdata *ppd)
9186{
9187 if (!ppd->link_enabled) {
9188 dd_dev_info(ppd->dd,
17fb4f29
JJ
9189 "%s: stopping link start because link is disabled\n",
9190 __func__);
77241056
MM
9191 return 0;
9192 }
9193 if (!ppd->driver_link_ready) {
9194 dd_dev_info(ppd->dd,
17fb4f29
JJ
9195 "%s: stopping link start because driver is not ready\n",
9196 __func__);
77241056
MM
9197 return 0;
9198 }
9199
3ec5fa28
SS
9200 /*
9201 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9202 * pkey table can be configured properly if the HFI unit is connected
9203 * to a switch port with MgmtAllowed=NO
9204 */
9205 clear_full_mgmt_pkey(ppd);
9206
623bba2d 9207 return set_link_state(ppd, HLS_DN_POLL);
77241056
MM
9208}
9209
8ebd4cf1
EH
9210static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9211{
9212 struct hfi1_devdata *dd = ppd->dd;
9213 u64 mask;
9214 unsigned long timeout;
9215
9216 /*
9217 * Check for QSFP interrupt for t_init (SFF 8679)
9218 */
9219 timeout = jiffies + msecs_to_jiffies(2000);
9220 while (1) {
9221 mask = read_csr(dd, dd->hfi1_id ?
9222 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9223 if (!(mask & QSFP_HFI0_INT_N)) {
9224 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9225 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9226 break;
9227 }
9228 if (time_after(jiffies, timeout)) {
9229 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9230 __func__);
9231 break;
9232 }
9233 udelay(2);
9234 }
9235}
9236
9237static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9238{
9239 struct hfi1_devdata *dd = ppd->dd;
9240 u64 mask;
9241
9242 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9243 if (enable)
9244 mask |= (u64)QSFP_HFI0_INT_N;
9245 else
9246 mask &= ~(u64)QSFP_HFI0_INT_N;
9247 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9248}
9249
9250void reset_qsfp(struct hfi1_pportdata *ppd)
77241056
MM
9251{
9252 struct hfi1_devdata *dd = ppd->dd;
9253 u64 mask, qsfp_mask;
9254
8ebd4cf1
EH
9255 /* Disable INT_N from triggering QSFP interrupts */
9256 set_qsfp_int_n(ppd, 0);
9257
9258 /* Reset the QSFP */
77241056 9259 mask = (u64)QSFP_HFI0_RESET_N;
77241056
MM
9260
9261 qsfp_mask = read_csr(dd,
17fb4f29 9262 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
77241056
MM
9263 qsfp_mask &= ~mask;
9264 write_csr(dd,
17fb4f29 9265 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
77241056
MM
9266
9267 udelay(10);
9268
9269 qsfp_mask |= mask;
9270 write_csr(dd,
17fb4f29 9271 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
8ebd4cf1
EH
9272
9273 wait_for_qsfp_init(ppd);
9274
9275 /*
9276 * Allow INT_N to trigger the QSFP interrupt to watch
9277 * for alarms and warnings
9278 */
9279 set_qsfp_int_n(ppd, 1);
77241056
MM
9280}
9281
9282static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9283 u8 *qsfp_interrupt_status)
9284{
9285 struct hfi1_devdata *dd = ppd->dd;
9286
9287 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
17fb4f29
JJ
9288 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9289 dd_dev_info(dd, "%s: QSFP cable on fire\n",
9290 __func__);
77241056
MM
9291
9292 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
17fb4f29
JJ
9293 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9294 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9295 __func__);
77241056 9296
0c7f77af
EH
9297 /*
9298 * The remaining alarms/warnings don't matter if the link is down.
9299 */
9300 if (ppd->host_link_state & HLS_DOWN)
9301 return 0;
9302
77241056 9303 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
17fb4f29
JJ
9304 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9305 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9306 __func__);
77241056
MM
9307
9308 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
17fb4f29
JJ
9309 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9310 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9311 __func__);
77241056
MM
9312
9313 /* Byte 2 is vendor specific */
9314
9315 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
17fb4f29
JJ
9316 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9317 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9318 __func__);
77241056
MM
9319
9320 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
17fb4f29
JJ
9321 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9322 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9323 __func__);
77241056
MM
9324
9325 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
17fb4f29
JJ
9326 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9327 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9328 __func__);
77241056
MM
9329
9330 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
17fb4f29
JJ
9331 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9332 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9333 __func__);
77241056
MM
9334
9335 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
17fb4f29
JJ
9336 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9337 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9338 __func__);
77241056
MM
9339
9340 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
17fb4f29
JJ
9341 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9342 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9343 __func__);
77241056
MM
9344
9345 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
17fb4f29
JJ
9346 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9347 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9348 __func__);
77241056
MM
9349
9350 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
17fb4f29
JJ
9351 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9352 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9353 __func__);
77241056
MM
9354
9355 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
17fb4f29
JJ
9356 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9357 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9358 __func__);
77241056
MM
9359
9360 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
17fb4f29
JJ
9361 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9362 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9363 __func__);
77241056
MM
9364
9365 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
17fb4f29
JJ
9366 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9367 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9368 __func__);
77241056
MM
9369
9370 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
17fb4f29
JJ
9371 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9372 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9373 __func__);
77241056
MM
9374
9375 /* Bytes 9-10 and 11-12 are reserved */
9376 /* Bytes 13-15 are vendor specific */
9377
9378 return 0;
9379}
9380
623bba2d 9381/* This routine will only be scheduled if the QSFP module present signal is asserted */
8ebd4cf1 9382void qsfp_event(struct work_struct *work)
77241056
MM
9383{
9384 struct qsfp_data *qd;
9385 struct hfi1_pportdata *ppd;
9386 struct hfi1_devdata *dd;
9387
9388 qd = container_of(work, struct qsfp_data, qsfp_work);
9389 ppd = qd->ppd;
9390 dd = ppd->dd;
9391
9392 /* Sanity check */
9393 if (!qsfp_mod_present(ppd))
9394 return;
9395
9396 /*
0c7f77af
EH
9397 * Turn DC back on after cable has been re-inserted. Up until
9398 * now, the DC has been in reset to save power.
77241056
MM
9399 */
9400 dc_start(dd);
9401
9402 if (qd->cache_refresh_required) {
8ebd4cf1 9403 set_qsfp_int_n(ppd, 0);
77241056 9404
8ebd4cf1
EH
9405 wait_for_qsfp_init(ppd);
9406
9407 /*
9408 * Allow INT_N to trigger the QSFP interrupt to watch
9409 * for alarms and warnings
77241056 9410 */
8ebd4cf1
EH
9411 set_qsfp_int_n(ppd, 1);
9412
9413 tune_serdes(ppd);
9414
9415 start_link(ppd);
77241056
MM
9416 }
9417
9418 if (qd->check_interrupt_flags) {
9419 u8 qsfp_interrupt_status[16] = {0,};
9420
765a6fac
DL
9421 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9422 &qsfp_interrupt_status[0], 16) != 16) {
77241056 9423 dd_dev_info(dd,
17fb4f29
JJ
9424 "%s: Failed to read status of QSFP module\n",
9425 __func__);
77241056
MM
9426 } else {
9427 unsigned long flags;
77241056 9428
8ebd4cf1
EH
9429 handle_qsfp_error_conditions(
9430 ppd, qsfp_interrupt_status);
77241056
MM
9431 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9432 ppd->qsfp_info.check_interrupt_flags = 0;
9433 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
17fb4f29 9434 flags);
77241056
MM
9435 }
9436 }
9437}
9438
8ebd4cf1 9439static void init_qsfp_int(struct hfi1_devdata *dd)
77241056 9440{
8ebd4cf1
EH
9441 struct hfi1_pportdata *ppd = dd->pport;
9442 u64 qsfp_mask, cce_int_mask;
9443 const int qsfp1_int_smask = QSFP1_INT % 64;
9444 const int qsfp2_int_smask = QSFP2_INT % 64;
77241056 9445
8ebd4cf1
EH
9446 /*
9447 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9448 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9449 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9450 * the index of the appropriate CSR in the CCEIntMask CSR array
9451 */
9452 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9453 (8 * (QSFP1_INT / 64)));
9454 if (dd->hfi1_id) {
9455 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9456 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9457 cce_int_mask);
9458 } else {
9459 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9460 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9461 cce_int_mask);
77241056
MM
9462 }
9463
77241056
MM
9464 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9465 /* Clear current status to avoid spurious interrupts */
8ebd4cf1
EH
9466 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9467 qsfp_mask);
9468 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9469 qsfp_mask);
9470
9471 set_qsfp_int_n(ppd, 0);
77241056
MM
9472
9473 /* Handle active low nature of INT_N and MODPRST_N pins */
9474 if (qsfp_mod_present(ppd))
9475 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9476 write_csr(dd,
9477 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9478 qsfp_mask);
77241056
MM
9479}
9480
bbdeb33d
DL
9481/*
9482 * Do a one-time initialize of the LCB block.
9483 */
9484static void init_lcb(struct hfi1_devdata *dd)
9485{
a59329d5
DL
9486 /* simulator does not correctly handle LCB cclk loopback, skip */
9487 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9488 return;
9489
bbdeb33d
DL
9490 /* the DC has been reset earlier in the driver load */
9491
9492 /* set LCB for cclk loopback on the port */
9493 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9494 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9495 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9496 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9497 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9498 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9499 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9500}
9501
77241056
MM
9502int bringup_serdes(struct hfi1_pportdata *ppd)
9503{
9504 struct hfi1_devdata *dd = ppd->dd;
9505 u64 guid;
9506 int ret;
9507
9508 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9509 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9510
9511 guid = ppd->guid;
9512 if (!guid) {
9513 if (dd->base_guid)
9514 guid = dd->base_guid + ppd->port - 1;
9515 ppd->guid = guid;
9516 }
9517
77241056
MM
9518 /* Set linkinit_reason on power up per OPA spec */
9519 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9520
bbdeb33d
DL
9521 /* one-time init of the LCB */
9522 init_lcb(dd);
9523
77241056
MM
9524 if (loopback) {
9525 ret = init_loopback(dd);
9526 if (ret < 0)
9527 return ret;
9528 }
9529
9775a991
EH
9530 get_port_type(ppd);
9531 if (ppd->port_type == PORT_TYPE_QSFP) {
9532 set_qsfp_int_n(ppd, 0);
9533 wait_for_qsfp_init(ppd);
9534 set_qsfp_int_n(ppd, 1);
9535 }
9536
9537 /*
9538 * Tune the SerDes to a ballpark setting for
8ebd4cf1
EH
9539 * optimal signal and bit error rate
9540 * Needs to be done before starting the link
9541 */
9542 tune_serdes(ppd);
9543
77241056
MM
9544 return start_link(ppd);
9545}
9546
9547void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9548{
9549 struct hfi1_devdata *dd = ppd->dd;
9550
9551 /*
9552 * Shut down the link and keep it down. First, clear the flag that
9553 * says the driver wants to allow the link to be up (driver_link_ready).
9554 * Then make sure the link is not automatically restarted
9555 * (link_enabled). Cancel any pending restart. And finally
9556 * go offline.
9557 */
9558 ppd->driver_link_ready = 0;
9559 ppd->link_enabled = 0;
9560
8ebd4cf1
EH
9561 ppd->offline_disabled_reason =
9562 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
77241056 9563 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
17fb4f29 9564 OPA_LINKDOWN_REASON_SMA_DISABLED);
77241056
MM
9565 set_link_state(ppd, HLS_DN_OFFLINE);
9566
9567 /* disable the port */
9568 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9569}
9570
9571static inline int init_cpu_counters(struct hfi1_devdata *dd)
9572{
9573 struct hfi1_pportdata *ppd;
9574 int i;
9575
9576 ppd = (struct hfi1_pportdata *)(dd + 1);
9577 for (i = 0; i < dd->num_pports; i++, ppd++) {
4eb06882
DD
9578 ppd->ibport_data.rvp.rc_acks = NULL;
9579 ppd->ibport_data.rvp.rc_qacks = NULL;
9580 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9581 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9582 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9583 if (!ppd->ibport_data.rvp.rc_acks ||
9584 !ppd->ibport_data.rvp.rc_delayed_comp ||
9585 !ppd->ibport_data.rvp.rc_qacks)
77241056
MM
9586 return -ENOMEM;
9587 }
9588
9589 return 0;
9590}
9591
9592static const char * const pt_names[] = {
9593 "expected",
9594 "eager",
9595 "invalid"
9596};
9597
9598static const char *pt_name(u32 type)
9599{
9600 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9601}
9602
9603/*
9604 * index is the index into the receive array
9605 */
9606void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9607 u32 type, unsigned long pa, u16 order)
9608{
9609 u64 reg;
9610 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9611 (dd->kregbase + RCV_ARRAY));
9612
9613 if (!(dd->flags & HFI1_PRESENT))
9614 goto done;
9615
9616 if (type == PT_INVALID) {
9617 pa = 0;
9618 } else if (type > PT_INVALID) {
9619 dd_dev_err(dd,
17fb4f29
JJ
9620 "unexpected receive array type %u for index %u, not handled\n",
9621 type, index);
77241056
MM
9622 goto done;
9623 }
9624
9625 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9626 pt_name(type), index, pa, (unsigned long)order);
9627
9628#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9629 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9630 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9631 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9632 << RCV_ARRAY_RT_ADDR_SHIFT;
9633 writeq(reg, base + (index * 8));
9634
9635 if (type == PT_EAGER)
9636 /*
9637 * Eager entries are written one-by-one so we have to push them
9638 * after we write the entry.
9639 */
9640 flush_wc();
9641done:
9642 return;
9643}
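/*
 * RcvArray entry sketch for the routine above: a buffer at
 * pa 0x12345000 with order 1 yields an address field of
 * pa >> RT_ADDR_SHIFT = 0x12345, OR'd in at RCV_ARRAY_RT_ADDR_SHIFT
 * together with the write-enable bit and the buffer-size order,
 * before being written to RcvArray slot 'index'.
 */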
9644
9645void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9646{
9647 struct hfi1_devdata *dd = rcd->dd;
9648 u32 i;
9649
9650 /* this could be optimized */
9651 for (i = rcd->eager_base; i < rcd->eager_base +
9652 rcd->egrbufs.alloced; i++)
9653 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9654
9655 for (i = rcd->expected_base;
9656 i < rcd->expected_base + rcd->expected_count; i++)
9657 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9658}
9659
9660int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9661 struct hfi1_ctxt_info *kinfo)
9662{
9663 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9664 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9665 return 0;
9666}
9667
9668struct hfi1_message_header *hfi1_get_msgheader(
9669 struct hfi1_devdata *dd, __le32 *rhf_addr)
9670{
9671 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9672
9673 return (struct hfi1_message_header *)
9674 (rhf_addr - dd->rhf_offset + offset);
9675}
9676
9677static const char * const ib_cfg_name_strings[] = {
9678 "HFI1_IB_CFG_LIDLMC",
9679 "HFI1_IB_CFG_LWID_DG_ENB",
9680 "HFI1_IB_CFG_LWID_ENB",
9681 "HFI1_IB_CFG_LWID",
9682 "HFI1_IB_CFG_SPD_ENB",
9683 "HFI1_IB_CFG_SPD",
9684 "HFI1_IB_CFG_RXPOL_ENB",
9685 "HFI1_IB_CFG_LREV_ENB",
9686 "HFI1_IB_CFG_LINKLATENCY",
9687 "HFI1_IB_CFG_HRTBT",
9688 "HFI1_IB_CFG_OP_VLS",
9689 "HFI1_IB_CFG_VL_HIGH_CAP",
9690 "HFI1_IB_CFG_VL_LOW_CAP",
9691 "HFI1_IB_CFG_OVERRUN_THRESH",
9692 "HFI1_IB_CFG_PHYERR_THRESH",
9693 "HFI1_IB_CFG_LINKDEFAULT",
9694 "HFI1_IB_CFG_PKEYS",
9695 "HFI1_IB_CFG_MTU",
9696 "HFI1_IB_CFG_LSTATE",
9697 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9698 "HFI1_IB_CFG_PMA_TICKS",
9699 "HFI1_IB_CFG_PORT"
9700};
9701
9702static const char *ib_cfg_name(int which)
9703{
9704 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9705 return "invalid";
9706 return ib_cfg_name_strings[which];
9707}
9708
9709int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9710{
9711 struct hfi1_devdata *dd = ppd->dd;
9712 int val = 0;
9713
9714 switch (which) {
9715 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9716 val = ppd->link_width_enabled;
9717 break;
9718 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9719 val = ppd->link_width_active;
9720 break;
9721 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9722 val = ppd->link_speed_enabled;
9723 break;
9724 case HFI1_IB_CFG_SPD: /* current Link speed */
9725 val = ppd->link_speed_active;
9726 break;
9727
9728 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9729 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9730 case HFI1_IB_CFG_LINKLATENCY:
9731 goto unimplemented;
9732
9733 case HFI1_IB_CFG_OP_VLS:
9734 val = ppd->vls_operational;
9735 break;
9736 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9737 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9738 break;
9739 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9740 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9741 break;
9742 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9743 val = ppd->overrun_threshold;
9744 break;
9745 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9746 val = ppd->phy_error_threshold;
9747 break;
9748 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9749 val = dd->link_default;
9750 break;
9751
9752 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9753 case HFI1_IB_CFG_PMA_TICKS:
9754 default:
9755unimplemented:
9756 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9757 dd_dev_info(
9758 dd,
9759 "%s: which %s: not implemented\n",
9760 __func__,
9761 ib_cfg_name(which));
9762 break;
9763 }
9764
9765 return val;
9766}
9767
9768/*
9769 * The largest MAD packet size.
9770 */
9771#define MAX_MAD_PACKET 2048
9772
9773/*
9774 * Return the maximum header bytes that can go on the _wire_
9775 * for this device. This count includes the ICRC which is
9776 * not part of the packet held in memory but is appended
9777 * by the HW.
9778 * This is dependent on the device's receive header entry size.
9779 * HFI allows this to be set per-receive context, but the
9780 * driver presently enforces a global value.
9781 */
9782u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9783{
9784 /*
9785 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9786 * the Receive Header Entry Size minus the PBC (or RHF) size
9787 * plus one DW for the ICRC appended by HW.
9788 *
9789 * dd->rcd[0].rcvhdrqentsize is in DW.
9790 * We use rcd[0] as all contexts will have the same value. Also,
9791 * the first kernel context would have been allocated by now so
9792 * we are guaranteed a valid value.
9793 */
9794 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9795}
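/*
 * Arithmetic sketch for the routine above: with a receive header
 * queue entry size of 32 DW, the result is (32 - 2 + 1) << 2 =
 * 124 bytes of wire header (including the HW-appended ICRC).
 */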
9796
9797/*
9798 * Set Send Length
9799 * @ppd - per port data
9800 *
9801 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9802 * registers compare against LRH.PktLen, so use the max bytes included
9803 * in the LRH.
9804 *
9805 * This routine changes all VL values except VL15, which it maintains at
9806 * the same value.
9807 */
9808static void set_send_length(struct hfi1_pportdata *ppd)
9809{
9810 struct hfi1_devdata *dd = ppd->dd;
6cc6ad2e
HC
9811 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9812 u32 maxvlmtu = dd->vld[15].mtu;
77241056
MM
9813 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9814 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9815 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
b4ba6633 9816 int i, j;
44306f15 9817 u32 thres;
77241056
MM
9818
9819 for (i = 0; i < ppd->vls_supported; i++) {
9820 if (dd->vld[i].mtu > maxvlmtu)
9821 maxvlmtu = dd->vld[i].mtu;
9822 if (i <= 3)
9823 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9824 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9825 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9826 else
9827 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9828 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9829 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9830 }
9831 write_csr(dd, SEND_LEN_CHECK0, len1);
9832 write_csr(dd, SEND_LEN_CHECK1, len2);
9833 /* adjust kernel credit return thresholds based on new MTUs */
9834 /* all kernel receive contexts have the same hdrqentsize */
9835 for (i = 0; i < ppd->vls_supported; i++) {
44306f15
JX
9836 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9837 sc_mtu_to_threshold(dd->vld[i].sc,
9838 dd->vld[i].mtu,
17fb4f29 9839 dd->rcd[0]->rcvhdrqentsize));
b4ba6633
JJ
9840 for (j = 0; j < INIT_SC_PER_VL; j++)
9841 sc_set_cr_threshold(
9842 pio_select_send_context_vl(dd, j, i),
9843 thres);
44306f15
JX
9844 }
9845 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9846 sc_mtu_to_threshold(dd->vld[15].sc,
9847 dd->vld[15].mtu,
9848 dd->rcd[0]->rcvhdrqentsize));
9849 sc_set_cr_threshold(dd->vld[15].sc, thres);
77241056
MM
9850
9851 /* Adjust maximum MTU for the port in DC */
9852 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9853 (ilog2(maxvlmtu >> 8) + 1);
9854 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9855 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9856 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9857 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9858 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9859}
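/*
 * Illustration (not driver code): the per-VL limit packed into
 * SEND_LEN_CHECK0/1 above is simply (MTU + max header bytes) converted
 * to DW.  The sketch below uses made-up numbers (8192-byte MTU, 124
 * header bytes); the helper name is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t example_send_len_limit_dw(uint32_t mtu, uint32_t max_hb)
{
	return (mtu + max_hb) >> 2;	/* bytes -> DW */
}

int main(void)
{
	printf("%u\n", example_send_len_limit_dw(8192, 124)); /* 2079 DW */
	return 0;
}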
9860
9861static void set_lidlmc(struct hfi1_pportdata *ppd)
9862{
9863 int i;
9864 u64 sreg = 0;
9865 struct hfi1_devdata *dd = ppd->dd;
9866 u32 mask = ~((1U << ppd->lmc) - 1);
9867 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9868
9869 if (dd->hfi1_snoop.mode_flag)
9870 dd_dev_info(dd, "Set lid/lmc while snooping");
9871
9872 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9873 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9874 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
8638b77f 9875 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
77241056
MM
9876 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9877 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9878 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9879
9880 /*
9881 * Iterate over all the send contexts and set their SLID check
9882 */
9883 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9884 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9885 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9886 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9887
9888 for (i = 0; i < dd->chip_send_contexts; i++) {
9889 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9890 i, (u32)sreg);
9891 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9892 }
9893
9894 /* Now we have to do the same thing for the sdma engines */
9895 sdma_update_lmc(dd, mask, ppd->lid);
9896}
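/*
 * Illustration (not driver code): how the LMC mask built in set_lidlmc()
 * behaves.  With LMC = 2 the low two LID bits are ignored, so a base LID
 * of 0x10 matches SLIDs 0x10 through 0x13.  Values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static int example_slid_matches(uint32_t lid, uint32_t lmc, uint32_t slid)
{
	uint32_t mask = ~((1U << lmc) - 1);	/* same mask as set_lidlmc() */

	return (slid & mask) == (lid & mask);
}

int main(void)
{
	printf("%d %d\n",
	       example_slid_matches(0x10, 2, 0x13),	/* 1: inside window */
	       example_slid_matches(0x10, 2, 0x14));	/* 0: outside */
	return 0;
}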
9897
9898static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9899{
9900 unsigned long timeout;
9901 u32 curr_state;
9902
9903 timeout = jiffies + msecs_to_jiffies(msecs);
9904 while (1) {
9905 curr_state = read_physical_state(dd);
9906 if (curr_state == state)
9907 break;
9908 if (time_after(jiffies, timeout)) {
9909 dd_dev_err(dd,
17fb4f29
JJ
9910 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9911 state, curr_state);
77241056
MM
9912 return -ETIMEDOUT;
9913 }
9914 usleep_range(1950, 2050); /* sleep 2ms-ish */
9915 }
9916
9917 return 0;
9918}
9919
9920/*
9921 * Helper for set_link_state(). Do not call except from that routine.
9922 * Expects ppd->hls_mutex to be held.
9923 *
9924 * @rem_reason value to be sent to the neighbor
9925 *
9926 * LinkDownReasons only set if transition succeeds.
9927 */
9928static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9929{
9930 struct hfi1_devdata *dd = ppd->dd;
9931 u32 pstate, previous_state;
9932 u32 last_local_state;
9933 u32 last_remote_state;
9934 int ret;
9935 int do_transition;
9936 int do_wait;
9937
9938 previous_state = ppd->host_link_state;
9939 ppd->host_link_state = HLS_GOING_OFFLINE;
9940 pstate = read_physical_state(dd);
9941 if (pstate == PLS_OFFLINE) {
9942 do_transition = 0; /* in right state */
9943 do_wait = 0; /* ...no need to wait */
9944 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9945 do_transition = 0; /* in an offline transient state */
9946 do_wait = 1; /* ...wait for it to settle */
9947 } else {
9948 do_transition = 1; /* need to move to offline */
9949 do_wait = 1; /* ...will need to wait */
9950 }
9951
9952 if (do_transition) {
9953 ret = set_physical_link_state(dd,
bf640096 9954 (rem_reason << 8) | PLS_OFFLINE);
77241056
MM
9955
9956 if (ret != HCMD_SUCCESS) {
9957 dd_dev_err(dd,
17fb4f29
JJ
9958 "Failed to transition to Offline link state, return %d\n",
9959 ret);
77241056
MM
9960 return -EINVAL;
9961 }
a9c05e35
BM
9962 if (ppd->offline_disabled_reason ==
9963 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
77241056 9964 ppd->offline_disabled_reason =
a9c05e35 9965 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
77241056
MM
9966 }
9967
9968 if (do_wait) {
9969 /* it can take a while for the link to go down */
dc060245 9970 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
77241056
MM
9971 if (ret < 0)
9972 return ret;
9973 }
9974
9975 /* make sure the logical state is also down */
9976 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9977
9978 /*
9979 * Now in charge of LCB - must be after the physical state is
9980 * offline.quiet and before host_link_state is changed.
9981 */
9982 set_host_lcb_access(dd);
9983 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9984 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9985
8ebd4cf1
EH
9986 if (ppd->port_type == PORT_TYPE_QSFP &&
9987 ppd->qsfp_info.limiting_active &&
9988 qsfp_mod_present(ppd)) {
765a6fac
DL
9989 int ret;
9990
9991 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
9992 if (ret == 0) {
9993 set_qsfp_tx(ppd, 0);
9994 release_chip_resource(dd, qsfp_resource(dd));
9995 } else {
9996 /* not fatal, but should warn */
9997 dd_dev_err(dd,
9998 "Unable to acquire lock to turn off QSFP TX\n");
9999 }
8ebd4cf1
EH
10000 }
10001
77241056
MM
10002 /*
10003 * The LNI has a mandatory wait time after the physical state
10004 * moves to Offline.Quiet. The wait time may be different
10005 * depending on how the link went down. The 8051 firmware
10006 * will observe the needed wait time and only move to ready
10007 * when that is completed. The largest of the quiet timeouts
05087f3b
DL
10008 * is 6s, so wait that long and then at least 0.5s more for
10009 * other transitions, and another 0.5s for a buffer.
77241056 10010 */
05087f3b 10011 ret = wait_fm_ready(dd, 7000);
77241056
MM
10012 if (ret) {
10013 dd_dev_err(dd,
17fb4f29 10014 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
77241056
MM
10015 /* state is really offline, so make it so */
10016 ppd->host_link_state = HLS_DN_OFFLINE;
10017 return ret;
10018 }
10019
10020 /*
10021 * The state is now offline and the 8051 is ready to accept host
10022 * requests.
10023 * - change our state
10024 * - notify others if we were previously in a linkup state
10025 */
10026 ppd->host_link_state = HLS_DN_OFFLINE;
10027 if (previous_state & HLS_UP) {
10028 /* went down while link was up */
10029 handle_linkup_change(dd, 0);
10030 } else if (previous_state
10031 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10032 /* went down while attempting link up */
10033 /* byte 1 of last_*_state is the failure reason */
10034 read_last_local_state(dd, &last_local_state);
10035 read_last_remote_state(dd, &last_remote_state);
10036 dd_dev_err(dd,
17fb4f29
JJ
10037 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
10038 last_local_state, last_remote_state);
77241056
MM
10039 }
10040
10041 /* the active link width (downgrade) is 0 on link down */
10042 ppd->link_width_active = 0;
10043 ppd->link_width_downgrade_tx_active = 0;
10044 ppd->link_width_downgrade_rx_active = 0;
10045 ppd->current_egress_rate = 0;
10046 return 0;
10047}
10048
10049/* return the link state name */
10050static const char *link_state_name(u32 state)
10051{
10052 const char *name;
10053 int n = ilog2(state);
10054 static const char * const names[] = {
10055 [__HLS_UP_INIT_BP] = "INIT",
10056 [__HLS_UP_ARMED_BP] = "ARMED",
10057 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10058 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10059 [__HLS_DN_POLL_BP] = "POLL",
10060 [__HLS_DN_DISABLE_BP] = "DISABLE",
10061 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10062 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10063 [__HLS_GOING_UP_BP] = "GOING_UP",
10064 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10065 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10066 };
10067
10068 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10069 return name ? name : "unknown";
10070}
10071
10072/* return the link state reason name */
10073static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10074{
10075 if (state == HLS_UP_INIT) {
10076 switch (ppd->linkinit_reason) {
10077 case OPA_LINKINIT_REASON_LINKUP:
10078 return "(LINKUP)";
10079 case OPA_LINKINIT_REASON_FLAPPING:
10080 return "(FLAPPING)";
10081 case OPA_LINKINIT_OUTSIDE_POLICY:
10082 return "(OUTSIDE_POLICY)";
10083 case OPA_LINKINIT_QUARANTINED:
10084 return "(QUARANTINED)";
10085 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10086 return "(INSUFIC_CAPABILITY)";
10087 default:
10088 break;
10089 }
10090 }
10091 return "";
10092}
10093
10094/*
10095 * driver_physical_state - convert the driver's notion of a port's
10096 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10097 * Return -1 (converted to a u32) to indicate error.
10098 */
10099u32 driver_physical_state(struct hfi1_pportdata *ppd)
10100{
10101 switch (ppd->host_link_state) {
10102 case HLS_UP_INIT:
10103 case HLS_UP_ARMED:
10104 case HLS_UP_ACTIVE:
10105 return IB_PORTPHYSSTATE_LINKUP;
10106 case HLS_DN_POLL:
10107 return IB_PORTPHYSSTATE_POLLING;
10108 case HLS_DN_DISABLE:
10109 return IB_PORTPHYSSTATE_DISABLED;
10110 case HLS_DN_OFFLINE:
10111 return OPA_PORTPHYSSTATE_OFFLINE;
10112 case HLS_VERIFY_CAP:
10113 return IB_PORTPHYSSTATE_POLLING;
10114 case HLS_GOING_UP:
10115 return IB_PORTPHYSSTATE_POLLING;
10116 case HLS_GOING_OFFLINE:
10117 return OPA_PORTPHYSSTATE_OFFLINE;
10118 case HLS_LINK_COOLDOWN:
10119 return OPA_PORTPHYSSTATE_OFFLINE;
10120 case HLS_DN_DOWNDEF:
10121 default:
10122 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10123 ppd->host_link_state);
10124 return -1;
10125 }
10126}
10127
10128/*
10129 * driver_logical_state - convert the driver's notion of a port's
10130 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10131 * (converted to a u32) to indicate error.
10132 */
10133u32 driver_logical_state(struct hfi1_pportdata *ppd)
10134{
0c7f77af 10135 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
77241056
MM
10136 return IB_PORT_DOWN;
10137
10138 switch (ppd->host_link_state & HLS_UP) {
10139 case HLS_UP_INIT:
10140 return IB_PORT_INIT;
10141 case HLS_UP_ARMED:
10142 return IB_PORT_ARMED;
10143 case HLS_UP_ACTIVE:
10144 return IB_PORT_ACTIVE;
10145 default:
10146 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10147 ppd->host_link_state);
10148 return -1;
10149 }
10150}
10151
10152void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10153 u8 neigh_reason, u8 rem_reason)
10154{
10155 if (ppd->local_link_down_reason.latest == 0 &&
10156 ppd->neigh_link_down_reason.latest == 0) {
10157 ppd->local_link_down_reason.latest = lcl_reason;
10158 ppd->neigh_link_down_reason.latest = neigh_reason;
10159 ppd->remote_link_down_reason = rem_reason;
10160 }
10161}
10162
10163/*
10164 * Change the physical and/or logical link state.
10165 *
10166 * Do not call this routine while inside an interrupt. It contains
10167 * calls to routines that can take multiple seconds to finish.
10168 *
10169 * Returns 0 on success, -errno on failure.
10170 */
10171int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10172{
10173 struct hfi1_devdata *dd = ppd->dd;
10174 struct ib_event event = {.device = NULL};
10175 int ret1, ret = 0;
77241056
MM
10176 int orig_new_state, poll_bounce;
10177
10178 mutex_lock(&ppd->hls_lock);
10179
10180 orig_new_state = state;
10181 if (state == HLS_DN_DOWNDEF)
10182 state = dd->link_default;
10183
10184 /* interpret poll -> poll as a link bounce */
d0d236ea
JJ
10185 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10186 state == HLS_DN_POLL;
77241056
MM
10187
10188 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
17fb4f29
JJ
10189 link_state_name(ppd->host_link_state),
10190 link_state_name(orig_new_state),
10191 poll_bounce ? "(bounce) " : "",
10192 link_state_reason_name(ppd, state));
77241056 10193
77241056
MM
10194 /*
10195 * If we're going to a (HLS_*) link state that implies the logical
10196 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10197 * reset is_sm_config_started to 0.
10198 */
10199 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10200 ppd->is_sm_config_started = 0;
10201
10202 /*
10203 * Do nothing if the states match. Let a poll to poll link bounce
10204 * go through.
10205 */
10206 if (ppd->host_link_state == state && !poll_bounce)
10207 goto done;
10208
10209 switch (state) {
10210 case HLS_UP_INIT:
d0d236ea
JJ
10211 if (ppd->host_link_state == HLS_DN_POLL &&
10212 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
77241056
MM
10213 /*
10214 * Quick link up jumps from polling to here.
10215 *
10216 * Whether in normal or loopback mode, the
10217 * simulator jumps from polling to link up.
10218 * Accept that here.
10219 */
17fb4f29 10220 /* OK */
77241056
MM
10221 } else if (ppd->host_link_state != HLS_GOING_UP) {
10222 goto unexpected;
10223 }
10224
10225 ppd->host_link_state = HLS_UP_INIT;
10226 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10227 if (ret) {
10228 /* logical state didn't change, stay at going_up */
10229 ppd->host_link_state = HLS_GOING_UP;
10230 dd_dev_err(dd,
17fb4f29
JJ
10231 "%s: logical state did not change to INIT\n",
10232 __func__);
77241056
MM
10233 } else {
10234 /* clear old transient LINKINIT_REASON code */
10235 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10236 ppd->linkinit_reason =
10237 OPA_LINKINIT_REASON_LINKUP;
10238
10239 /* enable the port */
10240 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10241
10242 handle_linkup_change(dd, 1);
10243 }
10244 break;
10245 case HLS_UP_ARMED:
10246 if (ppd->host_link_state != HLS_UP_INIT)
10247 goto unexpected;
10248
10249 ppd->host_link_state = HLS_UP_ARMED;
10250 set_logical_state(dd, LSTATE_ARMED);
10251 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10252 if (ret) {
10253 /* logical state didn't change, stay at init */
10254 ppd->host_link_state = HLS_UP_INIT;
10255 dd_dev_err(dd,
17fb4f29
JJ
10256 "%s: logical state did not change to ARMED\n",
10257 __func__);
77241056
MM
10258 }
10259 /*
10260 * The simulator does not currently implement SMA messages,
10261 * so neighbor_normal is not set. Set it here when we first
10262 * move to Armed.
10263 */
10264 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10265 ppd->neighbor_normal = 1;
10266 break;
10267 case HLS_UP_ACTIVE:
10268 if (ppd->host_link_state != HLS_UP_ARMED)
10269 goto unexpected;
10270
10271 ppd->host_link_state = HLS_UP_ACTIVE;
10272 set_logical_state(dd, LSTATE_ACTIVE);
10273 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10274 if (ret) {
10275 /* logical state didn't change, stay at armed */
10276 ppd->host_link_state = HLS_UP_ARMED;
10277 dd_dev_err(dd,
17fb4f29
JJ
10278 "%s: logical state did not change to ACTIVE\n",
10279 __func__);
77241056 10280 } else {
77241056
MM
10281 /* tell all engines to go running */
10282 sdma_all_running(dd);
10283
10284 /* Signal the IB layer that the port has gone active */
ec3f2c12 10285 event.device = &dd->verbs_dev.rdi.ibdev;
77241056
MM
10286 event.element.port_num = ppd->port;
10287 event.event = IB_EVENT_PORT_ACTIVE;
10288 }
10289 break;
10290 case HLS_DN_POLL:
10291 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10292 ppd->host_link_state == HLS_DN_OFFLINE) &&
10293 dd->dc_shutdown)
10294 dc_start(dd);
10295 /* Hand LED control to the DC */
10296 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10297
10298 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10299 u8 tmp = ppd->link_enabled;
10300
10301 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10302 if (ret) {
10303 ppd->link_enabled = tmp;
10304 break;
10305 }
10306 ppd->remote_link_down_reason = 0;
10307
10308 if (ppd->driver_link_ready)
10309 ppd->link_enabled = 1;
10310 }
10311
fb9036dd 10312 set_all_slowpath(ppd->dd);
77241056
MM
10313 ret = set_local_link_attributes(ppd);
10314 if (ret)
10315 break;
10316
10317 ppd->port_error_action = 0;
10318 ppd->host_link_state = HLS_DN_POLL;
10319
10320 if (quick_linkup) {
10321 /* quick linkup does not go into polling */
10322 ret = do_quick_linkup(dd);
10323 } else {
10324 ret1 = set_physical_link_state(dd, PLS_POLLING);
10325 if (ret1 != HCMD_SUCCESS) {
10326 dd_dev_err(dd,
17fb4f29
JJ
10327 "Failed to transition to Polling link state, return 0x%x\n",
10328 ret1);
77241056
MM
10329 ret = -EINVAL;
10330 }
10331 }
a9c05e35
BM
10332 ppd->offline_disabled_reason =
10333 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
77241056
MM
10334 /*
10335 * If an error occurred above, go back to offline. The
10336 * caller may reschedule another attempt.
10337 */
10338 if (ret)
10339 goto_offline(ppd, 0);
10340 break;
10341 case HLS_DN_DISABLE:
10342 /* link is disabled */
10343 ppd->link_enabled = 0;
10344
10345 /* allow any state to transition to disabled */
10346
10347 /* must transition to offline first */
10348 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10349 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10350 if (ret)
10351 break;
10352 ppd->remote_link_down_reason = 0;
10353 }
10354
10355 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10356 if (ret1 != HCMD_SUCCESS) {
10357 dd_dev_err(dd,
17fb4f29
JJ
10358 "Failed to transition to Disabled link state, return 0x%x\n",
10359 ret1);
77241056
MM
10360 ret = -EINVAL;
10361 break;
10362 }
10363 ppd->host_link_state = HLS_DN_DISABLE;
10364 dc_shutdown(dd);
10365 break;
10366 case HLS_DN_OFFLINE:
10367 if (ppd->host_link_state == HLS_DN_DISABLE)
10368 dc_start(dd);
10369
10370 /* allow any state to transition to offline */
10371 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10372 if (!ret)
10373 ppd->remote_link_down_reason = 0;
10374 break;
10375 case HLS_VERIFY_CAP:
10376 if (ppd->host_link_state != HLS_DN_POLL)
10377 goto unexpected;
10378 ppd->host_link_state = HLS_VERIFY_CAP;
10379 break;
10380 case HLS_GOING_UP:
10381 if (ppd->host_link_state != HLS_VERIFY_CAP)
10382 goto unexpected;
10383
10384 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10385 if (ret1 != HCMD_SUCCESS) {
10386 dd_dev_err(dd,
17fb4f29
JJ
10387 "Failed to transition to link up state, return 0x%x\n",
10388 ret1);
77241056
MM
10389 ret = -EINVAL;
10390 break;
10391 }
10392 ppd->host_link_state = HLS_GOING_UP;
10393 break;
10394
10395 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10396 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10397 default:
10398 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
17fb4f29 10399 __func__, state);
77241056
MM
10400 ret = -EINVAL;
10401 break;
10402 }
10403
77241056
MM
10404 goto done;
10405
10406unexpected:
10407 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
17fb4f29
JJ
10408 __func__, link_state_name(ppd->host_link_state),
10409 link_state_name(state));
77241056
MM
10410 ret = -EINVAL;
10411
10412done:
10413 mutex_unlock(&ppd->hls_lock);
10414
10415 if (event.device)
10416 ib_dispatch_event(&event);
10417
10418 return ret;
10419}
10420
10421int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10422{
10423 u64 reg;
10424 int ret = 0;
10425
10426 switch (which) {
10427 case HFI1_IB_CFG_LIDLMC:
10428 set_lidlmc(ppd);
10429 break;
10430 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10431 /*
10432 * The VL Arbitrator high limit is sent in units of 4k
10433 * bytes, while HFI stores it in units of 64 bytes.
10434 */
8638b77f 10435 val *= 4096 / 64;
77241056
MM
10436 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10437 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10438 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10439 break;
10440 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10441 /* HFI only supports POLL as the default link down state */
10442 if (val != HLS_DN_POLL)
10443 ret = -EINVAL;
10444 break;
10445 case HFI1_IB_CFG_OP_VLS:
10446 if (ppd->vls_operational != val) {
10447 ppd->vls_operational = val;
10448 if (!ppd->port)
10449 ret = -EINVAL;
77241056
MM
10450 }
10451 break;
10452 /*
10453 * For link width, link width downgrade, and speed enable, always AND
10454 * the setting with what is actually supported. This has two benefits.
10455 * First, enabled can't have unsupported values, no matter what the
10456 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10457 * "fill in with your supported value" have all the bits in the
10458 * field set, so simply ANDing with supported has the desired result.
10459 */
10460 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10461 ppd->link_width_enabled = val & ppd->link_width_supported;
10462 break;
10463 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10464 ppd->link_width_downgrade_enabled =
10465 val & ppd->link_width_downgrade_supported;
10466 break;
10467 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10468 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10469 break;
10470 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10471 /*
10472 * HFI does not follow IB specs, save this value
10473 * so we can report it, if asked.
10474 */
10475 ppd->overrun_threshold = val;
10476 break;
10477 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10478 /*
10479 * HFI does not follow IB specs, save this value
10480 * so we can report it, if asked.
10481 */
10482 ppd->phy_error_threshold = val;
10483 break;
10484
10485 case HFI1_IB_CFG_MTU:
10486 set_send_length(ppd);
10487 break;
10488
10489 case HFI1_IB_CFG_PKEYS:
10490 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10491 set_partition_keys(ppd);
10492 break;
10493
10494 default:
10495 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10496 dd_dev_info(ppd->dd,
17fb4f29
JJ
10497 "%s: which %s, val 0x%x: not implemented\n",
10498 __func__, ib_cfg_name(which), val);
77241056
MM
10499 break;
10500 }
10501 return ret;
10502}
10503
10504/* begin functions related to vl arbitration table caching */
10505static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10506{
10507 int i;
10508
10509 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10510 VL_ARB_LOW_PRIO_TABLE_SIZE);
10511 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10512 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10513
10514 /*
10515 * Note that we always return values directly from the
10516 * 'vl_arb_cache' (and do no CSR reads) in response to a
10517 * 'Get(VLArbTable)'. This is obviously correct after a
10518 * 'Set(VLArbTable)', since the cache will then be up to
10519 * date. But it's also correct prior to any 'Set(VLArbTable)'
10520 * since then both the cache, and the relevant h/w registers
10521 * will be zeroed.
10522 */
10523
10524 for (i = 0; i < MAX_PRIO_TABLE; i++)
10525 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10526}
10527
10528/*
10529 * vl_arb_lock_cache
10530 *
10531 * All other vl_arb_* functions should be called only after locking
10532 * the cache.
10533 */
10534static inline struct vl_arb_cache *
10535vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10536{
10537 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10538 return NULL;
10539 spin_lock(&ppd->vl_arb_cache[idx].lock);
10540 return &ppd->vl_arb_cache[idx];
10541}
10542
10543static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10544{
10545 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10546}
10547
10548static void vl_arb_get_cache(struct vl_arb_cache *cache,
10549 struct ib_vl_weight_elem *vl)
10550{
10551 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10552}
10553
10554static void vl_arb_set_cache(struct vl_arb_cache *cache,
10555 struct ib_vl_weight_elem *vl)
10556{
10557 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10558}
10559
10560static int vl_arb_match_cache(struct vl_arb_cache *cache,
10561 struct ib_vl_weight_elem *vl)
10562{
10563 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10564}
f4d507cd 10565
77241056
MM
10566/* end functions related to vl arbitration table caching */
10567
10568static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10569 u32 size, struct ib_vl_weight_elem *vl)
10570{
10571 struct hfi1_devdata *dd = ppd->dd;
10572 u64 reg;
10573 unsigned int i, is_up = 0;
10574 int drain, ret = 0;
10575
10576 mutex_lock(&ppd->hls_lock);
10577
10578 if (ppd->host_link_state & HLS_UP)
10579 is_up = 1;
10580
10581 drain = !is_ax(dd) && is_up;
10582
10583 if (drain)
10584 /*
10585 * Before adjusting VL arbitration weights, empty per-VL
10586 * FIFOs, otherwise a packet whose VL weight is being
10587 * set to 0 could get stuck in a FIFO with no chance to
10588 * egress.
10589 */
10590 ret = stop_drain_data_vls(dd);
10591
10592 if (ret) {
10593 dd_dev_err(
10594 dd,
10595 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10596 __func__);
10597 goto err;
10598 }
10599
10600 for (i = 0; i < size; i++, vl++) {
10601 /*
10602 * NOTE: The low priority shift and mask are used here, but
10603 * they are the same for both the low and high registers.
10604 */
10605 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10606 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10607 | (((u64)vl->weight
10608 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10609 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10610 write_csr(dd, target + (i * 8), reg);
10611 }
10612 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10613
10614 if (drain)
10615 open_fill_data_vls(dd); /* reopen all VLs */
10616
10617err:
10618 mutex_unlock(&ppd->hls_lock);
10619
10620 return ret;
10621}
10622
10623/*
10624 * Read one credit merge VL register.
10625 */
10626static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10627 struct vl_limit *vll)
10628{
10629 u64 reg = read_csr(dd, csr);
10630
10631 vll->dedicated = cpu_to_be16(
10632 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10633 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10634 vll->shared = cpu_to_be16(
10635 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10636 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10637}
10638
10639/*
10640 * Read the current credit merge limits.
10641 */
10642static int get_buffer_control(struct hfi1_devdata *dd,
10643 struct buffer_control *bc, u16 *overall_limit)
10644{
10645 u64 reg;
10646 int i;
10647
10648 /* not all entries are filled in */
10649 memset(bc, 0, sizeof(*bc));
10650
10651 /* OPA and HFI have a 1-1 mapping */
10652 for (i = 0; i < TXE_NUM_DATA_VL; i++)
8638b77f 10653 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
77241056
MM
10654
10655 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10656 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10657
10658 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10659 bc->overall_shared_limit = cpu_to_be16(
10660 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10661 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10662 if (overall_limit)
10663 *overall_limit = (reg
10664 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10665 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10666 return sizeof(struct buffer_control);
10667}
10668
10669static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10670{
10671 u64 reg;
10672 int i;
10673
10674 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10675 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10676 for (i = 0; i < sizeof(u64); i++) {
10677 u8 byte = *(((u8 *)&reg) + i);
10678
10679 dp->vlnt[2 * i] = byte & 0xf;
10680 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10681 }
10682
10683 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10684 for (i = 0; i < sizeof(u64); i++) {
10685 u8 byte = *(((u8 *)&reg) + i);
10686
10687 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10688 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10689 }
10690 return sizeof(struct sc2vlnt);
10691}
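/*
 * Illustration (not driver code): a stand-alone sketch of the nibble
 * unpacking used above, written with shifts (equivalent to the byte
 * pointer walk on the little-endian hosts this driver targets).  The
 * 64-bit value and helper name below are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

static void example_unpack_nibbles(uint64_t reg, uint8_t out[16])
{
	int i;

	for (i = 0; i < 8; i++) {
		uint8_t byte = (reg >> (8 * i)) & 0xff;

		out[2 * i] = byte & 0xf;	/* low nibble first */
		out[2 * i + 1] = byte >> 4;
	}
}

int main(void)
{
	uint8_t vlnt[16];
	int i;

	example_unpack_nibbles(0x0123456789abcdefULL, vlnt);
	for (i = 0; i < 16; i++)
		printf("%x ", vlnt[i]);
	printf("\n");
	return 0;
}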
10692
10693static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10694 struct ib_vl_weight_elem *vl)
10695{
10696 unsigned int i;
10697
10698 for (i = 0; i < nelems; i++, vl++) {
10699 vl->vl = 0xf;
10700 vl->weight = 0;
10701 }
10702}
10703
10704static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10705{
10706 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
17fb4f29
JJ
10707 DC_SC_VL_VAL(15_0,
10708 0, dp->vlnt[0] & 0xf,
10709 1, dp->vlnt[1] & 0xf,
10710 2, dp->vlnt[2] & 0xf,
10711 3, dp->vlnt[3] & 0xf,
10712 4, dp->vlnt[4] & 0xf,
10713 5, dp->vlnt[5] & 0xf,
10714 6, dp->vlnt[6] & 0xf,
10715 7, dp->vlnt[7] & 0xf,
10716 8, dp->vlnt[8] & 0xf,
10717 9, dp->vlnt[9] & 0xf,
10718 10, dp->vlnt[10] & 0xf,
10719 11, dp->vlnt[11] & 0xf,
10720 12, dp->vlnt[12] & 0xf,
10721 13, dp->vlnt[13] & 0xf,
10722 14, dp->vlnt[14] & 0xf,
10723 15, dp->vlnt[15] & 0xf));
77241056 10724 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
17fb4f29
JJ
10725 DC_SC_VL_VAL(31_16,
10726 16, dp->vlnt[16] & 0xf,
10727 17, dp->vlnt[17] & 0xf,
10728 18, dp->vlnt[18] & 0xf,
10729 19, dp->vlnt[19] & 0xf,
10730 20, dp->vlnt[20] & 0xf,
10731 21, dp->vlnt[21] & 0xf,
10732 22, dp->vlnt[22] & 0xf,
10733 23, dp->vlnt[23] & 0xf,
10734 24, dp->vlnt[24] & 0xf,
10735 25, dp->vlnt[25] & 0xf,
10736 26, dp->vlnt[26] & 0xf,
10737 27, dp->vlnt[27] & 0xf,
10738 28, dp->vlnt[28] & 0xf,
10739 29, dp->vlnt[29] & 0xf,
10740 30, dp->vlnt[30] & 0xf,
10741 31, dp->vlnt[31] & 0xf));
77241056
MM
10742}
10743
10744static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10745 u16 limit)
10746{
10747 if (limit != 0)
10748 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
17fb4f29 10749 what, (int)limit, idx);
77241056
MM
10750}
10751
10752 /* change only the shared limit portion of SendCmGlobalCredit */
10753static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10754{
10755 u64 reg;
10756
10757 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10758 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10759 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10760 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10761}
10762
10763 /* change only the total credit limit portion of SendCmGlobalCredit */
10764static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10765{
10766 u64 reg;
10767
10768 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10769 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10770 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10771 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10772}
10773
10774/* set the given per-VL shared limit */
10775static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10776{
10777 u64 reg;
10778 u32 addr;
10779
10780 if (vl < TXE_NUM_DATA_VL)
10781 addr = SEND_CM_CREDIT_VL + (8 * vl);
10782 else
10783 addr = SEND_CM_CREDIT_VL15;
10784
10785 reg = read_csr(dd, addr);
10786 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10787 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10788 write_csr(dd, addr, reg);
10789}
10790
10791/* set the given per-VL dedicated limit */
10792static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10793{
10794 u64 reg;
10795 u32 addr;
10796
10797 if (vl < TXE_NUM_DATA_VL)
10798 addr = SEND_CM_CREDIT_VL + (8 * vl);
10799 else
10800 addr = SEND_CM_CREDIT_VL15;
10801
10802 reg = read_csr(dd, addr);
10803 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10804 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10805 write_csr(dd, addr, reg);
10806}
10807
10808/* spin until the given per-VL status mask bits clear */
10809static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10810 const char *which)
10811{
10812 unsigned long timeout;
10813 u64 reg;
10814
10815 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10816 while (1) {
10817 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10818
10819 if (reg == 0)
10820 return; /* success */
10821 if (time_after(jiffies, timeout))
10822 break; /* timed out */
10823 udelay(1);
10824 }
10825
10826 dd_dev_err(dd,
17fb4f29
JJ
10827 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10828 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
77241056
MM
10829 /*
10830 * If this occurs, it is likely there was a credit loss on the link.
10831 * The only recovery from that is a link bounce.
10832 */
10833 dd_dev_err(dd,
17fb4f29 10834 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
77241056
MM
10835}
10836
10837/*
10838 * The number of credits on the VLs may be changed while everything
10839 * is "live", but the following algorithm must be followed due to
10840 * how the hardware is actually implemented. In particular,
10841 * Return_Credit_Status[] is the only correct status check.
10842 *
10843 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10844 * set Global_Shared_Credit_Limit = 0
10845 * use_all_vl = 1
10846 * mask0 = all VLs that are changing either dedicated or shared limits
10847 * set Shared_Limit[mask0] = 0
10848 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10849 * if (changing any dedicated limit)
10850 * mask1 = all VLs that are lowering dedicated limits
10851 * lower Dedicated_Limit[mask1]
10852 * spin until Return_Credit_Status[mask1] == 0
10853 * raise Dedicated_Limits
10854 * raise Shared_Limits
10855 * raise Global_Shared_Credit_Limit
10856 *
10857 * lower = if the new limit is lower, set the limit to the new value
10858 * raise = if the new limit is higher than the current value (may be changed
10859 * earlier in the algorithm), set the new limit to the new value
10860 */
8a4d3444
MM
10861int set_buffer_control(struct hfi1_pportdata *ppd,
10862 struct buffer_control *new_bc)
77241056 10863{
8a4d3444 10864 struct hfi1_devdata *dd = ppd->dd;
77241056
MM
10865 u64 changing_mask, ld_mask, stat_mask;
10866 int change_count;
10867 int i, use_all_mask;
10868 int this_shared_changing;
8a4d3444 10869 int vl_count = 0, ret;
77241056
MM
10870 /*
10871 * A0: add the variable any_shared_limit_changing below and in the
10872 * algorithm above. If removing A0 support, it can be removed.
10873 */
10874 int any_shared_limit_changing;
10875 struct buffer_control cur_bc;
10876 u8 changing[OPA_MAX_VLS];
10877 u8 lowering_dedicated[OPA_MAX_VLS];
10878 u16 cur_total;
10879 u32 new_total = 0;
10880 const u64 all_mask =
10881 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10882 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10883 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10884 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10885 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10886 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10887 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10888 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10889 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10890
10891#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10892#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10893
77241056
MM
10894 /* find the new total credits, do sanity check on unused VLs */
10895 for (i = 0; i < OPA_MAX_VLS; i++) {
10896 if (valid_vl(i)) {
10897 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10898 continue;
10899 }
10900 nonzero_msg(dd, i, "dedicated",
17fb4f29 10901 be16_to_cpu(new_bc->vl[i].dedicated));
77241056 10902 nonzero_msg(dd, i, "shared",
17fb4f29 10903 be16_to_cpu(new_bc->vl[i].shared));
77241056
MM
10904 new_bc->vl[i].dedicated = 0;
10905 new_bc->vl[i].shared = 0;
10906 }
10907 new_total += be16_to_cpu(new_bc->overall_shared_limit);
bff14bb6 10908
77241056
MM
10909 /* fetch the current values */
10910 get_buffer_control(dd, &cur_bc, &cur_total);
10911
10912 /*
10913 * Create the masks we will use.
10914 */
10915 memset(changing, 0, sizeof(changing));
10916 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
4d114fdd
JJ
10917 /*
10918 * NOTE: Assumes that the individual VL bits are adjacent and in
10919 * increasing order
10920 */
77241056
MM
10921 stat_mask =
10922 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10923 changing_mask = 0;
10924 ld_mask = 0;
10925 change_count = 0;
10926 any_shared_limit_changing = 0;
10927 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10928 if (!valid_vl(i))
10929 continue;
10930 this_shared_changing = new_bc->vl[i].shared
10931 != cur_bc.vl[i].shared;
10932 if (this_shared_changing)
10933 any_shared_limit_changing = 1;
d0d236ea
JJ
10934 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10935 this_shared_changing) {
77241056
MM
10936 changing[i] = 1;
10937 changing_mask |= stat_mask;
10938 change_count++;
10939 }
10940 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10941 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10942 lowering_dedicated[i] = 1;
10943 ld_mask |= stat_mask;
10944 }
10945 }
10946
10947 /* bracket the credit change with a total adjustment */
10948 if (new_total > cur_total)
10949 set_global_limit(dd, new_total);
10950
10951 /*
10952 * Start the credit change algorithm.
10953 */
10954 use_all_mask = 0;
10955 if ((be16_to_cpu(new_bc->overall_shared_limit) <
995deafa
MM
10956 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10957 (is_ax(dd) && any_shared_limit_changing)) {
77241056
MM
10958 set_global_shared(dd, 0);
10959 cur_bc.overall_shared_limit = 0;
10960 use_all_mask = 1;
10961 }
10962
10963 for (i = 0; i < NUM_USABLE_VLS; i++) {
10964 if (!valid_vl(i))
10965 continue;
10966
10967 if (changing[i]) {
10968 set_vl_shared(dd, i, 0);
10969 cur_bc.vl[i].shared = 0;
10970 }
10971 }
10972
10973 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
17fb4f29 10974 "shared");
77241056
MM
10975
10976 if (change_count > 0) {
10977 for (i = 0; i < NUM_USABLE_VLS; i++) {
10978 if (!valid_vl(i))
10979 continue;
10980
10981 if (lowering_dedicated[i]) {
10982 set_vl_dedicated(dd, i,
17fb4f29
JJ
10983 be16_to_cpu(new_bc->
10984 vl[i].dedicated));
77241056
MM
10985 cur_bc.vl[i].dedicated =
10986 new_bc->vl[i].dedicated;
10987 }
10988 }
10989
10990 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10991
10992 /* now raise all dedicated that are going up */
10993 for (i = 0; i < NUM_USABLE_VLS; i++) {
10994 if (!valid_vl(i))
10995 continue;
10996
10997 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10998 be16_to_cpu(cur_bc.vl[i].dedicated))
10999 set_vl_dedicated(dd, i,
17fb4f29
JJ
11000 be16_to_cpu(new_bc->
11001 vl[i].dedicated));
77241056
MM
11002 }
11003 }
11004
11005 /* next raise all shared that are going up */
11006 for (i = 0; i < NUM_USABLE_VLS; i++) {
11007 if (!valid_vl(i))
11008 continue;
11009
11010 if (be16_to_cpu(new_bc->vl[i].shared) >
11011 be16_to_cpu(cur_bc.vl[i].shared))
11012 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11013 }
11014
11015 /* finally raise the global shared */
11016 if (be16_to_cpu(new_bc->overall_shared_limit) >
17fb4f29 11017 be16_to_cpu(cur_bc.overall_shared_limit))
77241056 11018 set_global_shared(dd,
17fb4f29 11019 be16_to_cpu(new_bc->overall_shared_limit));
77241056
MM
11020
11021 /* bracket the credit change with a total adjustment */
11022 if (new_total < cur_total)
11023 set_global_limit(dd, new_total);
8a4d3444
MM
11024
11025 /*
11026 * Determine the actual number of operational VLs using the number of
11027 * dedicated and shared credits for each VL.
11028 */
11029 if (change_count > 0) {
11030 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11031 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11032 be16_to_cpu(new_bc->vl[i].shared) > 0)
11033 vl_count++;
11034 ppd->actual_vls_operational = vl_count;
11035 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11036 ppd->actual_vls_operational :
11037 ppd->vls_operational,
11038 NULL);
11039 if (ret == 0)
11040 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11041 ppd->actual_vls_operational :
11042 ppd->vls_operational, NULL);
11043 if (ret)
11044 return ret;
11045 }
77241056
MM
11046 return 0;
11047}
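/*
 * Illustration (not driver code): the "lower"/"raise" rule from the
 * algorithm comment above, distilled into two tiny helpers.  Limits are
 * only moved down in the first pass and only moved up afterwards; the
 * helper names and values are hypothetical.
 */
#include <stdint.h>

static uint16_t example_lower(uint16_t cur, uint16_t new_limit)
{
	return new_limit < cur ? new_limit : cur;	/* never raises */
}

static uint16_t example_raise(uint16_t cur, uint16_t new_limit)
{
	return new_limit > cur ? new_limit : cur;	/* never lowers */
}

int main(void)
{
	uint16_t cur = 100;			/* hypothetical current limit */

	cur = example_lower(cur, 64);		/* first pass: 64 */
	cur = example_raise(cur, 64);		/* second pass: unchanged */
	return cur == 64 ? 0 : 1;
}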
11048
11049/*
11050 * Read the given fabric manager table. Return the size of the
11051 * table (in bytes) on success, and a negative error code on
11052 * failure.
11053 */
11054int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11055
11056{
11057 int size;
11058 struct vl_arb_cache *vlc;
11059
11060 switch (which) {
11061 case FM_TBL_VL_HIGH_ARB:
11062 size = 256;
11063 /*
11064 * OPA specifies 128 elements (of 2 bytes each), though
11065 * HFI supports only 16 elements in h/w.
11066 */
11067 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11068 vl_arb_get_cache(vlc, t);
11069 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11070 break;
11071 case FM_TBL_VL_LOW_ARB:
11072 size = 256;
11073 /*
11074 * OPA specifies 128 elements (of 2 bytes each), though
11075 * HFI supports only 16 elements in h/w.
11076 */
11077 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11078 vl_arb_get_cache(vlc, t);
11079 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11080 break;
11081 case FM_TBL_BUFFER_CONTROL:
11082 size = get_buffer_control(ppd->dd, t, NULL);
11083 break;
11084 case FM_TBL_SC2VLNT:
11085 size = get_sc2vlnt(ppd->dd, t);
11086 break;
11087 case FM_TBL_VL_PREEMPT_ELEMS:
11088 size = 256;
11089 /* OPA specifies 128 elements, of 2 bytes each */
11090 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11091 break;
11092 case FM_TBL_VL_PREEMPT_MATRIX:
11093 size = 256;
11094 /*
11095 * OPA specifies that this is the same size as the VL
11096 * arbitration tables (i.e., 256 bytes).
11097 */
11098 break;
11099 default:
11100 return -EINVAL;
11101 }
11102 return size;
11103}
11104
11105/*
11106 * Write the given fabric manager table.
11107 */
11108int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11109{
11110 int ret = 0;
11111 struct vl_arb_cache *vlc;
11112
11113 switch (which) {
11114 case FM_TBL_VL_HIGH_ARB:
11115 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11116 if (vl_arb_match_cache(vlc, t)) {
11117 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11118 break;
11119 }
11120 vl_arb_set_cache(vlc, t);
11121 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11122 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11123 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11124 break;
11125 case FM_TBL_VL_LOW_ARB:
11126 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11127 if (vl_arb_match_cache(vlc, t)) {
11128 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11129 break;
11130 }
11131 vl_arb_set_cache(vlc, t);
11132 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11133 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11134 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11135 break;
11136 case FM_TBL_BUFFER_CONTROL:
8a4d3444 11137 ret = set_buffer_control(ppd, t);
77241056
MM
11138 break;
11139 case FM_TBL_SC2VLNT:
11140 set_sc2vlnt(ppd->dd, t);
11141 break;
11142 default:
11143 ret = -EINVAL;
11144 }
11145 return ret;
11146}
11147
11148/*
11149 * Disable all data VLs.
11150 *
11151 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11152 */
11153static int disable_data_vls(struct hfi1_devdata *dd)
11154{
995deafa 11155 if (is_ax(dd))
77241056
MM
11156 return 1;
11157
11158 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11159
11160 return 0;
11161}
11162
11163/*
11164 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11165 * Just re-enables all data VLs (the "fill" part happens
11166 * automatically - the name was chosen for symmetry with
11167 * stop_drain_data_vls()).
11168 *
11169 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11170 */
11171int open_fill_data_vls(struct hfi1_devdata *dd)
11172{
995deafa 11173 if (is_ax(dd))
77241056
MM
11174 return 1;
11175
11176 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11177
11178 return 0;
11179}
11180
11181/*
11182 * drain_data_vls() - assumes that disable_data_vls() has been called;
11183 * waits for the occupancy of the per-VL FIFOs, for all contexts and
11184 * SDMA engines, to drop to 0.
11185 */
11186static void drain_data_vls(struct hfi1_devdata *dd)
11187{
11188 sc_wait(dd);
11189 sdma_wait(dd);
11190 pause_for_credit_return(dd);
11191}
11192
11193/*
11194 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11195 *
11196 * Use open_fill_data_vls() to resume using data VLs. This pair is
11197 * meant to be used like this:
11198 *
11199 * stop_drain_data_vls(dd);
11200 * // do things with per-VL resources
11201 * open_fill_data_vls(dd);
11202 */
11203int stop_drain_data_vls(struct hfi1_devdata *dd)
11204{
11205 int ret;
11206
11207 ret = disable_data_vls(dd);
11208 if (ret == 0)
11209 drain_data_vls(dd);
11210
11211 return ret;
11212}
11213
11214/*
11215 * Convert a nanosecond time to a cclock count. No matter how slow
11216 * the cclock, a non-zero ns will always have a non-zero result.
11217 */
11218u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11219{
11220 u32 cclocks;
11221
11222 if (dd->icode == ICODE_FPGA_EMULATION)
11223 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11224 else /* simulation pretends to be ASIC */
11225 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11226 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11227 cclocks = 1;
11228 return cclocks;
11229}
11230
11231/*
11232 * Convert a cclock count to nanoseconds. No matter how slow
11233 * the cclock, a non-zero cclock count will always have a non-zero result.
11234 */
11235u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11236{
11237 u32 ns;
11238
11239 if (dd->icode == ICODE_FPGA_EMULATION)
11240 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11241 else /* simulation pretends to be ASIC */
11242 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11243 if (cclocks && !ns)
11244 ns = 1;
11245 return ns;
11246}
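/*
 * Illustration (not driver code): the round-up-to-one behaviour of the
 * two conversions above, in a stand-alone sketch.  The 1000 ps clock
 * period used here is a made-up value, not the real ASIC_CCLOCK_PS.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t example_ns_to_cclock(uint32_t ns, uint32_t cclock_ps)
{
	uint32_t cclocks = (ns * 1000) / cclock_ps;

	if (ns && !cclocks)		/* nonzero ns never maps to 0 */
		cclocks = 1;
	return cclocks;
}

int main(void)
{
	/* hypothetical 1000 ps period: 5 ns -> 5 cclocks, 0 ns -> 0 */
	printf("%u %u\n",
	       example_ns_to_cclock(5, 1000),
	       example_ns_to_cclock(0, 1000));
	return 0;
}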
11247
11248/*
11249 * Dynamically adjust the receive interrupt timeout for a context based on
11250 * incoming packet rate.
11251 *
11252 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11253 */
11254static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11255{
11256 struct hfi1_devdata *dd = rcd->dd;
11257 u32 timeout = rcd->rcvavail_timeout;
11258
11259 /*
11260 * This algorithm doubles or halves the timeout depending on whether
11261 * the number of packets received in this interrupt was less than or
11262 * greater than or equal to the interrupt count.
11263 *
11264 * The calculations below do not allow a steady state to be achieved.
11265 * Only at the endpoints is it possible to have an unchanging
11266 * timeout.
11267 */
11268 if (npkts < rcv_intr_count) {
11269 /*
11270 * Not enough packets arrived before the timeout, adjust
11271 * timeout downward.
11272 */
11273 if (timeout < 2) /* already at minimum? */
11274 return;
11275 timeout >>= 1;
11276 } else {
11277 /*
11278 * More than enough packets arrived before the timeout, adjust
11279 * timeout upward.
11280 */
11281 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11282 return;
11283 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11284 }
11285
11286 rcd->rcvavail_timeout = timeout;
4d114fdd
JJ
11287 /*
11288 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11289 * been verified to be in range
11290 */
77241056 11291 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
17fb4f29
JJ
11292 (u64)timeout <<
11293 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
77241056
MM
11294}
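/*
 * Illustration (not driver code): the doubling/halving rule above in a
 * stand-alone sketch with made-up packet counts and a made-up CSR limit.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t example_adjust_timeout(uint32_t timeout, uint32_t npkts,
				       uint32_t intr_count, uint32_t max_csr)
{
	if (npkts < intr_count) {
		if (timeout < 2)		/* already at minimum */
			return timeout;
		return timeout >> 1;		/* too few packets: halve */
	}
	if (timeout >= max_csr)			/* already at maximum */
		return timeout;
	return (timeout << 1) < max_csr ? timeout << 1 : max_csr;
}

int main(void)
{
	/* hypothetical: start at 8, interrupt count 16, CSR max 64 */
	printf("%u %u\n",
	       example_adjust_timeout(8, 4, 16, 64),	/* 4: halved */
	       example_adjust_timeout(8, 32, 16, 64));	/* 16: doubled */
	return 0;
}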
11295
11296void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11297 u32 intr_adjust, u32 npkts)
11298{
11299 struct hfi1_devdata *dd = rcd->dd;
11300 u64 reg;
11301 u32 ctxt = rcd->ctxt;
11302
11303 /*
11304 * Need to write timeout register before updating RcvHdrHead to ensure
11305 * that a new value is used when the HW decides to restart counting.
11306 */
11307 if (intr_adjust)
11308 adjust_rcv_timeout(rcd, npkts);
11309 if (updegr) {
11310 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11311 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11312 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11313 }
11314 mmiowb();
11315 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11316 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11317 << RCV_HDR_HEAD_HEAD_SHIFT);
11318 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11319 mmiowb();
11320}
11321
11322u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11323{
11324 u32 head, tail;
11325
11326 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11327 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11328
11329 if (rcd->rcvhdrtail_kvaddr)
11330 tail = get_rcvhdrtail(rcd);
11331 else
11332 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11333
11334 return head == tail;
11335}
11336
11337/*
11338 * Context Control and Receive Array encoding for buffer size:
11339 * 0x0 invalid
11340 * 0x1 4 KB
11341 * 0x2 8 KB
11342 * 0x3 16 KB
11343 * 0x4 32 KB
11344 * 0x5 64 KB
11345 * 0x6 128 KB
11346 * 0x7 256 KB
11347 * 0x8 512 KB (Receive Array only)
11348 * 0x9 1 MB (Receive Array only)
11349 * 0xa 2 MB (Receive Array only)
11350 *
11351 * 0xB-0xF - reserved (Receive Array only)
11352 *
11353 *
11354 * This routine assumes that the value has already been sanity checked.
11355 */
11356static u32 encoded_size(u32 size)
11357{
11358 switch (size) {
8638b77f
JJ
11359 case 4 * 1024: return 0x1;
11360 case 8 * 1024: return 0x2;
11361 case 16 * 1024: return 0x3;
11362 case 32 * 1024: return 0x4;
11363 case 64 * 1024: return 0x5;
11364 case 128 * 1024: return 0x6;
11365 case 256 * 1024: return 0x7;
11366 case 512 * 1024: return 0x8;
11367 case 1 * 1024 * 1024: return 0x9;
11368 case 2 * 1024 * 1024: return 0xa;
77241056
MM
11369 }
11370 return 0x1; /* if invalid, go with the minimum size */
11371}
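/*
 * Illustration (not driver code): a couple of spot checks of the size
 * encoding above, restated as a stand-alone table lookup so it can be
 * compiled on its own.  Only three of the codes are repeated here.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t example_encoded_size(uint32_t size)
{
	switch (size) {
	case 4 * 1024:		return 0x1;
	case 64 * 1024:		return 0x5;
	case 2 * 1024 * 1024:	return 0xa;
	}
	return 0x1;	/* invalid sizes fall back to the minimum */
}

int main(void)
{
	printf("0x%x 0x%x\n",
	       example_encoded_size(64 * 1024),	/* 0x5 */
	       example_encoded_size(3000));	/* 0x1: not a valid size */
	return 0;
}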
11372
11373void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11374{
11375 struct hfi1_ctxtdata *rcd;
11376 u64 rcvctrl, reg;
11377 int did_enable = 0;
11378
11379 rcd = dd->rcd[ctxt];
11380 if (!rcd)
11381 return;
11382
11383 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11384
11385 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11386 /* if the context already enabled, don't do the extra steps */
d0d236ea
JJ
11387 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11388 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
77241056
MM
11389 /* reset the tail and hdr addresses, and sequence count */
11390 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11391 rcd->rcvhdrq_phys);
11392 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11393 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11394 rcd->rcvhdrqtailaddr_phys);
11395 rcd->seq_cnt = 1;
11396
11397 /* reset the cached receive header queue head value */
11398 rcd->head = 0;
11399
11400 /*
11401 * Zero the receive header queue so we don't get false
11402 * positives when checking the sequence number. The
11403 * sequence numbers could land exactly on the same spot.
11405 * E.g. an rcd restart before the receive header queue wrapped.
11405 */
11406 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11407
11408 /* starting timeout */
11409 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11410
11411 /* enable the context */
11412 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11413
11414 /* clean the egr buffer size first */
11415 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11416 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11417 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11418 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11419
11420 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11421 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11422 did_enable = 1;
11423
11424 /* zero RcvEgrIndexHead */
11425 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11426
11427 /* set eager count and base index */
11428 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11429 & RCV_EGR_CTRL_EGR_CNT_MASK)
11430 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11431 (((rcd->eager_base >> RCV_SHIFT)
11432 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11433 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11434 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11435
11436 /*
11437 * Set TID (expected) count and base index.
11438 * rcd->expected_count is set to individual RcvArray entries,
11439 * not pairs, and the CSR takes a pair-count in groups of
11440 * four, so divide by 8.
11441 */
11442 reg = (((rcd->expected_count >> RCV_SHIFT)
11443 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11444 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11445 (((rcd->expected_base >> RCV_SHIFT)
11446 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11447 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11448 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
82c2611d
NV
11449 if (ctxt == HFI1_CTRL_CTXT)
11450 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
77241056
MM
11451 }
11452 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11453 write_csr(dd, RCV_VL15, 0);
46b010d3
MB
11454 /*
11455 * When the receive context is being disabled, turn on tail
11456 * update with a dummy tail address and then disable the
11457 * receive context.
11458 */
11459 if (dd->rcvhdrtail_dummy_physaddr) {
11460 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11461 dd->rcvhdrtail_dummy_physaddr);
566c157c 11462 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
46b010d3
MB
11463 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11464 }
11465
77241056
MM
11466 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11467 }
11468 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11469 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11470 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11471 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11472 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11473 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
566c157c
MH
11474 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11475 /* See comment on RcvCtxtCtrl.TailUpd above */
11476 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11477 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11478 }
77241056
MM
11479 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11480 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11481 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11482 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11483 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
4d114fdd
JJ
11484 /*
11485 * In one-packet-per-eager mode, the size comes from
11486 * the RcvArray entry.
11487 */
77241056
MM
11488 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11489 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11490 }
11491 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11492 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11493 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11494 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11495 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11496 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11497 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11498 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11499 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11500 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11501 rcd->rcvctrl = rcvctrl;
11502 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11503 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11504
11505 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
d0d236ea
JJ
11506 if (did_enable &&
11507 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
77241056
MM
11508 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11509 if (reg != 0) {
11510 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
17fb4f29 11511 ctxt, reg);
77241056
MM
11512 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11513 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11514 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11515 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11516 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11517 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
17fb4f29 11518 ctxt, reg, reg == 0 ? "not" : "still");
77241056
MM
11519 }
11520 }
11521
11522 if (did_enable) {
11523 /*
11524 * The interrupt timeout and count must be set after
11525 * the context is enabled to take effect.
11526 */
11527 /* set interrupt timeout */
11528 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
17fb4f29 11529 (u64)rcd->rcvavail_timeout <<
77241056
MM
11530 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11531
11532 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11533 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11534 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11535 }
11536
11537 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11538 /*
11539 * If the context has been disabled and the Tail Update has
46b010d3
MB
11540 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11541 * so it doesn't contain an invalid address.
77241056 11542 */
46b010d3
MB
11543 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11544 dd->rcvhdrtail_dummy_physaddr);
77241056
MM
11545}
11546
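/*
 * Return either the device counter names (namep) or a fresh snapshot
 * of the counter values (cntrp); the return value is the size in
 * bytes of the selected buffer.
 */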
582e05c3 11547u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
77241056
MM
11548{
11549 int ret;
11550 u64 val = 0;
11551
11552 if (namep) {
11553 ret = dd->cntrnameslen;
77241056
MM
11554 *namep = dd->cntrnames;
11555 } else {
11556 const struct cntr_entry *entry;
11557 int i, j;
11558
11559 ret = (dd->ndevcntrs) * sizeof(u64);
77241056
MM
11560
11561 /* Get the start of the block of counters */
11562 *cntrp = dd->cntrs;
11563
11564 /*
11565 * Now go and fill in each counter in the block.
11566 */
11567 for (i = 0; i < DEV_CNTR_LAST; i++) {
11568 entry = &dev_cntrs[i];
11569 hfi1_cdbg(CNTR, "reading %s", entry->name);
11570 if (entry->flags & CNTR_DISABLED) {
11571 /* Nothing */
11572 hfi1_cdbg(CNTR, "\tDisabled\n");
11573 } else {
11574 if (entry->flags & CNTR_VL) {
11575 hfi1_cdbg(CNTR, "\tPer VL\n");
11576 for (j = 0; j < C_VL_COUNT; j++) {
11577 val = entry->rw_cntr(entry,
11578 dd, j,
11579 CNTR_MODE_R,
11580 0);
11581 hfi1_cdbg(
11582 CNTR,
11583 "\t\tRead 0x%llx for %d\n",
11584 val, j);
11585 dd->cntrs[entry->offset + j] =
11586 val;
11587 }
a699c6c2
VM
11588 } else if (entry->flags & CNTR_SDMA) {
11589 hfi1_cdbg(CNTR,
11590 "\t Per SDMA Engine\n");
11591 for (j = 0; j < dd->chip_sdma_engines;
11592 j++) {
11593 val =
11594 entry->rw_cntr(entry, dd, j,
11595 CNTR_MODE_R, 0);
11596 hfi1_cdbg(CNTR,
11597 "\t\tRead 0x%llx for %d\n",
11598 val, j);
11599 dd->cntrs[entry->offset + j] =
11600 val;
11601 }
77241056
MM
11602 } else {
11603 val = entry->rw_cntr(entry, dd,
11604 CNTR_INVALID_VL,
11605 CNTR_MODE_R, 0);
11606 dd->cntrs[entry->offset] = val;
11607 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11608 }
11609 }
11610 }
11611 }
11612 return ret;
11613}
11614
11615/*
11616 * Used by sysfs to create files for hfi stats to read
11617 */
582e05c3 11618u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
77241056
MM
11619{
11620 int ret;
11621 u64 val = 0;
11622
11623 if (namep) {
582e05c3
DL
11624 ret = ppd->dd->portcntrnameslen;
11625 *namep = ppd->dd->portcntrnames;
77241056
MM
11626 } else {
11627 const struct cntr_entry *entry;
77241056
MM
11628 int i, j;
11629
582e05c3 11630 ret = ppd->dd->nportcntrs * sizeof(u64);
77241056
MM
11631 *cntrp = ppd->cntrs;
11632
11633 for (i = 0; i < PORT_CNTR_LAST; i++) {
11634 entry = &port_cntrs[i];
11635 hfi1_cdbg(CNTR, "reading %s", entry->name);
11636 if (entry->flags & CNTR_DISABLED) {
11637 /* Nothing */
11638 hfi1_cdbg(CNTR, "\tDisabled\n");
11639 continue;
11640 }
11641
11642 if (entry->flags & CNTR_VL) {
11643 hfi1_cdbg(CNTR, "\tPer VL");
11644 for (j = 0; j < C_VL_COUNT; j++) {
11645 val = entry->rw_cntr(entry, ppd, j,
11646 CNTR_MODE_R,
11647 0);
11648 hfi1_cdbg(
11649 CNTR,
11650 "\t\tRead 0x%llx for %d",
11651 val, j);
11652 ppd->cntrs[entry->offset + j] = val;
11653 }
11654 } else {
11655 val = entry->rw_cntr(entry, ppd,
11656 CNTR_INVALID_VL,
11657 CNTR_MODE_R,
11658 0);
11659 ppd->cntrs[entry->offset] = val;
11660 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11661 }
11662 }
11663 }
11664 return ret;
11665}
11666
11667static void free_cntrs(struct hfi1_devdata *dd)
11668{
11669 struct hfi1_pportdata *ppd;
11670 int i;
11671
11672 if (dd->synth_stats_timer.data)
11673 del_timer_sync(&dd->synth_stats_timer);
11674 dd->synth_stats_timer.data = 0;
11675 ppd = (struct hfi1_pportdata *)(dd + 1);
11676 for (i = 0; i < dd->num_pports; i++, ppd++) {
11677 kfree(ppd->cntrs);
11678 kfree(ppd->scntrs);
4eb06882
DD
11679 free_percpu(ppd->ibport_data.rvp.rc_acks);
11680 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11681 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
77241056
MM
11682 ppd->cntrs = NULL;
11683 ppd->scntrs = NULL;
4eb06882
DD
11684 ppd->ibport_data.rvp.rc_acks = NULL;
11685 ppd->ibport_data.rvp.rc_qacks = NULL;
11686 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
77241056
MM
11687 }
11688 kfree(dd->portcntrnames);
11689 dd->portcntrnames = NULL;
11690 kfree(dd->cntrs);
11691 dd->cntrs = NULL;
11692 kfree(dd->scntrs);
11693 dd->scntrs = NULL;
11694 kfree(dd->cntrnames);
11695 dd->cntrnames = NULL;
11696}
11697
77241056
MM
11698static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11699 u64 *psval, void *context, int vl)
11700{
11701 u64 val;
11702 u64 sval = *psval;
11703
11704 if (entry->flags & CNTR_DISABLED) {
11705 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11706 return 0;
11707 }
11708
11709 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11710
11711 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11712
11713 /* If it's a synthetic counter, there is more work we need to do */
11714 if (entry->flags & CNTR_SYNTH) {
11715 if (sval == CNTR_MAX) {
11716 /* No need to read already saturated */
11717 return CNTR_MAX;
11718 }
11719
11720 if (entry->flags & CNTR_32BIT) {
11721 /* 32bit counters can wrap multiple times */
11722 u64 upper = sval >> 32;
11723 u64 lower = (sval << 32) >> 32;
11724
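/*
 * The saved value packs the accumulated wrap count in the upper
 * 32 bits and the last hardware reading in the lower 32 bits;
 * a smaller hardware value now means the counter wrapped.
 */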
11725 if (lower > val) { /* hw wrapped */
11726 if (upper == CNTR_32BIT_MAX)
11727 val = CNTR_MAX;
11728 else
11729 upper++;
11730 }
11731
11732 if (val != CNTR_MAX)
11733 val = (upper << 32) | val;
11734
11735 } else {
11736 /* If we rolled we are saturated */
11737 if ((val < sval) || (val > CNTR_MAX))
11738 val = CNTR_MAX;
11739 }
11740 }
11741
11742 *psval = val;
11743
11744 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11745
11746 return val;
11747}
11748
11749static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11750 struct cntr_entry *entry,
11751 u64 *psval, void *context, int vl, u64 data)
11752{
11753 u64 val;
11754
11755 if (entry->flags & CNTR_DISABLED) {
11756 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11757 return 0;
11758 }
11759
11760 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11761
11762 if (entry->flags & CNTR_SYNTH) {
11763 *psval = data;
11764 if (entry->flags & CNTR_32BIT) {
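/* the hardware register is only 32 bits wide; write just the low word */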
11765 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11766 (data << 32) >> 32);
11767 val = data; /* return the full 64bit value */
11768 } else {
11769 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11770 data);
11771 }
11772 } else {
11773 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11774 }
11775
11776 *psval = val;
11777
11778 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11779
11780 return val;
11781}
11782
11783u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11784{
11785 struct cntr_entry *entry;
11786 u64 *sval;
11787
11788 entry = &dev_cntrs[index];
11789 sval = dd->scntrs + entry->offset;
11790
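/* per-VL counters keep one saved value per VL after the entry's base offset */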
11791 if (vl != CNTR_INVALID_VL)
11792 sval += vl;
11793
11794 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11795}
11796
11797u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11798{
11799 struct cntr_entry *entry;
11800 u64 *sval;
11801
11802 entry = &dev_cntrs[index];
11803 sval = dd->scntrs + entry->offset;
11804
11805 if (vl != CNTR_INVALID_VL)
11806 sval += vl;
11807
11808 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11809}
11810
11811u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11812{
11813 struct cntr_entry *entry;
11814 u64 *sval;
11815
11816 entry = &port_cntrs[index];
11817 sval = ppd->scntrs + entry->offset;
11818
11819 if (vl != CNTR_INVALID_VL)
11820 sval += vl;
11821
11822 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11823 (index <= C_RCV_HDR_OVF_LAST)) {
11824 /* We do not want to bother for disabled contexts */
11825 return 0;
11826 }
11827
11828 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11829}
11830
11831u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11832{
11833 struct cntr_entry *entry;
11834 u64 *sval;
11835
11836 entry = &port_cntrs[index];
11837 sval = ppd->scntrs + entry->offset;
11838
11839 if (vl != CNTR_INVALID_VL)
11840 sval += vl;
11841
11842 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11843 (index <= C_RCV_HDR_OVF_LAST)) {
11844 /* We do not want to bother for disabled contexts */
11845 return 0;
11846 }
11847
11848 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11849}
11850
11851static void update_synth_timer(unsigned long opaque)
11852{
11853 u64 cur_tx;
11854 u64 cur_rx;
11855 u64 total_flits;
11856 u8 update = 0;
11857 int i, j, vl;
11858 struct hfi1_pportdata *ppd;
11859 struct cntr_entry *entry;
11860
11861 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11862
11863 /*
11864 * Rather than keep beating on the CSRs, pick a minimal set that we can
11865 * check to watch for a potential rollover. We can do this by looking at
11866 * the number of flits sent/received. If the total flits exceed 32 bits,
11867 * we have to iterate over all the counters and update.
11868 */
11869 entry = &dev_cntrs[C_DC_RCV_FLITS];
11870 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11871
11872 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11873 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11874
11875 hfi1_cdbg(
11876 CNTR,
11877 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11878 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11879
11880 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11881 /*
11882 * May not be strictly necessary to update but it won't hurt and
11883 * simplifies the logic here.
11884 */
11885 update = 1;
11886 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11887 dd->unit);
11888 } else {
11889 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11890 hfi1_cdbg(CNTR,
11891 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11892 total_flits, (u64)CNTR_32BIT_MAX);
11893 if (total_flits >= CNTR_32BIT_MAX) {
11894 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11895 dd->unit);
11896 update = 1;
11897 }
11898 }
11899
11900 if (update) {
11901 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11902 for (i = 0; i < DEV_CNTR_LAST; i++) {
11903 entry = &dev_cntrs[i];
11904 if (entry->flags & CNTR_VL) {
11905 for (vl = 0; vl < C_VL_COUNT; vl++)
11906 read_dev_cntr(dd, i, vl);
11907 } else {
11908 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11909 }
11910 }
11911 ppd = (struct hfi1_pportdata *)(dd + 1);
11912 for (i = 0; i < dd->num_pports; i++, ppd++) {
11913 for (j = 0; j < PORT_CNTR_LAST; j++) {
11914 entry = &port_cntrs[j];
11915 if (entry->flags & CNTR_VL) {
11916 for (vl = 0; vl < C_VL_COUNT; vl++)
11917 read_port_cntr(ppd, j, vl);
11918 } else {
11919 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11920 }
11921 }
11922 }
11923
11924 /*
11925 * We want the value in the register. The goal is to keep track
11926 * of the number of "ticks", not the counter value. In other
11927 * words, if the register rolls, we want to notice it and go ahead
11928 * and force an update.
11929 */
11930 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11931 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11932 CNTR_MODE_R, 0);
11933
11934 entry = &dev_cntrs[C_DC_RCV_FLITS];
11935 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11936 CNTR_MODE_R, 0);
11937
11938 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11939 dd->unit, dd->last_tx, dd->last_rx);
11940
11941 } else {
11942 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11943 }
11944
48a0cc13 11945 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
77241056
MM
11946}
11947
11948 #define C_MAX_NAME 13 /* 12 chars + one for '\0' */
11949static int init_cntrs(struct hfi1_devdata *dd)
11950{
c024c554 11951 int i, rcv_ctxts, j;
77241056
MM
11952 size_t sz;
11953 char *p;
11954 char name[C_MAX_NAME];
11955 struct hfi1_pportdata *ppd;
11d2b114
SS
11956 const char *bit_type_32 = ",32";
11957 const int bit_type_32_sz = strlen(bit_type_32);
77241056
MM
11958
11959 /* set up the stats timer; the add_timer is done at the end */
24523a94
MFW
11960 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11961 (unsigned long)dd);
77241056
MM
11962
11963 /***********************/
11964 /* per device counters */
11965 /***********************/
11966
11967 /* size names and determine how many we have */
11968 dd->ndevcntrs = 0;
11969 sz = 0;
77241056
MM
11970
11971 for (i = 0; i < DEV_CNTR_LAST; i++) {
77241056
MM
11972 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11973 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11974 continue;
11975 }
11976
11977 if (dev_cntrs[i].flags & CNTR_VL) {
c024c554 11978 dev_cntrs[i].offset = dd->ndevcntrs;
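/* per-VL counters take C_VL_COUNT consecutive slots starting here */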
77241056 11979 for (j = 0; j < C_VL_COUNT; j++) {
77241056 11980 snprintf(name, C_MAX_NAME, "%s%d",
17fb4f29 11981 dev_cntrs[i].name, vl_from_idx(j));
77241056 11982 sz += strlen(name);
11d2b114
SS
11983 /* Add ",32" for 32-bit counters */
11984 if (dev_cntrs[i].flags & CNTR_32BIT)
11985 sz += bit_type_32_sz;
77241056 11986 sz++;
77241056 11987 dd->ndevcntrs++;
77241056 11988 }
a699c6c2 11989 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
c024c554 11990 dev_cntrs[i].offset = dd->ndevcntrs;
a699c6c2 11991 for (j = 0; j < dd->chip_sdma_engines; j++) {
a699c6c2
VM
11992 snprintf(name, C_MAX_NAME, "%s%d",
11993 dev_cntrs[i].name, j);
77241056 11994 sz += strlen(name);
11d2b114
SS
11995 /* Add ",32" for 32-bit counters */
11996 if (dev_cntrs[i].flags & CNTR_32BIT)
11997 sz += bit_type_32_sz;
77241056 11998 sz++;
77241056 11999 dd->ndevcntrs++;
77241056
MM
12000 }
12001 } else {
11d2b114 12002 /* +1 for newline. */
77241056 12003 sz += strlen(dev_cntrs[i].name) + 1;
11d2b114
SS
12004 /* Add ",32" for 32-bit counters */
12005 if (dev_cntrs[i].flags & CNTR_32BIT)
12006 sz += bit_type_32_sz;
c024c554 12007 dev_cntrs[i].offset = dd->ndevcntrs;
77241056 12008 dd->ndevcntrs++;
77241056
MM
12009 }
12010 }
12011
12012 /* allocate space for the counter values */
c024c554 12013 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
77241056
MM
12014 if (!dd->cntrs)
12015 goto bail;
12016
c024c554 12017 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
77241056
MM
12018 if (!dd->scntrs)
12019 goto bail;
12020
77241056
MM
12021 /* allocate space for the counter names */
12022 dd->cntrnameslen = sz;
12023 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12024 if (!dd->cntrnames)
12025 goto bail;
12026
12027 /* fill in the names */
c024c554 12028 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
77241056
MM
12029 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12030 /* Nothing */
11d2b114
SS
12031 } else if (dev_cntrs[i].flags & CNTR_VL) {
12032 for (j = 0; j < C_VL_COUNT; j++) {
11d2b114
SS
12033 snprintf(name, C_MAX_NAME, "%s%d",
12034 dev_cntrs[i].name,
12035 vl_from_idx(j));
12036 memcpy(p, name, strlen(name));
12037 p += strlen(name);
12038
12039 /* Counter is 32 bits */
12040 if (dev_cntrs[i].flags & CNTR_32BIT) {
12041 memcpy(p, bit_type_32, bit_type_32_sz);
12042 p += bit_type_32_sz;
77241056 12043 }
11d2b114
SS
12044
12045 *p++ = '\n';
12046 }
12047 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12048 for (j = 0; j < dd->chip_sdma_engines; j++) {
11d2b114
SS
12049 snprintf(name, C_MAX_NAME, "%s%d",
12050 dev_cntrs[i].name, j);
12051 memcpy(p, name, strlen(name));
12052 p += strlen(name);
12053
12054 /* Counter is 32 bits */
12055 if (dev_cntrs[i].flags & CNTR_32BIT) {
12056 memcpy(p, bit_type_32, bit_type_32_sz);
12057 p += bit_type_32_sz;
a699c6c2 12058 }
11d2b114 12059
77241056
MM
12060 *p++ = '\n';
12061 }
11d2b114
SS
12062 } else {
12063 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12064 p += strlen(dev_cntrs[i].name);
12065
12066 /* Counter is 32 bits */
12067 if (dev_cntrs[i].flags & CNTR_32BIT) {
12068 memcpy(p, bit_type_32, bit_type_32_sz);
12069 p += bit_type_32_sz;
12070 }
12071
12072 *p++ = '\n';
77241056
MM
12073 }
12074 }
12075
12076 /*********************/
12077 /* per port counters */
12078 /*********************/
12079
12080 /*
12081 * Go through the counters for the overflows and disable the ones we
12082 * don't need. This varies based on platform so we need to do it
12083 * dynamically here.
12084 */
12085 rcv_ctxts = dd->num_rcv_contexts;
12086 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12087 i <= C_RCV_HDR_OVF_LAST; i++) {
12088 port_cntrs[i].flags |= CNTR_DISABLED;
12089 }
12090
12091 /* size port counter names and determine how many we have */
12092 sz = 0;
12093 dd->nportcntrs = 0;
12094 for (i = 0; i < PORT_CNTR_LAST; i++) {
77241056
MM
12095 if (port_cntrs[i].flags & CNTR_DISABLED) {
12096 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12097 continue;
12098 }
12099
12100 if (port_cntrs[i].flags & CNTR_VL) {
77241056
MM
12101 port_cntrs[i].offset = dd->nportcntrs;
12102 for (j = 0; j < C_VL_COUNT; j++) {
77241056 12103 snprintf(name, C_MAX_NAME, "%s%d",
17fb4f29 12104 port_cntrs[i].name, vl_from_idx(j));
77241056 12105 sz += strlen(name);
11d2b114
SS
12106 /* Add ",32" for 32-bit counters */
12107 if (port_cntrs[i].flags & CNTR_32BIT)
12108 sz += bit_type_32_sz;
77241056 12109 sz++;
77241056
MM
12110 dd->nportcntrs++;
12111 }
12112 } else {
11d2b114 12113 /* +1 for newline */
77241056 12114 sz += strlen(port_cntrs[i].name) + 1;
11d2b114
SS
12115 /* Add ",32" for 32-bit counters */
12116 if (port_cntrs[i].flags & CNTR_32BIT)
12117 sz += bit_type_32_sz;
77241056
MM
12118 port_cntrs[i].offset = dd->nportcntrs;
12119 dd->nportcntrs++;
77241056
MM
12120 }
12121 }
12122
12123 /* allocate space for the counter names */
12124 dd->portcntrnameslen = sz;
12125 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12126 if (!dd->portcntrnames)
12127 goto bail;
12128
12129 /* fill in port cntr names */
12130 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12131 if (port_cntrs[i].flags & CNTR_DISABLED)
12132 continue;
12133
12134 if (port_cntrs[i].flags & CNTR_VL) {
12135 for (j = 0; j < C_VL_COUNT; j++) {
77241056 12136 snprintf(name, C_MAX_NAME, "%s%d",
17fb4f29 12137 port_cntrs[i].name, vl_from_idx(j));
77241056
MM
12138 memcpy(p, name, strlen(name));
12139 p += strlen(name);
11d2b114
SS
12140
12141 /* Counter is 32 bits */
12142 if (port_cntrs[i].flags & CNTR_32BIT) {
12143 memcpy(p, bit_type_32, bit_type_32_sz);
12144 p += bit_type_32_sz;
12145 }
12146
77241056
MM
12147 *p++ = '\n';
12148 }
12149 } else {
12150 memcpy(p, port_cntrs[i].name,
12151 strlen(port_cntrs[i].name));
12152 p += strlen(port_cntrs[i].name);
11d2b114
SS
12153
12154 /* Counter is 32 bits */
12155 if (port_cntrs[i].flags & CNTR_32BIT) {
12156 memcpy(p, bit_type_32, bit_type_32_sz);
12157 p += bit_type_32_sz;
12158 }
12159
77241056
MM
12160 *p++ = '\n';
12161 }
12162 }
12163
12164 /* allocate per port storage for counter values */
12165 ppd = (struct hfi1_pportdata *)(dd + 1);
12166 for (i = 0; i < dd->num_pports; i++, ppd++) {
12167 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12168 if (!ppd->cntrs)
12169 goto bail;
12170
12171 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12172 if (!ppd->scntrs)
12173 goto bail;
12174 }
12175
12176 /* CPU counters need to be allocated and zeroed */
12177 if (init_cpu_counters(dd))
12178 goto bail;
12179
12180 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12181 return 0;
12182bail:
12183 free_cntrs(dd);
12184 return -ENOMEM;
12185}
12186
77241056
MM
12187static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12188{
12189 switch (chip_lstate) {
12190 default:
12191 dd_dev_err(dd,
17fb4f29
JJ
12192 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12193 chip_lstate);
77241056
MM
12194 /* fall through */
12195 case LSTATE_DOWN:
12196 return IB_PORT_DOWN;
12197 case LSTATE_INIT:
12198 return IB_PORT_INIT;
12199 case LSTATE_ARMED:
12200 return IB_PORT_ARMED;
12201 case LSTATE_ACTIVE:
12202 return IB_PORT_ACTIVE;
12203 }
12204}
12205
12206u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12207{
12208 /* look at the HFI meta-states only */
12209 switch (chip_pstate & 0xf0) {
12210 default:
12211 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
17fb4f29 12212 chip_pstate);
77241056
MM
12213 /* fall through */
12214 case PLS_DISABLED:
12215 return IB_PORTPHYSSTATE_DISABLED;
12216 case PLS_OFFLINE:
12217 return OPA_PORTPHYSSTATE_OFFLINE;
12218 case PLS_POLLING:
12219 return IB_PORTPHYSSTATE_POLLING;
12220 case PLS_CONFIGPHY:
12221 return IB_PORTPHYSSTATE_TRAINING;
12222 case PLS_LINKUP:
12223 return IB_PORTPHYSSTATE_LINKUP;
12224 case PLS_PHYTEST:
12225 return IB_PORTPHYSSTATE_PHY_TEST;
12226 }
12227}
12228
12229/* return the OPA port logical state name */
12230const char *opa_lstate_name(u32 lstate)
12231{
12232 static const char * const port_logical_names[] = {
12233 "PORT_NOP",
12234 "PORT_DOWN",
12235 "PORT_INIT",
12236 "PORT_ARMED",
12237 "PORT_ACTIVE",
12238 "PORT_ACTIVE_DEFER",
12239 };
12240 if (lstate < ARRAY_SIZE(port_logical_names))
12241 return port_logical_names[lstate];
12242 return "unknown";
12243}
12244
12245/* return the OPA port physical state name */
12246const char *opa_pstate_name(u32 pstate)
12247{
12248 static const char * const port_physical_names[] = {
12249 "PHYS_NOP",
12250 "reserved1",
12251 "PHYS_POLL",
12252 "PHYS_DISABLED",
12253 "PHYS_TRAINING",
12254 "PHYS_LINKUP",
12255 "PHYS_LINK_ERR_RECOVER",
12256 "PHYS_PHY_TEST",
12257 "reserved8",
12258 "PHYS_OFFLINE",
12259 "PHYS_GANGED",
12260 "PHYS_TEST",
12261 };
12262 if (pstate < ARRAY_SIZE(port_physical_names))
12263 return port_physical_names[pstate];
12264 return "unknown";
12265}
12266
12267/*
12268 * Read the hardware link state and set the driver's cached value of it.
12269 * Return the (new) current value.
12270 */
12271u32 get_logical_state(struct hfi1_pportdata *ppd)
12272{
12273 u32 new_state;
12274
12275 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12276 if (new_state != ppd->lstate) {
12277 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
17fb4f29 12278 opa_lstate_name(new_state), new_state);
77241056
MM
12279 ppd->lstate = new_state;
12280 }
12281 /*
12282 * Set port status flags in the page mapped into userspace
12283 * memory. Do it here to ensure a reliable state - this is
12284 * the only function called by all state handling code.
12285 * Always set the flags due to the fact that the cache value
12286 * might have been changed explicitly outside of this
12287 * function.
12288 */
12289 if (ppd->statusp) {
12290 switch (ppd->lstate) {
12291 case IB_PORT_DOWN:
12292 case IB_PORT_INIT:
12293 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12294 HFI1_STATUS_IB_READY);
12295 break;
12296 case IB_PORT_ARMED:
12297 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12298 break;
12299 case IB_PORT_ACTIVE:
12300 *ppd->statusp |= HFI1_STATUS_IB_READY;
12301 break;
12302 }
12303 }
12304 return ppd->lstate;
12305}
12306
12307/**
12308 * wait_logical_linkstate - wait for an IB link state change to occur
12309 * @ppd: port device
12310 * @state: the state to wait for
12311 * @msecs: the number of milliseconds to wait
12312 *
12313 * Wait up to msecs milliseconds for IB link state change to occur.
12314 * For now, take the easy polling route.
12315 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12316 */
12317static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12318 int msecs)
12319{
12320 unsigned long timeout;
12321
12322 timeout = jiffies + msecs_to_jiffies(msecs);
12323 while (1) {
12324 if (get_logical_state(ppd) == state)
12325 return 0;
12326 if (time_after(jiffies, timeout))
12327 break;
12328 msleep(20);
12329 }
12330 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12331
12332 return -ETIMEDOUT;
12333}
12334
12335u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12336{
77241056
MM
12337 u32 pstate;
12338 u32 ib_pstate;
12339
12340 pstate = read_physical_state(ppd->dd);
12341 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
f45c8dc8 12342 if (ppd->last_pstate != ib_pstate) {
77241056 12343 dd_dev_info(ppd->dd,
17fb4f29
JJ
12344 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12345 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12346 pstate);
f45c8dc8 12347 ppd->last_pstate = ib_pstate;
77241056
MM
12348 }
12349 return ib_pstate;
12350}
12351
77241056
MM
12352#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12353(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12354
12355#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12356(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12357
12358int hfi1_init_ctxt(struct send_context *sc)
12359{
d125a6c6 12360 if (sc) {
77241056
MM
12361 struct hfi1_devdata *dd = sc->dd;
12362 u64 reg;
12363 u8 set = (sc->type == SC_USER ?
12364 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12365 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
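/*
 * The CSR bit *disallows* PBC static rate control, so clear it
 * when the capability is set and set it otherwise.
 */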
12366 reg = read_kctxt_csr(dd, sc->hw_context,
12367 SEND_CTXT_CHECK_ENABLE);
12368 if (set)
12369 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12370 else
12371 SET_STATIC_RATE_CONTROL_SMASK(reg);
12372 write_kctxt_csr(dd, sc->hw_context,
12373 SEND_CTXT_CHECK_ENABLE, reg);
12374 }
12375 return 0;
12376}
12377
12378int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12379{
12380 int ret = 0;
12381 u64 reg;
12382
12383 if (dd->icode != ICODE_RTL_SILICON) {
12384 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12385 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12386 __func__);
12387 return -EINVAL;
12388 }
12389 reg = read_csr(dd, ASIC_STS_THERM);
12390 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12391 ASIC_STS_THERM_CURR_TEMP_MASK);
12392 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12393 ASIC_STS_THERM_LO_TEMP_MASK);
12394 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12395 ASIC_STS_THERM_HI_TEMP_MASK);
12396 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12397 ASIC_STS_THERM_CRIT_TEMP_MASK);
12398 /* triggers is a 3-bit value - 1 bit per trigger. */
12399 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12400
12401 return ret;
12402}
12403
12404/* ========================================================================= */
12405
12406/*
12407 * Enable/disable chip from delivering interrupts.
12408 */
12409void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12410{
12411 int i;
12412
12413 /*
12414 * In HFI, the mask needs to be 1 to allow interrupts.
12415 */
12416 if (enable) {
77241056
MM
12417 /* enable all interrupts */
12418 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
8638b77f 12419 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
77241056 12420
8ebd4cf1 12421 init_qsfp_int(dd);
77241056
MM
12422 } else {
12423 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
8638b77f 12424 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
77241056
MM
12425 }
12426}
12427
12428/*
12429 * Clear all interrupt sources on the chip.
12430 */
12431static void clear_all_interrupts(struct hfi1_devdata *dd)
12432{
12433 int i;
12434
12435 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
8638b77f 12436 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
77241056
MM
12437
12438 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12439 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12440 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12441 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12442 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12443 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12444 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12445 for (i = 0; i < dd->chip_send_contexts; i++)
12446 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12447 for (i = 0; i < dd->chip_sdma_engines; i++)
12448 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12449
12450 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12451 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12452 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12453}
12454
12455/* Move to pcie.c? */
12456static void disable_intx(struct pci_dev *pdev)
12457{
12458 pci_intx(pdev, 0);
12459}
12460
12461static void clean_up_interrupts(struct hfi1_devdata *dd)
12462{
12463 int i;
12464
12465 /* remove irqs - must happen before disabling/turning off */
12466 if (dd->num_msix_entries) {
12467 /* MSI-X */
12468 struct hfi1_msix_entry *me = dd->msix_entries;
12469
12470 for (i = 0; i < dd->num_msix_entries; i++, me++) {
d125a6c6 12471 if (!me->arg) /* => no irq, no affinity */
957558c9
MH
12472 continue;
12473 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
77241056
MM
12474 free_irq(me->msix.vector, me->arg);
12475 }
12476 } else {
12477 /* INTx */
12478 if (dd->requested_intx_irq) {
12479 free_irq(dd->pcidev->irq, dd);
12480 dd->requested_intx_irq = 0;
12481 }
12482 }
12483
12484 /* turn off interrupts */
12485 if (dd->num_msix_entries) {
12486 /* MSI-X */
6e5b6131 12487 pci_disable_msix(dd->pcidev);
77241056
MM
12488 } else {
12489 /* INTx */
12490 disable_intx(dd->pcidev);
12491 }
12492
12493 /* clean structures */
77241056
MM
12494 kfree(dd->msix_entries);
12495 dd->msix_entries = NULL;
12496 dd->num_msix_entries = 0;
12497}
12498
12499/*
12500 * Remap the interrupt source from the general handler to the given MSI-X
12501 * interrupt.
12502 */
12503static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12504{
12505 u64 reg;
12506 int m, n;
12507
12508 /* clear from the handled mask of the general interrupt */
12509 m = isrc / 64;
12510 n = isrc % 64;
12511 dd->gi_mask[m] &= ~((u64)1 << n);
12512
12513 /* direct the chip source to the given MSI-X interrupt */
12514 m = isrc / 8;
12515 n = isrc % 8;
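/* each CCE_INT_MAP CSR holds eight 8-bit entries; update this source's byte */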
8638b77f
JJ
12516 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12517 reg &= ~((u64)0xff << (8 * n));
12518 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12519 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
77241056
MM
12520}
12521
12522static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12523 int engine, int msix_intr)
12524{
12525 /*
12526 * SDMA engine interrupt sources are grouped by type rather
12527 * than by engine. The per-engine interrupts are as follows:
12528 * SDMA
12529 * SDMAProgress
12530 * SDMAIdle
12531 */
8638b77f 12532 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
17fb4f29 12533 msix_intr);
8638b77f 12534 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
17fb4f29 12535 msix_intr);
8638b77f 12536 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
17fb4f29 12537 msix_intr);
77241056
MM
12538}
12539
77241056
MM
12540static int request_intx_irq(struct hfi1_devdata *dd)
12541{
12542 int ret;
12543
9805071e
JJ
12544 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12545 dd->unit);
77241056 12546 ret = request_irq(dd->pcidev->irq, general_interrupt,
17fb4f29 12547 IRQF_SHARED, dd->intx_name, dd);
77241056
MM
12548 if (ret)
12549 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
17fb4f29 12550 ret);
77241056
MM
12551 else
12552 dd->requested_intx_irq = 1;
12553 return ret;
12554}
12555
12556static int request_msix_irqs(struct hfi1_devdata *dd)
12557{
77241056
MM
12558 int first_general, last_general;
12559 int first_sdma, last_sdma;
12560 int first_rx, last_rx;
957558c9 12561 int i, ret = 0;
77241056
MM
12562
12563 /* calculate the ranges we are going to use */
12564 first_general = 0;
f3ff8189
JJ
12565 last_general = first_general + 1;
12566 first_sdma = last_general;
12567 last_sdma = first_sdma + dd->num_sdma;
12568 first_rx = last_sdma;
77241056
MM
12569 last_rx = first_rx + dd->n_krcv_queues;
12570
77241056
MM
12571 /*
12572 * Sanity check - the code expects all SDMA chip source
12573 * interrupts to be in the same CSR, starting at bit 0. Verify
12574 * that this is true by checking the bit location of the start.
12575 */
12576 BUILD_BUG_ON(IS_SDMA_START % 64);
12577
12578 for (i = 0; i < dd->num_msix_entries; i++) {
12579 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12580 const char *err_info;
12581 irq_handler_t handler;
f4f30031 12582 irq_handler_t thread = NULL;
77241056
MM
12583 void *arg;
12584 int idx;
12585 struct hfi1_ctxtdata *rcd = NULL;
12586 struct sdma_engine *sde = NULL;
12587
12588 /* obtain the arguments to request_irq */
12589 if (first_general <= i && i < last_general) {
12590 idx = i - first_general;
12591 handler = general_interrupt;
12592 arg = dd;
12593 snprintf(me->name, sizeof(me->name),
9805071e 12594 DRIVER_NAME "_%d", dd->unit);
77241056 12595 err_info = "general";
957558c9 12596 me->type = IRQ_GENERAL;
77241056
MM
12597 } else if (first_sdma <= i && i < last_sdma) {
12598 idx = i - first_sdma;
12599 sde = &dd->per_sdma[idx];
12600 handler = sdma_interrupt;
12601 arg = sde;
12602 snprintf(me->name, sizeof(me->name),
9805071e 12603 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
77241056
MM
12604 err_info = "sdma";
12605 remap_sdma_interrupts(dd, idx, i);
957558c9 12606 me->type = IRQ_SDMA;
77241056
MM
12607 } else if (first_rx <= i && i < last_rx) {
12608 idx = i - first_rx;
12609 rcd = dd->rcd[idx];
12610 /* no interrupt if no rcd */
12611 if (!rcd)
12612 continue;
12613 /*
12614 * Set the interrupt register and mask for this
12615 * context's interrupt.
12616 */
8638b77f 12617 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
77241056 12618 rcd->imask = ((u64)1) <<
8638b77f 12619 ((IS_RCVAVAIL_START + idx) % 64);
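/* ireg selects the 64-bit interrupt CSR; imask is this context's bit within it */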
77241056 12620 handler = receive_context_interrupt;
f4f30031 12621 thread = receive_context_thread;
77241056
MM
12622 arg = rcd;
12623 snprintf(me->name, sizeof(me->name),
9805071e 12624 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
77241056 12625 err_info = "receive context";
66c0933b 12626 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
957558c9 12627 me->type = IRQ_RCVCTXT;
77241056
MM
12628 } else {
12629 /* not in our expected range - complain, then
4d114fdd
JJ
12630 * ignore it
12631 */
77241056 12632 dd_dev_err(dd,
17fb4f29 12633 "Unexpected extra MSI-X interrupt %d\n", i);
77241056
MM
12634 continue;
12635 }
12636 /* no argument, no interrupt */
d125a6c6 12637 if (!arg)
77241056
MM
12638 continue;
12639 /* make sure the name is terminated */
8638b77f 12640 me->name[sizeof(me->name) - 1] = 0;
77241056 12641
f4f30031 12642 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
17fb4f29 12643 me->name, arg);
77241056
MM
12644 if (ret) {
12645 dd_dev_err(dd,
17fb4f29
JJ
12646 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12647 err_info, me->msix.vector, idx, ret);
77241056
MM
12648 return ret;
12649 }
12650 /*
12651 * assign arg after request_irq call, so it will be
12652 * cleaned up
12653 */
12654 me->arg = arg;
12655
957558c9
MH
12656 ret = hfi1_get_irq_affinity(dd, me);
12657 if (ret)
12658 dd_dev_err(dd,
12659 "unable to pin IRQ %d\n", ret);
77241056
MM
12660 }
12661
77241056 12662 return ret;
77241056
MM
12663}
12664
12665/*
12666 * Set the general handler to accept all interrupts, remap all
12667 * chip interrupts back to MSI-X 0.
12668 */
12669static void reset_interrupts(struct hfi1_devdata *dd)
12670{
12671 int i;
12672
12673 /* all interrupts handled by the general handler */
12674 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12675 dd->gi_mask[i] = ~(u64)0;
12676
12677 /* all chip interrupts map to MSI-X 0 */
12678 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
8638b77f 12679 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
77241056
MM
12680}
12681
12682static int set_up_interrupts(struct hfi1_devdata *dd)
12683{
12684 struct hfi1_msix_entry *entries;
12685 u32 total, request;
12686 int i, ret;
12687 int single_interrupt = 0; /* we expect to have all the interrupts */
12688
12689 /*
12690 * Interrupt count:
12691 * 1 general, "slow path" interrupt (includes the SDMA engines
12692 * slow source, SDMACleanupDone)
12693 * N interrupts - one per used SDMA engine
12694 * M interrupt - one per kernel receive context
12695 */
12696 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12697
12698 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12699 if (!entries) {
77241056
MM
12700 ret = -ENOMEM;
12701 goto fail;
12702 }
12703 /* 1-1 MSI-X entry assignment */
12704 for (i = 0; i < total; i++)
12705 entries[i].msix.entry = i;
12706
12707 /* ask for MSI-X interrupts */
12708 request = total;
12709 request_msix(dd, &request, entries);
12710
12711 if (request == 0) {
12712 /* using INTx */
12713 /* dd->num_msix_entries already zero */
12714 kfree(entries);
12715 single_interrupt = 1;
12716 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12717 } else {
12718 /* using MSI-X */
12719 dd->num_msix_entries = request;
12720 dd->msix_entries = entries;
12721
12722 if (request != total) {
12723 /* using MSI-X, with reduced interrupts */
12724 dd_dev_err(
12725 dd,
12726 "cannot handle reduced interrupt case, want %u, got %u\n",
12727 total, request);
12728 ret = -EINVAL;
12729 goto fail;
12730 }
12731 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12732 }
12733
12734 /* mask all interrupts */
12735 set_intr_state(dd, 0);
12736 /* clear all pending interrupts */
12737 clear_all_interrupts(dd);
12738
12739 /* reset general handler mask, chip MSI-X mappings */
12740 reset_interrupts(dd);
12741
12742 if (single_interrupt)
12743 ret = request_intx_irq(dd);
12744 else
12745 ret = request_msix_irqs(dd);
12746 if (ret)
12747 goto fail;
12748
12749 return 0;
12750
12751fail:
12752 clean_up_interrupts(dd);
12753 return ret;
12754}
12755
12756/*
12757 * Set up context values in dd. Sets:
12758 *
12759 * num_rcv_contexts - number of contexts being used
12760 * n_krcv_queues - number of kernel contexts
12761 * first_user_ctxt - first non-kernel context in array of contexts
12762 * freectxts - number of free user contexts
12763 * num_send_contexts - number of PIO send contexts being used
12764 */
12765static int set_up_context_variables(struct hfi1_devdata *dd)
12766{
12767 int num_kernel_contexts;
77241056
MM
12768 int total_contexts;
12769 int ret;
12770 unsigned ngroups;
8f000f7f
DL
12771 int qos_rmt_count;
12772 int user_rmt_reduced;
77241056
MM
12773
12774 /*
33a9eb52
DL
12775 * Kernel receive contexts:
12776 * - min of 2, or 1 context per NUMA node (excluding the control context)
82c2611d 12777 * - Context 0 - control context (VL15/multicast/error)
33a9eb52
DL
12778 * - Context 1 - first kernel context
12779 * - Context 2 - second kernel context
12780 * ...
77241056
MM
12781 */
12782 if (n_krcvqs)
82c2611d 12783 /*
33a9eb52
DL
12784 * n_krcvqs is the sum of module parameter kernel receive
12785 * contexts, krcvqs[]. It does not include the control
12786 * context, so add that.
82c2611d 12787 */
33a9eb52 12788 num_kernel_contexts = n_krcvqs + 1;
77241056 12789 else
0edf80ea 12790 num_kernel_contexts = num_online_nodes() + 1;
77241056
MM
12791 num_kernel_contexts =
12792 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12793 /*
12794 * Every kernel receive context needs an ACK send context.
12795 * One send context is allocated for each VL{0-7} and VL15.
12796 */
12797 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12798 dd_dev_err(dd,
12799 "Reducing # kernel rcv contexts to: %d, from %d\n",
12800 (int)(dd->chip_send_contexts - num_vls - 1),
12801 (int)num_kernel_contexts);
12802 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12803 }
12804 /*
0852d241
JJ
12805 * User contexts:
12806 * - default to 1 user context per real (non-HT) CPU core if
12807 * num_user_contexts is negative
77241056 12808 */
2ce6bf22 12809 if (num_user_contexts < 0)
0852d241 12810 num_user_contexts =
4197344b 12811 cpumask_weight(&node_affinity.real_cpu_mask);
77241056
MM
12812
12813 total_contexts = num_kernel_contexts + num_user_contexts;
12814
12815 /*
12816 * Adjust the counts given a global max.
12817 */
12818 if (total_contexts > dd->chip_rcv_contexts) {
12819 dd_dev_err(dd,
12820 "Reducing # user receive contexts to: %d, from %d\n",
12821 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12822 (int)num_user_contexts);
12823 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12824 /* recalculate */
12825 total_contexts = num_kernel_contexts + num_user_contexts;
12826 }
12827
8f000f7f
DL
12828 /* each user context requires an entry in the RMT */
12829 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12830 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12831 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12832 dd_dev_err(dd,
12833 "RMT size is reducing the number of user receive contexts from %d to %d\n",
12834 (int)num_user_contexts,
12835 user_rmt_reduced);
12836 /* recalculate */
12837 num_user_contexts = user_rmt_reduced;
12838 total_contexts = num_kernel_contexts + num_user_contexts;
12839 }
12840
77241056
MM
12841 /* the first N are kernel contexts, the rest are user contexts */
12842 dd->num_rcv_contexts = total_contexts;
12843 dd->n_krcv_queues = num_kernel_contexts;
12844 dd->first_user_ctxt = num_kernel_contexts;
affa48de 12845 dd->num_user_contexts = num_user_contexts;
77241056
MM
12846 dd->freectxts = num_user_contexts;
12847 dd_dev_info(dd,
17fb4f29
JJ
12848 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12849 (int)dd->chip_rcv_contexts,
12850 (int)dd->num_rcv_contexts,
12851 (int)dd->n_krcv_queues,
12852 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
77241056
MM
12853
12854 /*
12855 * Receive array allocation:
12856 * All RcvArray entries are divided into groups of 8. This
12857 * is required by the hardware and will speed up writes to
12858 * consecutive entries by using write-combining of the entire
12859 * cacheline.
12860 *
12861 * The number of groups is evenly divided among all contexts;
12862 * any leftover groups are given to the first N user
12863 * contexts.
12864 */
12865 dd->rcv_entries.group_size = RCV_INCREMENT;
12866 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12867 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12868 dd->rcv_entries.nctxt_extra = ngroups -
12869 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12870 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12871 dd->rcv_entries.ngroups,
12872 dd->rcv_entries.nctxt_extra);
12873 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12874 MAX_EAGER_ENTRIES * 2) {
12875 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12876 dd->rcv_entries.group_size;
12877 dd_dev_info(dd,
17fb4f29
JJ
12878 "RcvArray group count too high, change to %u\n",
12879 dd->rcv_entries.ngroups);
77241056
MM
12880 dd->rcv_entries.nctxt_extra = 0;
12881 }
12882 /*
12883 * PIO send contexts
12884 */
12885 ret = init_sc_pools_and_sizes(dd);
12886 if (ret >= 0) { /* success */
12887 dd->num_send_contexts = ret;
12888 dd_dev_info(
12889 dd,
44306f15 12890 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
77241056
MM
12891 dd->chip_send_contexts,
12892 dd->num_send_contexts,
12893 dd->sc_sizes[SC_KERNEL].count,
12894 dd->sc_sizes[SC_ACK].count,
44306f15
JX
12895 dd->sc_sizes[SC_USER].count,
12896 dd->sc_sizes[SC_VL15].count);
77241056
MM
12897 ret = 0; /* success */
12898 }
12899
12900 return ret;
12901}
12902
12903/*
12904 * Set the device/port partition key table. The MAD code
12905 * will ensure that, at least, the partial management
12906 * partition key is present in the table.
12907 */
12908static void set_partition_keys(struct hfi1_pportdata *ppd)
12909{
12910 struct hfi1_devdata *dd = ppd->dd;
12911 u64 reg = 0;
12912 int i;
12913
12914 dd_dev_info(dd, "Setting partition keys\n");
12915 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12916 reg |= (ppd->pkeys[i] &
12917 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12918 ((i % 4) *
12919 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12920 /* Each register holds 4 PKey values. */
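/* write the register once the fourth PKey is packed; (i - 3) * 2 is its byte offset */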
12921 if ((i % 4) == 3) {
12922 write_csr(dd, RCV_PARTITION_KEY +
12923 ((i - 3) * 2), reg);
12924 reg = 0;
12925 }
12926 }
12927
12928 /* Always enable HW pkeys check when pkeys table is set */
12929 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12930}
12931
12932/*
12933 * These CSRs and memories are uninitialized on reset and must be
12934 * written before reading to set the ECC/parity bits.
12935 *
12936 * NOTE: All user context CSRs that are not mmapped write-only
12937 * (e.g. the TID flows) must be initialized even if the driver never
12938 * reads them.
12939 */
12940static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12941{
12942 int i, j;
12943
12944 /* CceIntMap */
12945 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
8638b77f 12946 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
77241056
MM
12947
12948 /* SendCtxtCreditReturnAddr */
12949 for (i = 0; i < dd->chip_send_contexts; i++)
12950 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12951
12952 /* PIO Send buffers */
12953 /* SDMA Send buffers */
4d114fdd
JJ
12954 /*
12955 * These are not normally read, and (presently) have no method
12956 * to be read, so are not pre-initialized
12957 */
77241056
MM
12958
12959 /* RcvHdrAddr */
12960 /* RcvHdrTailAddr */
12961 /* RcvTidFlowTable */
12962 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12963 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12964 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12965 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
8638b77f 12966 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
77241056
MM
12967 }
12968
12969 /* RcvArray */
12970 for (i = 0; i < dd->chip_rcv_array_count; i++)
8638b77f 12971 write_csr(dd, RCV_ARRAY + (8 * i),
17fb4f29 12972 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
77241056
MM
12973
12974 /* RcvQPMapTable */
12975 for (i = 0; i < 32; i++)
12976 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12977}
12978
12979/*
12980 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12981 */
12982static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12983 u64 ctrl_bits)
12984{
12985 unsigned long timeout;
12986 u64 reg;
12987
12988 /* is the condition present? */
12989 reg = read_csr(dd, CCE_STATUS);
12990 if ((reg & status_bits) == 0)
12991 return;
12992
12993 /* clear the condition */
12994 write_csr(dd, CCE_CTRL, ctrl_bits);
12995
12996 /* wait for the condition to clear */
12997 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12998 while (1) {
12999 reg = read_csr(dd, CCE_STATUS);
13000 if ((reg & status_bits) == 0)
13001 return;
13002 if (time_after(jiffies, timeout)) {
13003 dd_dev_err(dd,
17fb4f29
JJ
13004 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13005 status_bits, reg & status_bits);
77241056
MM
13006 return;
13007 }
13008 udelay(1);
13009 }
13010}
13011
13012/* set CCE CSRs to chip reset defaults */
13013static void reset_cce_csrs(struct hfi1_devdata *dd)
13014{
13015 int i;
13016
13017 /* CCE_REVISION read-only */
13018 /* CCE_REVISION2 read-only */
13019 /* CCE_CTRL - bits clear automatically */
13020 /* CCE_STATUS read-only, use CceCtrl to clear */
13021 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13022 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13023 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13024 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13025 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13026 /* CCE_ERR_STATUS read-only */
13027 write_csr(dd, CCE_ERR_MASK, 0);
13028 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13029 /* CCE_ERR_FORCE leave alone */
13030 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13031 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13032 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13033 /* CCE_PCIE_CTRL leave alone */
13034 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13035 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13036 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
17fb4f29 13037 CCE_MSIX_TABLE_UPPER_RESETCSR);
77241056
MM
13038 }
13039 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13040 /* CCE_MSIX_PBA read-only */
13041 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13042 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13043 }
13044 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13045 write_csr(dd, CCE_INT_MAP, 0);
13046 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13047 /* CCE_INT_STATUS read-only */
13048 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13049 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13050 /* CCE_INT_FORCE leave alone */
13051 /* CCE_INT_BLOCKED read-only */
13052 }
13053 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13054 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13055}
13056
77241056
MM
13057/* set MISC CSRs to chip reset defaults */
13058static void reset_misc_csrs(struct hfi1_devdata *dd)
13059{
13060 int i;
13061
13062 for (i = 0; i < 32; i++) {
13063 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13064 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13065 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13066 }
4d114fdd
JJ
13067 /*
13068 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13069 * only be written 128-byte chunks
13070 */
77241056
MM
13071 /* init RSA engine to clear lingering errors */
13072 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13073 write_csr(dd, MISC_CFG_RSA_MU, 0);
13074 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13075 /* MISC_STS_8051_DIGEST read-only */
13076 /* MISC_STS_SBM_DIGEST read-only */
13077 /* MISC_STS_PCIE_DIGEST read-only */
13078 /* MISC_STS_FAB_DIGEST read-only */
13079 /* MISC_ERR_STATUS read-only */
13080 write_csr(dd, MISC_ERR_MASK, 0);
13081 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13082 /* MISC_ERR_FORCE leave alone */
13083}
13084
13085/* set TXE CSRs to chip reset defaults */
13086static void reset_txe_csrs(struct hfi1_devdata *dd)
13087{
13088 int i;
13089
13090 /*
13091 * TXE Kernel CSRs
13092 */
13093 write_csr(dd, SEND_CTRL, 0);
13094 __cm_reset(dd, 0); /* reset CM internal state */
13095 /* SEND_CONTEXTS read-only */
13096 /* SEND_DMA_ENGINES read-only */
13097 /* SEND_PIO_MEM_SIZE read-only */
13098 /* SEND_DMA_MEM_SIZE read-only */
13099 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13100 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13101 /* SEND_PIO_ERR_STATUS read-only */
13102 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13103 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13104 /* SEND_PIO_ERR_FORCE leave alone */
13105 /* SEND_DMA_ERR_STATUS read-only */
13106 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13107 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13108 /* SEND_DMA_ERR_FORCE leave alone */
13109 /* SEND_EGRESS_ERR_STATUS read-only */
13110 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13111 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13112 /* SEND_EGRESS_ERR_FORCE leave alone */
13113 write_csr(dd, SEND_BTH_QP, 0);
13114 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13115 write_csr(dd, SEND_SC2VLT0, 0);
13116 write_csr(dd, SEND_SC2VLT1, 0);
13117 write_csr(dd, SEND_SC2VLT2, 0);
13118 write_csr(dd, SEND_SC2VLT3, 0);
13119 write_csr(dd, SEND_LEN_CHECK0, 0);
13120 write_csr(dd, SEND_LEN_CHECK1, 0);
13121 /* SEND_ERR_STATUS read-only */
13122 write_csr(dd, SEND_ERR_MASK, 0);
13123 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13124 /* SEND_ERR_FORCE read-only */
13125 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
8638b77f 13126 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
77241056 13127 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
8638b77f
JJ
13128 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13129 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13130 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
77241056 13131 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
8638b77f 13132 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
77241056 13133 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
8638b77f 13134 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
77241056 13135 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
17fb4f29 13136 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
77241056
MM
13137 /* SEND_CM_CREDIT_USED_STATUS read-only */
13138 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13139 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13140 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13141 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13142 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13143 for (i = 0; i < TXE_NUM_DATA_VL; i++)
8638b77f 13144 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
77241056
MM
13145 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13146 /* SEND_CM_CREDIT_USED_VL read-only */
13147 /* SEND_CM_CREDIT_USED_VL15 read-only */
13148 /* SEND_EGRESS_CTXT_STATUS read-only */
13149 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13150 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13151 /* SEND_EGRESS_ERR_INFO read-only */
13152 /* SEND_EGRESS_ERR_SOURCE read-only */
13153
13154 /*
13155 * TXE Per-Context CSRs
13156 */
13157 for (i = 0; i < dd->chip_send_contexts; i++) {
13158 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13159 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13160 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13161 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13162 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13163 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13164 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13165 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13166 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13167 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13168 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13169 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13170 }
13171
13172 /*
13173 * TXE Per-SDMA CSRs
13174 */
13175 for (i = 0; i < dd->chip_sdma_engines; i++) {
13176 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13177 /* SEND_DMA_STATUS read-only */
13178 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13179 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13180 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13181 /* SEND_DMA_HEAD read-only */
13182 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13183 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13184 /* SEND_DMA_IDLE_CNT read-only */
13185 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13186 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13187 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13188 /* SEND_DMA_ENG_ERR_STATUS read-only */
13189 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13190 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13191 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13192 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13193 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13194 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13195 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13196 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13197 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13198 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13199 }
13200}
13201
13202/*
13203 * Expect on entry:
13204 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13205 */
13206static void init_rbufs(struct hfi1_devdata *dd)
13207{
13208 u64 reg;
13209 int count;
13210
13211 /*
13212 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13213 * clear.
13214 */
13215 count = 0;
13216 while (1) {
13217 reg = read_csr(dd, RCV_STATUS);
13218 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13219 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13220 break;
13221 /*
13222 * Give up after 1ms - maximum wait time.
13223 *
13224 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13225 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13226 * 148 KB / (66% * 250MB/s) = 920us
13227 */
13228 if (count++ > 500) {
13229 dd_dev_err(dd,
17fb4f29
JJ
13230 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13231 __func__, reg);
77241056
MM
13232 break;
13233 }
13234 udelay(2); /* do not busy-wait the CSR */
13235 }
13236
13237 /* start the init - expect RcvCtrl to be 0 */
13238 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13239
13240 /*
13241 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13242 * period after the write before RcvStatus.RxRbufInitDone is valid.
13243 * The delay in the first run through the loop below is sufficient and
13244 * required before the first read of RcvStatus.RxRbufInitDone.
13245 */
13246 read_csr(dd, RCV_CTRL);
13247
13248 /* wait for the init to finish */
13249 count = 0;
13250 while (1) {
13251 /* delay is required first time through - see above */
13252 udelay(2); /* do not busy-wait the CSR */
13253 reg = read_csr(dd, RCV_STATUS);
13254 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13255 break;
13256
13257 /* give up after 100us - slowest possible at 33MHz is 73us */
13258 if (count++ > 50) {
13259 dd_dev_err(dd,
17fb4f29
JJ
13260 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13261 __func__);
77241056
MM
13262 break;
13263 }
13264 }
13265}
13266
13267/* set RXE CSRs to chip reset defaults */
13268static void reset_rxe_csrs(struct hfi1_devdata *dd)
13269{
13270 int i, j;
13271
13272 /*
13273 * RXE Kernel CSRs
13274 */
13275 write_csr(dd, RCV_CTRL, 0);
13276 init_rbufs(dd);
13277 /* RCV_STATUS read-only */
13278 /* RCV_CONTEXTS read-only */
13279 /* RCV_ARRAY_CNT read-only */
13280 /* RCV_BUF_SIZE read-only */
13281 write_csr(dd, RCV_BTH_QP, 0);
13282 write_csr(dd, RCV_MULTICAST, 0);
13283 write_csr(dd, RCV_BYPASS, 0);
13284 write_csr(dd, RCV_VL15, 0);
13285 /* this is a clear-down */
13286 write_csr(dd, RCV_ERR_INFO,
17fb4f29 13287 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13288 /* RCV_ERR_STATUS read-only */
13289 write_csr(dd, RCV_ERR_MASK, 0);
13290 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13291 /* RCV_ERR_FORCE leave alone */
13292 for (i = 0; i < 32; i++)
13293 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13294 for (i = 0; i < 4; i++)
13295 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13296 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13297 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13298 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13299 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13300 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13301 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13302 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13303 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13304 }
13305 for (i = 0; i < 32; i++)
13306 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13307
13308 /*
13309 * RXE Kernel and User Per-Context CSRs
13310 */
13311 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13312 /* kernel */
13313 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13314 /* RCV_CTXT_STATUS read-only */
13315 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13316 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13317 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13318 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13319 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13320 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13321 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13322 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13323 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13324 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13325
13326 /* user */
13327 /* RCV_HDR_TAIL read-only */
13328 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13329 /* RCV_EGR_INDEX_TAIL read-only */
13330 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13331 /* RCV_EGR_OFFSET_TAIL read-only */
13332 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13333 write_uctxt_csr(dd, i,
13334 RCV_TID_FLOW_TABLE + (8 * j), 0);
13335 }
13336 }
13337}
13338
13339/*
13340 * Set sc2vl tables.
13341 *
13342 * They power on to zeros, so to avoid send context errors
13343 * they need to be set:
13344 *
13345 * SC 0-7 -> VL 0-7 (respectively)
13346 * SC 15 -> VL 15
13347 * otherwise
13348 * -> VL 0
13349 */
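/*
 * For example, with this mapping a packet sent on SC3 goes out on VL3
 * and a packet sent on SC15 goes out on VL15, while an SC with no
 * dedicated VL here, such as SC20, falls back to VL0.
 */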
13350static void init_sc2vl_tables(struct hfi1_devdata *dd)
13351{
13352 int i;
13353 /* init per architecture spec, constrained by hardware capability */
13354
13355 /* HFI maps sent packets */
13356 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13357 0,
13358 0, 0, 1, 1,
13359 2, 2, 3, 3,
13360 4, 4, 5, 5,
13361 6, 6, 7, 7));
13362 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13363 1,
13364 8, 0, 9, 0,
13365 10, 0, 11, 0,
13366 12, 0, 13, 0,
13367 14, 0, 15, 15));
13368 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13369 2,
13370 16, 0, 17, 0,
13371 18, 0, 19, 0,
13372 20, 0, 21, 0,
13373 22, 0, 23, 0));
13374 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13375 3,
13376 24, 0, 25, 0,
13377 26, 0, 27, 0,
13378 28, 0, 29, 0,
13379 30, 0, 31, 0));
13380
13381 /* DC maps received packets */
13382 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13383 15_0,
13384 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13385 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13386 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13387 31_16,
13388 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13389 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13390
13391 /* initialize the cached sc2vl values consistently with h/w */
13392 for (i = 0; i < 32; i++) {
13393 if (i < 8 || i == 15)
13394 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13395 else
13396 *((u8 *)(dd->sc2vl) + i) = 0;
13397 }
13398}
13399
13400/*
13401 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13402 * depend on the chip going through a power-on reset - a driver may be loaded
13403 * and unloaded many times.
13404 *
13405 * Do not write any CSR values to the chip in this routine - there may be
13406 * a reset following the (possible) FLR in this routine.
13407 *
13408 */
13409static void init_chip(struct hfi1_devdata *dd)
13410{
13411 int i;
13412
13413 /*
13414 * Put the HFI CSRs in a known state.
13415 * Combine this with a DC reset.
13416 *
13417 * Stop the device from doing anything while we do a
13418 * reset. We know there are no other active users of
13419 * the device since we are now in charge. Turn off
13420 * all outbound and inbound traffic and make sure
13421 * the device does not generate any interrupts.
13422 */
13423
13424 /* disable send contexts and SDMA engines */
13425 write_csr(dd, SEND_CTRL, 0);
13426 for (i = 0; i < dd->chip_send_contexts; i++)
13427 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13428 for (i = 0; i < dd->chip_sdma_engines; i++)
13429 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13430 /* disable port (turn off RXE inbound traffic) and contexts */
13431 write_csr(dd, RCV_CTRL, 0);
13432 for (i = 0; i < dd->chip_rcv_contexts; i++)
13433 write_csr(dd, RCV_CTXT_CTRL, 0);
13434 /* mask all interrupt sources */
13435 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
8638b77f 13436 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13437
13438 /*
13439 * DC Reset: do a full DC reset before the register clear.
13440 * A recommended length of time to hold is one CSR read,
13441 * so reread the CceDcCtrl. Then, hold the DC in reset
13442 * across the clear.
13443 */
13444 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
50e5dcbe 13445 (void)read_csr(dd, CCE_DC_CTRL);
13446
13447 if (use_flr) {
13448 /*
13449 * A FLR will reset the SPC core and part of the PCIe.
13450 * The parts that need to be restored have already been
13451 * saved.
13452 */
13453 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13454
13455 /* do the FLR, the DC reset will remain */
13456 hfi1_pcie_flr(dd);
13457
13458 /* restore command and BARs */
13459 restore_pci_variables(dd);
13460
995deafa 13461 if (is_ax(dd)) {
13462 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13463 hfi1_pcie_flr(dd);
13464 restore_pci_variables(dd);
13465 }
13466 } else {
13467 dd_dev_info(dd, "Resetting CSRs with writes\n");
13468 reset_cce_csrs(dd);
13469 reset_txe_csrs(dd);
13470 reset_rxe_csrs(dd);
13471 reset_misc_csrs(dd);
13472 }
13473 /* clear the DC reset */
13474 write_csr(dd, CCE_DC_CTRL, 0);
7c03ed85 13475
77241056 13476 /* Set the LED off */
13477 setextled(dd, 0);
13478
13479 /*
13480 * Clear the QSFP reset.
72a67ba2 13481 * An FLR enforces a 0 on all out pins. The driver does not touch
77241056 13482 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low, holding
72a67ba2 13483 * anything plugged in constantly in reset, if it pays attention
77241056 13484 * to RESET_N.
72a67ba2 13485 * Prime examples of this are optical cables. Set all pins high.
13486 * I2CCLK and I2CDAT will change per direction, and INT_N and
13487 * MODPRS_N are input only and their value is ignored.
13488 */
13489 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13490 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
a2ee27a4 13491 init_chip_resources(dd);
13492}
13493
13494static void init_early_variables(struct hfi1_devdata *dd)
13495{
13496 int i;
13497
13498 /* assign link credit variables */
13499 dd->vau = CM_VAU;
13500 dd->link_credits = CM_GLOBAL_CREDITS;
995deafa 13501 if (is_ax(dd))
13502 dd->link_credits--;
13503 dd->vcu = cu_to_vcu(hfi1_cu);
13504 /* enough room for 8 MAD packets plus header - 17K */
13505 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
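	/*
	 * Worked example of the calculation above (AU size illustrative):
	 * 8 * (2048 + 128) = 17408 bytes (~17K). If vau_to_au() returned a
	 * 64-byte allocation unit, vl15_init would be 17408 / 64 = 272
	 * credits, capped to link_credits just below.
	 */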
13506 if (dd->vl15_init > dd->link_credits)
13507 dd->vl15_init = dd->link_credits;
13508
13509 write_uninitialized_csrs_and_memories(dd);
13510
13511 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13512 for (i = 0; i < dd->num_pports; i++) {
13513 struct hfi1_pportdata *ppd = &dd->pport[i];
13514
13515 set_partition_keys(ppd);
13516 }
13517 init_sc2vl_tables(dd);
13518}
13519
13520static void init_kdeth_qp(struct hfi1_devdata *dd)
13521{
13522 /* user changed the KDETH_QP */
13523 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13524 /* out of range or illegal value */
13525 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13526 kdeth_qp = 0;
13527 }
13528 if (kdeth_qp == 0) /* not set, or failed range check */
13529 kdeth_qp = DEFAULT_KDETH_QP;
13530
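	/*
	 * The value programmed below acts as a prefix that the hardware
	 * compares against the BTH QP number to recognize KDETH packets.
	 * (General description only - the exact bit placement is given by
	 * the SEND/RCV_BTH_QP_KDETH_QP_SHIFT/MASK definitions.)
	 */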
13531 write_csr(dd, SEND_BTH_QP,
13532 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13533 SEND_BTH_QP_KDETH_QP_SHIFT);
13534
13535 write_csr(dd, RCV_BTH_QP,
13536 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13537 RCV_BTH_QP_KDETH_QP_SHIFT);
13538}
13539
13540/**
13541 * init_qpmap_table
13542 * @dd - device data
13543 * @first_ctxt - first context
13544 * @last_ctxt - last context
13545 *
13546 * This routine sets the qpn mapping table that
13547 * is indexed by qpn[8:1].
13548 *
13549 * The routine will round robin the 256 settings
13550 * from first_ctxt to last_ctxt.
13551 *
13552 * The first/last looks ahead to having specialized
13553 * receive contexts for mgmt and bypass. Normal
13554 * verbs traffic will be assumed to be on a range
13555 * of receive contexts.
13556 */
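/*
 * Illustrative example (values not from this driver): with
 * first_ctxt = 1 and last_ctxt = 3, the 256 entries cycle
 * 1, 2, 3, 1, 2, 3, ... Eight 8-bit entries are packed into each
 * 64-bit RCV_QP_MAP_TABLE register, so 32 registers hold the table.
 */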
13557static void init_qpmap_table(struct hfi1_devdata *dd,
13558 u32 first_ctxt,
13559 u32 last_ctxt)
13560{
13561 u64 reg = 0;
13562 u64 regno = RCV_QP_MAP_TABLE;
13563 int i;
13564 u64 ctxt = first_ctxt;
13565
60d585ad 13566 for (i = 0; i < 256; i++) {
77241056 13567 reg |= ctxt << (8 * (i % 8));
13568 ctxt++;
13569 if (ctxt > last_ctxt)
13570 ctxt = first_ctxt;
60d585ad 13571 if (i % 8 == 7) {
13572 write_csr(dd, regno, reg);
13573 reg = 0;
13574 regno += 8;
13575 }
13576 }
13577
13578 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13579 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13580}
13581
13582struct rsm_map_table {
13583 u64 map[NUM_MAP_REGS];
13584 unsigned int used;
13585};
13586
13587struct rsm_rule_data {
13588 u8 offset;
13589 u8 pkt_type;
13590 u32 field1_off;
13591 u32 field2_off;
13592 u32 index1_off;
13593 u32 index1_width;
13594 u32 index2_off;
13595 u32 index2_width;
13596 u32 mask1;
13597 u32 value1;
13598 u32 mask2;
13599 u32 value2;
13600};
13601
13602/*
13603 * Return an initialized RMT map table for users to fill in. OK if it
13604 * returns NULL, indicating no table.
13605 */
13606static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13607{
13608 struct rsm_map_table *rmt;
13609 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13610
13611 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13612 if (rmt) {
13613 memset(rmt->map, rxcontext, sizeof(rmt->map));
13614 rmt->used = 0;
13615 }
13616
13617 return rmt;
13618}
13619
13620/*
13621 * Write the final RMT map table to the chip. OK if the table is NULL;
13622 * the caller remains responsible for freeing it.
13623 */
13624static void complete_rsm_map_table(struct hfi1_devdata *dd,
13625 struct rsm_map_table *rmt)
13626{
13627 int i;
13628
13629 if (rmt) {
13630 /* write table to chip */
13631 for (i = 0; i < NUM_MAP_REGS; i++)
13632 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13633
13634 /* enable RSM */
13635 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13636 }
13637}
13638
13639/*
13640 * Add a receive side mapping rule.
13641 */
13642static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13643 struct rsm_rule_data *rrd)
13644{
13645 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13646 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13647 1ull << rule_index | /* enable bit */
13648 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13649 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13650 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13651 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13652 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13653 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13654 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13655 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13656 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13657 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13658 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13659 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13660 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13661}
13662
13663/* return the number of RSM map table entries that will be used for QOS */
13664static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13665 unsigned int *np)
13666{
13667 int i;
13668 unsigned int m, n;
13669 u8 max_by_vl = 0;
13670
13671 /* is QOS active at all? */
13672 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13673 num_vls == 1 ||
13674 krcvqsset <= 1)
13675 goto no_qos;
13676
13677 /* determine bits for qpn */
13678 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13679 if (krcvqs[i] > max_by_vl)
13680 max_by_vl = krcvqs[i];
13681 if (max_by_vl > 32)
13682 goto no_qos;
13683 m = ilog2(__roundup_pow_of_two(max_by_vl));
13684
13685 /* determine bits for vl */
13686 n = ilog2(__roundup_pow_of_two(num_vls));
13687
13688 /* reject if too much is used */
13689 if ((m + n) > 7)
13690 goto no_qos;
13691
13692 if (mp)
13693 *mp = m;
13694 if (np)
13695 *np = n;
13696
13697 return 1 << (m + n);
13698
13699no_qos:
13700 if (mp)
13701 *mp = 0;
13702 if (np)
13703 *np = 0;
13704 return 0;
13705}
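/*
 * Worked example for qos_rmt_entries() (illustrative values, assuming
 * the QOS-active checks above pass): with num_vls = 4 and
 * krcvqs[] = {4, 4, 4, 4}, max_by_vl = 4 so m = 2, and
 * n = ilog2(roundup_pow_of_two(4)) = 2; m + n = 4 <= 7, so the
 * routine reports 1 << 4 = 16 RSM map table entries.
 */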
13706
13707/**
13708 * init_qos - init RX qos
13709 * @dd - device data
372cc85a 13710 * @rmt - RSM map table
77241056 13711 *
13712 * This routine initializes Rule 0 and the RSM map table to implement
13713 * quality of service (qos).
77241056 13714 *
13715 * If all of the limit tests succeed, qos is applied based on the array
13716 * interpretation of krcvqs where entry 0 is VL0.
77241056 13717 *
13718 * The number of vl bits (n) and the number of qpn bits (m) are computed to
13719 * feed both the RSM map table and the single rule.
77241056 13720 */
372cc85a 13721static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
77241056 13722{
b12349ae 13723 struct rsm_rule_data rrd;
77241056 13724 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
372cc85a 13725 unsigned int rmt_entries;
77241056 13726 u64 reg;
77241056 13727
4a818bed 13728 if (!rmt)
77241056 13729 goto bail;
13730 rmt_entries = qos_rmt_entries(dd, &m, &n);
13731 if (rmt_entries == 0)
77241056 13732 goto bail;
13733 qpns_per_vl = 1 << m;
13734
13735 /* enough room in the map table? */
13736 rmt_entries = 1 << (m + n);
13737 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
859bcad9 13738 goto bail;
4a818bed 13739
372cc85a 13740 /* add qos entries to the RSM map table */
33a9eb52 13741 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
13742 unsigned tctxt;
13743
13744 for (qpn = 0, tctxt = ctxt;
13745 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13746 unsigned idx, regoff, regidx;
13747
13748 /* generate the index the hardware will produce */
13749 idx = rmt->used + ((qpn << n) ^ i);
13750 regoff = (idx % 8) * 8;
13751 regidx = idx / 8;
13752 /* replace default with context number */
13753 reg = rmt->map[regidx];
13754 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13755 << regoff);
13756 reg |= (u64)(tctxt++) << regoff;
372cc85a 13757 rmt->map[regidx] = reg;
13758 if (tctxt == ctxt + krcvqs[i])
13759 tctxt = ctxt;
13760 }
13761 ctxt += krcvqs[i];
13762 }
13763
13764 rrd.offset = rmt->used;
13765 rrd.pkt_type = 2;
13766 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13767 rrd.field2_off = LRH_SC_MATCH_OFFSET;
13768 rrd.index1_off = LRH_SC_SELECT_OFFSET;
13769 rrd.index1_width = n;
13770 rrd.index2_off = QPN_SELECT_OFFSET;
13771 rrd.index2_width = m + n;
13772 rrd.mask1 = LRH_BTH_MASK;
13773 rrd.value1 = LRH_BTH_VALUE;
13774 rrd.mask2 = LRH_SC_MASK;
13775 rrd.value2 = LRH_SC_VALUE;
13776
13777 /* add rule 0 */
13778 add_rsm_rule(dd, 0, &rrd);
13779
13780 /* mark RSM map entries as used */
13781 rmt->used += rmt_entries;
13782 /* map everything else to the mcast/err/vl15 context */
13783 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
13784 dd->qos_shift = n + 1;
13785 return;
13786bail:
13787 dd->qos_shift = 1;
82c2611d 13788 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13789}
13790
13791static void init_user_fecn_handling(struct hfi1_devdata *dd,
13792 struct rsm_map_table *rmt)
13793{
13794 struct rsm_rule_data rrd;
13795 u64 reg;
13796 int i, idx, regoff, regidx;
13797 u8 offset;
13798
13799 /* there needs to be enough room in the map table */
13800 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
13801 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
13802 return;
13803 }
13804
13805 /*
13806 * RSM will extract the destination context as an index into the
13807 * map table. The destination contexts are a sequential block
13808 * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
13809 * Map entries are accessed as offset + extracted value. Adjust
13810 * the added offset so this sequence can be placed anywhere in
13811 * the table - as long as the entries themselves do not wrap.
13812 * There are only enough bits in offset for the table size, so
13813 * start with that to allow for a "negative" offset.
13814 */
13815 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
13816 (int)dd->first_user_ctxt);
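	/*
	 * Illustrative example of the wrap-around math (values made up):
	 * with a 256-entry map table, rmt->used = 20 and
	 * first_user_ctxt = 8, offset is (256 + 20 - 8) truncated to a u8,
	 * i.e. 12. RSM then maps context 8 to entry 12 + 8 = 20 (the first
	 * entry claimed below), context 9 to entry 21, and so on.
	 */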
13817
13818 for (i = dd->first_user_ctxt, idx = rmt->used;
13819 i < dd->num_rcv_contexts; i++, idx++) {
13820 /* replace with identity mapping */
13821 regoff = (idx % 8) * 8;
13822 regidx = idx / 8;
13823 reg = rmt->map[regidx];
13824 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
13825 reg |= (u64)i << regoff;
13826 rmt->map[regidx] = reg;
13827 }
13828
13829 /*
13830 * For RSM intercept of Expected FECN packets:
13831 * o packet type 0 - expected
13832 * o match on F (bit 95), using select/match 1, and
13833 * o match on SH (bit 133), using select/match 2.
13834 *
13835 * Use index 1 to extract the 8-bit receive context from DestQP
13836 * (start at bit 64). Use that as the RSM map table index.
13837 */
13838 rrd.offset = offset;
13839 rrd.pkt_type = 0;
13840 rrd.field1_off = 95;
13841 rrd.field2_off = 133;
13842 rrd.index1_off = 64;
13843 rrd.index1_width = 8;
13844 rrd.index2_off = 0;
13845 rrd.index2_width = 0;
13846 rrd.mask1 = 1;
13847 rrd.value1 = 1;
13848 rrd.mask2 = 1;
13849 rrd.value2 = 1;
13850
13851 /* add rule 1 */
13852 add_rsm_rule(dd, 1, &rrd);
13853
13854 rmt->used += dd->num_user_contexts;
13855}
13856
13857static void init_rxe(struct hfi1_devdata *dd)
13858{
13859 struct rsm_map_table *rmt;
13860
13861 /* enable all receive errors */
13862 write_csr(dd, RCV_ERR_MASK, ~0ull);
13863
13864 rmt = alloc_rsm_map_table(dd);
13865 /* set up QOS, including the QPN map table */
13866 init_qos(dd, rmt);
8f000f7f 13867 init_user_fecn_handling(dd, rmt);
13868 complete_rsm_map_table(dd, rmt);
13869 kfree(rmt);
13870
13871 /*
13872 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13873 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13874 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13875 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13876 * Max_Payload_Size set to its minimum of 128.
13877 *
13878 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13879 * (64 bytes). Max_Payload_Size is possibly modified upward in
13880 * tune_pcie_caps() which is called after this routine.
13881 */
13882}
13883
13884static void init_other(struct hfi1_devdata *dd)
13885{
13886 /* enable all CCE errors */
13887 write_csr(dd, CCE_ERR_MASK, ~0ull);
13888 /* enable *some* Misc errors */
13889 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13890 /* enable all DC errors, except LCB */
13891 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13892 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13893}
13894
13895/*
13896 * Fill out the given AU table using the given CU. A CU is defined in terms
13897 * of AUs. The table is an encoding: given the index, how many AUs does that
13898 * represent?
13899 *
13900 * NOTE: Assumes that the register layout is the same for the
13901 * local and remote tables.
13902 */
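/*
 * For example (reading the writes below), with cu == 1 the eight table
 * entries encode 0, 1, 2, 4, 8, 16, 32 and 64 AUs; a larger CU scales
 * entries 2 through 7 by that factor while entries 0 and 1 stay fixed.
 */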
13903static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13904 u32 csr0to3, u32 csr4to7)
13905{
13906 write_csr(dd, csr0to3,
13907 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
13908 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
13909 2ull * cu <<
13910 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
13911 4ull * cu <<
13912 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
77241056 13913 write_csr(dd, csr4to7,
13914 8ull * cu <<
13915 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
13916 16ull * cu <<
13917 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
13918 32ull * cu <<
13919 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
13920 64ull * cu <<
13921 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13922}
13923
13924static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13925{
13926 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
17fb4f29 13927 SEND_CM_LOCAL_AU_TABLE4_TO7);
13928}
13929
13930void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13931{
13932 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
17fb4f29 13933 SEND_CM_REMOTE_AU_TABLE4_TO7);
13934}
13935
13936static void init_txe(struct hfi1_devdata *dd)
13937{
13938 int i;
13939
13940 /* enable all PIO, SDMA, general, and Egress errors */
13941 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13942 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13943 write_csr(dd, SEND_ERR_MASK, ~0ull);
13944 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13945
13946 /* enable all per-context and per-SDMA engine errors */
13947 for (i = 0; i < dd->chip_send_contexts; i++)
13948 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13949 for (i = 0; i < dd->chip_sdma_engines; i++)
13950 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13951
13952 /* set the local CU to AU mapping */
13953 assign_local_cm_au_table(dd, dd->vcu);
13954
13955 /*
13956 * Set reasonable default for Credit Return Timer
13957 * Don't set on Simulator - causes it to choke.
13958 */
13959 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13960 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13961}
13962
13963int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13964{
13965 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13966 unsigned sctxt;
13967 int ret = 0;
13968 u64 reg;
13969
13970 if (!rcd || !rcd->sc) {
13971 ret = -EINVAL;
13972 goto done;
13973 }
13974 sctxt = rcd->sc->hw_context;
13975 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13976 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13977 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13978 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13979 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13980 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13981 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13982 /*
13983 * Enable send-side J_KEY integrity check, unless this is A0 h/w
77241056 13984 */
995deafa 13985 if (!is_ax(dd)) {
13986 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13987 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13988 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13989 }
13990
13991 /* Enable J_KEY check on receive context. */
13992 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13993 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13994 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13995 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13996done:
13997 return ret;
13998}
13999
14000int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
14001{
14002 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14003 unsigned sctxt;
14004 int ret = 0;
14005 u64 reg;
14006
14007 if (!rcd || !rcd->sc) {
14008 ret = -EINVAL;
14009 goto done;
14010 }
14011 sctxt = rcd->sc->hw_context;
14012 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14013 /*
14014 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14015 * This check would not have been enabled for A0 h/w, see
14016 * set_ctxt_jkey().
14017 */
995deafa 14018 if (!is_ax(dd)) {
14019 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14020 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14021 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14022 }
14023 /* Turn off the J_KEY on the receive side */
14024 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14025done:
14026 return ret;
14027}
14028
14029int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14030{
14031 struct hfi1_ctxtdata *rcd;
14032 unsigned sctxt;
14033 int ret = 0;
14034 u64 reg;
14035
e490974e 14036 if (ctxt < dd->num_rcv_contexts) {
77241056 14037 rcd = dd->rcd[ctxt];
e490974e 14038 } else {
14039 ret = -EINVAL;
14040 goto done;
14041 }
14042 if (!rcd || !rcd->sc) {
14043 ret = -EINVAL;
14044 goto done;
14045 }
14046 sctxt = rcd->sc->hw_context;
14047 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14048 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14049 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14050 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14051 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
e38d1e4f 14052 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14053 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14054done:
14055 return ret;
14056}
14057
14058int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14059{
14060 struct hfi1_ctxtdata *rcd;
14061 unsigned sctxt;
14062 int ret = 0;
14063 u64 reg;
14064
e490974e 14065 if (ctxt < dd->num_rcv_contexts) {
77241056 14066 rcd = dd->rcd[ctxt];
e490974e 14067 } else {
14068 ret = -EINVAL;
14069 goto done;
14070 }
14071 if (!rcd || !rcd->sc) {
14072 ret = -EINVAL;
14073 goto done;
14074 }
14075 sctxt = rcd->sc->hw_context;
14076 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14077 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14078 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14079 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14080done:
14081 return ret;
14082}
14083
14084/*
14085 * Start doing the clean up of the chip. Our clean up happens in multiple
14086 * stages and this is just the first.
14087 */
14088void hfi1_start_cleanup(struct hfi1_devdata *dd)
14089{
affa48de 14090 aspm_exit(dd);
14091 free_cntrs(dd);
14092 free_rcverr(dd);
14093 clean_up_interrupts(dd);
a2ee27a4 14094 finish_chip_resources(dd);
14095}
14096
14097#define HFI_BASE_GUID(dev) \
14098 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14099
14100/*
14101 * Information can be shared between the two HFIs on the same ASIC
14102 * in the same OS. This function finds the peer device and sets
14103 * up a shared structure.
77241056 14104 */
78eb129d 14105static int init_asic_data(struct hfi1_devdata *dd)
14106{
14107 unsigned long flags;
14108 struct hfi1_devdata *tmp, *peer = NULL;
98f179a5 14109 struct hfi1_asic_data *asic_data;
78eb129d 14110 int ret = 0;
77241056 14111
14112 /* pre-allocate the asic structure in case we are the first device */
14113 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14114 if (!asic_data)
14115 return -ENOMEM;
14116
14117 spin_lock_irqsave(&hfi1_devs_lock, flags);
14118 /* Find our peer device */
14119 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14120 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14121 dd->unit != tmp->unit) {
14122 peer = tmp;
14123 break;
14124 }
14125 }
14126
78eb129d 14127 if (peer) {
98f179a5 14128 /* use already allocated structure */
78eb129d 14129 dd->asic_data = peer->asic_data;
98f179a5 14130 kfree(asic_data);
78eb129d 14131 } else {
98f179a5 14132 dd->asic_data = asic_data;
14133 mutex_init(&dd->asic_data->asic_resource_mutex);
14134 }
14135 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
77241056 14136 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14137
14138 /* first one through - set up i2c devices */
14139 if (!peer)
14140 ret = set_up_i2c(dd, dd->asic_data);
14141
78eb129d 14142 return ret;
14143}
14144
14145/*
14146 * Set dd->boardname. Use a generic name if a name is not returned from
14147 * EFI variable space.
14148 *
14149 * Return 0 on success, -ENOMEM if space could not be allocated.
14150 */
14151static int obtain_boardname(struct hfi1_devdata *dd)
14152{
14153 /* generic board description */
14154 const char generic[] =
14155 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14156 unsigned long size;
14157 int ret;
14158
14159 ret = read_hfi1_efi_var(dd, "description", &size,
14160 (void **)&dd->boardname);
14161 if (ret) {
845f876d 14162 dd_dev_info(dd, "Board description not found\n");
14163 /* use generic description */
14164 dd->boardname = kstrdup(generic, GFP_KERNEL);
14165 if (!dd->boardname)
14166 return -ENOMEM;
14167 }
14168 return 0;
14169}
14170
14171/*
14172 * Check the interrupt registers to make sure that they are mapped correctly.
14173 * It is intended to help the user identify any mismapping by the VMM
14174 * when the driver is running in a VM. This function should only be
14175 * called before interrupts are set up properly.
14176 *
14177 * Return 0 on success, -EINVAL on failure.
14178 */
14179static int check_int_registers(struct hfi1_devdata *dd)
14180{
14181 u64 reg;
14182 u64 all_bits = ~(u64)0;
14183 u64 mask;
14184
14185 /* Clear CceIntMask[0] to avoid raising any interrupts */
14186 mask = read_csr(dd, CCE_INT_MASK);
14187 write_csr(dd, CCE_INT_MASK, 0ull);
14188 reg = read_csr(dd, CCE_INT_MASK);
14189 if (reg)
14190 goto err_exit;
14191
14192 /* Clear all interrupt status bits */
14193 write_csr(dd, CCE_INT_CLEAR, all_bits);
14194 reg = read_csr(dd, CCE_INT_STATUS);
14195 if (reg)
14196 goto err_exit;
14197
14198 /* Set all interrupt status bits */
14199 write_csr(dd, CCE_INT_FORCE, all_bits);
14200 reg = read_csr(dd, CCE_INT_STATUS);
14201 if (reg != all_bits)
14202 goto err_exit;
14203
14204 /* Restore the interrupt mask */
14205 write_csr(dd, CCE_INT_CLEAR, all_bits);
14206 write_csr(dd, CCE_INT_MASK, mask);
14207
14208 return 0;
14209err_exit:
14210 write_csr(dd, CCE_INT_MASK, mask);
14211 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14212 return -EINVAL;
14213}
14214
77241056 14215/**
7c03ed85 14216 * Allocate and initialize the device structure for the hfi.
14217 * @dev: the pci_dev for hfi1_ib device
14218 * @ent: pci_device_id struct for this dev
14219 *
14220 * Also allocates, initializes, and returns the devdata struct for this
14221 * device instance
14222 *
14223 * This is global, and is called directly at init to set up the
14224 * chip-specific function pointers for later use.
14225 */
14226struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14227 const struct pci_device_id *ent)
14228{
14229 struct hfi1_devdata *dd;
14230 struct hfi1_pportdata *ppd;
14231 u64 reg;
14232 int i, ret;
14233 static const char * const inames[] = { /* implementation names */
14234 "RTL silicon",
14235 "RTL VCS simulation",
14236 "RTL FPGA emulation",
14237 "Functional simulator"
14238 };
24487dd3 14239 struct pci_dev *parent = pdev->bus->self;
77241056 14240
14241 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14242 sizeof(struct hfi1_pportdata));
14243 if (IS_ERR(dd))
14244 goto bail;
14245 ppd = dd->pport;
14246 for (i = 0; i < dd->num_pports; i++, ppd++) {
14247 int vl;
14248 /* init common fields */
14249 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14250 /* DC supports 4 link widths */
14251 ppd->link_width_supported =
14252 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14253 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14254 ppd->link_width_downgrade_supported =
14255 ppd->link_width_supported;
14256 /* start out enabling only 4X */
14257 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14258 ppd->link_width_downgrade_enabled =
14259 ppd->link_width_downgrade_supported;
14260 /* link width active is 0 when link is down */
14261 /* link width downgrade active is 0 when link is down */
14262
14263 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14264 num_vls > HFI1_MAX_VLS_SUPPORTED) {
14265 hfi1_early_err(&pdev->dev,
14266 "Invalid num_vls %u, using %u VLs\n",
14267 num_vls, HFI1_MAX_VLS_SUPPORTED);
14268 num_vls = HFI1_MAX_VLS_SUPPORTED;
14269 }
14270 ppd->vls_supported = num_vls;
14271 ppd->vls_operational = ppd->vls_supported;
8a4d3444 14272 ppd->actual_vls_operational = ppd->vls_supported;
14273 /* Set the default MTU. */
14274 for (vl = 0; vl < num_vls; vl++)
14275 dd->vld[vl].mtu = hfi1_max_mtu;
14276 dd->vld[15].mtu = MAX_MAD_PACKET;
14277 /*
14278 * Set the initial values to reasonable default, will be set
14279 * for real when link is up.
14280 */
14281 ppd->lstate = IB_PORT_DOWN;
14282 ppd->overrun_threshold = 0x4;
14283 ppd->phy_error_threshold = 0xf;
14284 ppd->port_crc_mode_enabled = link_crc_mask;
14285 /* initialize supported LTP CRC mode */
14286 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14287 /* initialize enabled LTP CRC mode */
14288 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14289 /* start in offline */
14290 ppd->host_link_state = HLS_DN_OFFLINE;
14291 init_vl_arb_caches(ppd);
f45c8dc8 14292 ppd->last_pstate = 0xff; /* invalid value */
14293 }
14294
14295 dd->link_default = HLS_DN_POLL;
14296
14297 /*
14298 * Do remaining PCIe setup and save PCIe values in dd.
14299 * Any error printing is already done by the init code.
14300 * On return, we have the chip mapped.
14301 */
14302 ret = hfi1_pcie_ddinit(dd, pdev, ent);
14303 if (ret < 0)
14304 goto bail_free;
14305
14306 /* verify that reads actually work, save revision for reset check */
14307 dd->revision = read_csr(dd, CCE_REVISION);
14308 if (dd->revision == ~(u64)0) {
14309 dd_dev_err(dd, "cannot read chip CSRs\n");
14310 ret = -EINVAL;
14311 goto bail_cleanup;
14312 }
14313 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14314 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14315 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14316 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14317
14318 /*
14319 * Check interrupt registers mapping if the driver has no access to
14320 * the upstream component. In this case, it is likely that the driver
14321 * is running in a VM.
14322 */
14323 if (!parent) {
14324 ret = check_int_registers(dd);
14325 if (ret)
14326 goto bail_cleanup;
14327 }
14328
14329 /*
14330 * obtain the hardware ID - NOT related to unit, which is a
14331 * software enumeration
14332 */
14333 reg = read_csr(dd, CCE_REVISION2);
14334 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14335 & CCE_REVISION2_HFI_ID_MASK;
14336 /* the variable size will remove unwanted bits */
14337 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14338 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14339 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14340 dd->icode < ARRAY_SIZE(inames) ?
14341 inames[dd->icode] : "unknown", (int)dd->irev);
14342
14343 /* speeds the hardware can support */
14344 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14345 /* speeds allowed to run at */
14346 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14347 /* give a reasonable active value, will be set on link up */
14348 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14349
14350 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14351 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14352 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14353 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14354 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14355 /* fix up link widths for emulation _p */
14356 ppd = dd->pport;
14357 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14358 ppd->link_width_supported =
14359 ppd->link_width_enabled =
14360 ppd->link_width_downgrade_supported =
14361 ppd->link_width_downgrade_enabled =
14362 OPA_LINK_WIDTH_1X;
14363 }
14364 /* ensure num_vls isn't larger than the number of sdma engines */
14365 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14366 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14367 num_vls, dd->chip_sdma_engines);
14368 num_vls = dd->chip_sdma_engines;
14369 ppd->vls_supported = dd->chip_sdma_engines;
8a4d3444 14370 ppd->vls_operational = ppd->vls_supported;
14371 }
14372
14373 /*
14374 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14375 * Limit the max if larger than the field holds. If timeout is
14376 * non-zero, then the calculated field will be at least 1.
14377 *
14378 * Must be after icode is set up - the cclock rate depends
14379 * on knowing the hardware being used.
14380 */
14381 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14382 if (dd->rcv_intr_timeout_csr >
14383 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14384 dd->rcv_intr_timeout_csr =
14385 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14386 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14387 dd->rcv_intr_timeout_csr = 1;
14388
14389 /* needs to be done before we look for the peer device */
14390 read_guid(dd);
14391
14392 /* set up shared ASIC data with peer device */
14393 ret = init_asic_data(dd);
14394 if (ret)
14395 goto bail_cleanup;
7c03ed85 14396
14397 /* obtain chip sizes, reset chip CSRs */
14398 init_chip(dd);
14399
14400 /* read in the PCIe link speed information */
14401 ret = pcie_speeds(dd);
14402 if (ret)
14403 goto bail_cleanup;
14404
14405 /* Needs to be called before hfi1_firmware_init */
14406 get_platform_config(dd);
14407
14408 /* read in firmware */
14409 ret = hfi1_firmware_init(dd);
14410 if (ret)
14411 goto bail_cleanup;
14412
14413 /*
14414 * In general, the PCIe Gen3 transition must occur after the
14415 * chip has been idled (so it won't initiate any PCIe transactions
14416 * e.g. an interrupt) and before the driver changes any registers
14417 * (the transition will reset the registers).
14418 *
14419 * In particular, place this call after:
14420 * - init_chip() - the chip will not initiate any PCIe transactions
14421 * - pcie_speeds() - reads the current link speed
14422 * - hfi1_firmware_init() - the needed firmware is ready to be
14423 * downloaded
14424 */
14425 ret = do_pcie_gen3_transition(dd);
14426 if (ret)
14427 goto bail_cleanup;
14428
14429 /* start setting dd values and adjusting CSRs */
14430 init_early_variables(dd);
14431
14432 parse_platform_config(dd);
14433
14434 ret = obtain_boardname(dd);
14435 if (ret)
77241056 14436 goto bail_cleanup;
14437
14438 snprintf(dd->boardversion, BOARD_VERS_MAX,
5d9157aa 14439 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
77241056 14440 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14441 (u32)dd->majrev,
14442 (u32)dd->minrev,
14443 (dd->revision >> CCE_REVISION_SW_SHIFT)
14444 & CCE_REVISION_SW_MASK);
14445
14446 ret = set_up_context_variables(dd);
14447 if (ret)
14448 goto bail_cleanup;
14449
14450 /* set initial RXE CSRs */
14451 init_rxe(dd);
14452 /* set initial TXE CSRs */
14453 init_txe(dd);
14454 /* set initial non-RXE, non-TXE CSRs */
14455 init_other(dd);
14456 /* set up KDETH QP prefix in both RX and TX CSRs */
14457 init_kdeth_qp(dd);
14458
14459 ret = hfi1_dev_affinity_init(dd);
14460 if (ret)
14461 goto bail_cleanup;
957558c9 14462
14463 /* send contexts must be set up before receive contexts */
14464 ret = init_send_contexts(dd);
14465 if (ret)
14466 goto bail_cleanup;
14467
14468 ret = hfi1_create_ctxts(dd);
14469 if (ret)
14470 goto bail_cleanup;
14471
14472 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14473 /*
14474 * rcd[0] is guaranteed to be valid by this point. Also, all
14475 * contexts are using the same value, as per the module parameter.
14476 */
14477 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14478
14479 ret = init_pervl_scs(dd);
14480 if (ret)
14481 goto bail_cleanup;
14482
14483 /* sdma init */
14484 for (i = 0; i < dd->num_pports; ++i) {
14485 ret = sdma_init(dd, i);
14486 if (ret)
14487 goto bail_cleanup;
14488 }
14489
14490 /* use contexts created by hfi1_create_ctxts */
14491 ret = set_up_interrupts(dd);
14492 if (ret)
14493 goto bail_cleanup;
14494
14495 /* set up LCB access - must be after set_up_interrupts() */
14496 init_lcb_access(dd);
14497
14498 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14499 dd->base_guid & 0xFFFFFF);
14500
14501 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14502 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14503 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14504
14505 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14506 if (ret)
14507 goto bail_clear_intr;
14508 check_fabric_firmware_versions(dd);
14509
14510 thermal_init(dd);
14511
14512 ret = init_cntrs(dd);
14513 if (ret)
14514 goto bail_clear_intr;
14515
14516 ret = init_rcverr(dd);
14517 if (ret)
14518 goto bail_free_cntrs;
14519
14520 ret = eprom_init(dd);
14521 if (ret)
14522 goto bail_free_rcverr;
14523
14524 goto bail;
14525
14526bail_free_rcverr:
14527 free_rcverr(dd);
14528bail_free_cntrs:
14529 free_cntrs(dd);
14530bail_clear_intr:
14531 clean_up_interrupts(dd);
14532bail_cleanup:
14533 hfi1_pcie_ddcleanup(dd);
14534bail_free:
14535 hfi1_free_devdata(dd);
14536 dd = ERR_PTR(ret);
14537bail:
14538 return dd;
14539}
14540
14541static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14542 u32 dw_len)
14543{
14544 u32 delta_cycles;
14545 u32 current_egress_rate = ppd->current_egress_rate;
14546 /* rates here are in units of 10^6 bits/sec */
14547
14548 if (desired_egress_rate == -1)
14549 return 0; /* shouldn't happen */
14550
14551 if (desired_egress_rate >= current_egress_rate)
14552 return 0; /* we can't help it go faster, only slower */
14553
14554 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14555 egress_cycles(dw_len * 4, current_egress_rate);
14556
14557 return (u16)delta_cycles;
14558}
14559
14560/**
14561 * create_pbc - build a pbc for transmission
14562 * @flags: special case flags or-ed in built pbc
14563 * @srate: static rate
14564 * @vl: vl
14565 * @dwlen: dword length (header words + data words + pbc words)
14566 *
14567 * Create a PBC with the given flags, rate, VL, and length.
14568 *
14569 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14570 * for verbs, which does not use this PSM feature. The lone other caller
14571 * is for the diagnostic interface which calls this if the user does not
14572 * supply their own PBC.
14573 */
14574u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14575 u32 dw_len)
14576{
14577 u64 pbc, delay = 0;
14578
14579 if (unlikely(srate_mbs))
14580 delay = delay_cycles(ppd, srate_mbs, dw_len);
14581
14582 pbc = flags
14583 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14584 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14585 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14586 | (dw_len & PBC_LENGTH_DWS_MASK)
14587 << PBC_LENGTH_DWS_SHIFT;
14588
14589 return pbc;
14590}
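/*
 * Illustrative call (not taken from an actual caller): a packet sent on
 * VL0 with no special flags and no static rate limit might build its
 * PBC as
 *
 *	pbc = create_pbc(ppd, 0, 0, 0, hdr_dwords + data_dwords + 2);
 *
 * where the "+ 2" accounts for the two dwords of the PBC itself, per
 * the dwlen description above, and hdr_dwords/data_dwords are the
 * caller's own counts.
 */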
14591
14592#define SBUS_THERMAL 0x4f
14593#define SBUS_THERM_MONITOR_MODE 0x1
14594
14595#define THERM_FAILURE(dev, ret, reason) \
14596 dd_dev_err((dd), \
14597 "Thermal sensor initialization failed: %s (%d)\n", \
14598 (reason), (ret))
14599
14600/*
cde10afa 14601 * Initialize the thermal sensor.
14602 *
14603 * After initialization, enable polling of the thermal sensor through
14604 * the SBus interface. For this to work, the SBus Master firmware
14605 * has to be loaded, because the HW polling logic uses SBus
14606 * interrupts, which are not supported by the default firmware.
14607 * Otherwise, no data will be returned through
14608 * the ASIC_STS_THERM CSR.
14609 */
14610static int thermal_init(struct hfi1_devdata *dd)
14611{
14612 int ret = 0;
14613
14614 if (dd->icode != ICODE_RTL_SILICON ||
a453698b 14615 check_chip_resource(dd, CR_THERM_INIT, NULL))
14616 return ret;
14617
14618 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14619 if (ret) {
14620 THERM_FAILURE(dd, ret, "Acquire SBus");
14621 return ret;
14622 }
14623
77241056 14624 dd_dev_info(dd, "Initializing thermal sensor\n");
14625 /* Disable polling of thermal readings */
14626 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14627 msleep(100);
14628 /* Thermal Sensor Initialization */
14629 /* Step 1: Reset the Thermal SBus Receiver */
14630 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14631 RESET_SBUS_RECEIVER, 0);
14632 if (ret) {
14633 THERM_FAILURE(dd, ret, "Bus Reset");
14634 goto done;
14635 }
14636 /* Step 2: Set Reset bit in Thermal block */
14637 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14638 WRITE_SBUS_RECEIVER, 0x1);
14639 if (ret) {
14640 THERM_FAILURE(dd, ret, "Therm Block Reset");
14641 goto done;
14642 }
14643 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
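	/* 0x32 == 50, presumably dividing the 100MHz reference down to 2MHz */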
14644 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14645 WRITE_SBUS_RECEIVER, 0x32);
14646 if (ret) {
14647 THERM_FAILURE(dd, ret, "Write Clock Div");
14648 goto done;
14649 }
14650 /* Step 4: Select temperature mode */
14651 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14652 WRITE_SBUS_RECEIVER,
14653 SBUS_THERM_MONITOR_MODE);
14654 if (ret) {
14655 THERM_FAILURE(dd, ret, "Write Mode Sel");
14656 goto done;
14657 }
14658 /* Step 5: De-assert block reset and start conversion */
14659 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14660 WRITE_SBUS_RECEIVER, 0x2);
14661 if (ret) {
14662 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14663 goto done;
14664 }
14665 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14666 msleep(22);
14667
14668 /* Enable polling of thermal readings */
14669 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14670
14671 /* Set initialized flag */
14672 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14673 if (ret)
14674 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14675
77241056 14676done:
576531fd 14677 release_chip_resource(dd, CR_SBUS);
14678 return ret;
14679}
14680
14681static void handle_temp_err(struct hfi1_devdata *dd)
14682{
14683 struct hfi1_pportdata *ppd = &dd->pport[0];
14684 /*
14685 * Thermal Critical Interrupt
14686 * Put the device into forced freeze mode, take link down to
14687 * offline, and put DC into reset.
14688 */
14689 dd_dev_emerg(dd,
14690 "Critical temperature reached! Forcing device into freeze mode!\n");
14691 dd->flags |= HFI1_FORCED_FREEZE;
8638b77f 14692 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
14693 /*
14694 * Shut DC down as much and as quickly as possible.
14695 *
14696 * Step 1: Take the link down to OFFLINE. This will cause the
14697 * 8051 to put the Serdes in reset. However, we don't want to
14698 * go through the entire link state machine since we want to
14699 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14700 * but rather an attempt to save the chip.
14701 * Code below is almost the same as quiet_serdes() but avoids
14702 * all the extra work and the sleeps.
14703 */
14704 ppd->driver_link_ready = 0;
14705 ppd->link_enabled = 0;
14706 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14707 PLS_OFFLINE);
14708 /*
14709 * Step 2: Shutdown LCB and 8051
14710 * After shutdown, do not restore DC_CFG_RESET value.
14711 */
14712 dc_shutdown(dd);
14713}