/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
#include "affinity.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
	u16 unused0;
	u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}

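/*
 * For illustration: FLAG_ENTRY0("CceCsrParityErr", SMASK) expands to the
 * initializer {SMASK, "CceCsrParityErr", 0}, matching flag_table's
 * {flag, str, extra} field order.
 */
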
/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define DEFAULT_KRCVQS 2
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES 256
#define NUM_MAP_REGS 32

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/* RSM fields */

/* packet type */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
/* QPN[7..1] */
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
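
/*
 * For illustration: a match/select offset packs the quadword index in the
 * bits above QW_SHIFT and the bit offset within that quadword in the low
 * six bits, e.g.
 *   LRH_BTH_MATCH_OFFSET = (0ull << 6) | 48 = 48  (QW 0, bit 48)
 *   QPN_SELECT_OFFSET    = (1ull << 6) |  1 = 65  (QW 1, bit 1)
 */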

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)
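
/*
 * For illustration (hypothetical values, not a table from this file):
 * SC2VL_VAL(0, 0, 0, 1, 0, 2, 1, 3, 1, 4, 2, 5, 2, 6, 3, 7, 3) builds the
 * SendSC2VLt0 register value mapping SC0-SC7 to VLs 0,0,1,1,2,2,3,3 by
 * shifting each VL value to its SEND_SC2VLT0_SC<n>_SHIFT position.
 */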

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
			| CCE_STATUS_RXE_FROZE_SMASK \
			| CCE_STATUS_TXE_FROZE_SMASK \
			| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
			| CCE_STATUS_TXE_PAUSED_SMASK \
			| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/* 4-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC SMA message", 0x0002),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);

/*
 * Error interrupt table entry.  This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt
 * registers.  Second tier interrupt registers have a single bit
 * representing them in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }

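/*
 * For illustration: EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 * { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" },
 * while the DC_EE* variants cover the DC blocks' _FLG/_FLG_CLR/_FLG_EN
 * and _FLG/_CLR/_EN register naming patterns.
 */
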
/*
 * Table of the "misc" grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register cannot be derived from the MTU value because 10K is not
 * a power of 2.  Therefore, we need a constant.  Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7

/*
 * Table of the DC grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};

struct cntr_entry {
	/*
	 * counter name
	 */
	char *name;

	/*
	 * csr to read for name (if applicable)
	 */
	u64 csr;

	/*
	 * offset into dd or ppd to store the counter's value
	 */
	int offset;

	/*
	 * flags
	 */
	u8 flags;

	/*
	 * accessor for stat element, context either dd or ppd
	 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

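/*
 * For illustration: the counter-array helpers compute the CSR address as
 * base + 8 * index, since each counter occupies one 64-bit CSR.  E.g., a
 * hypothetical RXE32_DEV_CNTR_ELEM(RxExample, 1, CNTR_NORMAL) yields a
 * cntr_entry named "RxExample" that reads RCV_COUNTER_ARRAY32 + 8 through
 * dev_access_u32_csr with CNTR_32BIT set in its flags.
 */
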
/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

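/*
 * For illustration: per-context receive CSRs sit at a 0x100-byte stride,
 * so OVR_ELM(3) builds a counter named "RcvHdrOvr3" that reads
 * RCV_HDR_OVFL_CNT + 0x300.
 */
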
/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)

u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq((void __iomem *)dd->kregbase + offset);
	return -1;
}

void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT)
		writeq(value, (void __iomem *)dd->kregbase + offset);
}

void __iomem *get_csr_addr(
	struct hfi1_devdata *dd,
	u32 offset)
{
	return (void __iomem *)dd->kregbase + offset;
}

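/*
 * For illustration: when the device is absent (HFI1_PRESENT clear),
 * read_csr() returns -1, i.e. all ones as a u64, matching the value a
 * read from a missing or surprise-removed PCI device returns on the bus.
 */
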
1288static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1289 int mode, u64 value)
1290{
1291 u64 ret;
1292
1293 if (mode == CNTR_MODE_R) {
1294 ret = read_csr(dd, csr);
1295 } else if (mode == CNTR_MODE_W) {
1296 write_csr(dd, csr, value);
1297 ret = value;
1298 } else {
1299 dd_dev_err(dd, "Invalid cntr register access mode");
1300 return 0;
1301 }
1302
1303 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1304 return ret;
1305}
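/*
 * Illustrative use (CCE counter 0, i.e. the CCE_COUNTER_ARRAY32 base):
 *
 *	u64 val = read_write_csr(dd, CCE_COUNTER_ARRAY32, CNTR_MODE_R, 0);
 *	read_write_csr(dd, CCE_COUNTER_ARRAY32, CNTR_MODE_W, 0);
 *
 * The accessors below wrap this helper, first biasing the CSR address for
 * the requested VL or SDMA engine where applicable.
 */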
1306
1307/* Dev Access */
1308static u64 dev_access_u32_csr(const struct cntr_entry *entry,
17fb4f29 1309 void *context, int vl, int mode, u64 data)
77241056 1310{
a787bde8 1311 struct hfi1_devdata *dd = context;
a699c6c2 1312 u64 csr = entry->csr;
77241056 1313
1314 if (entry->flags & CNTR_SDMA) {
1315 if (vl == CNTR_INVALID_VL)
1316 return 0;
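		/* per-engine copies of an SDMA counter are 0x100 bytes apart */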
1317 csr += 0x100 * vl;
1318 } else {
1319 if (vl != CNTR_INVALID_VL)
1320 return 0;
1321 }
1322 return read_write_csr(dd, csr, mode, data);
1323}
1324
1325static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1326 void *context, int idx, int mode, u64 data)
1327{
1328 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1329
1330 if (dd->per_sdma && idx < dd->num_sdma)
1331 return dd->per_sdma[idx].err_cnt;
1332 return 0;
1333}
1334
1335static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1336 void *context, int idx, int mode, u64 data)
1337{
1338 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1339
1340 if (dd->per_sdma && idx < dd->num_sdma)
1341 return dd->per_sdma[idx].sdma_int_cnt;
1342 return 0;
1343}
1344
1345static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1346 void *context, int idx, int mode, u64 data)
1347{
1348 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1349
1350 if (dd->per_sdma && idx < dd->num_sdma)
1351 return dd->per_sdma[idx].idle_int_cnt;
1352 return 0;
1353}
1354
1355static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1356 void *context, int idx, int mode,
1357 u64 data)
1358{
1359 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1360
1361 if (dd->per_sdma && idx < dd->num_sdma)
1362 return dd->per_sdma[idx].progress_int_cnt;
1363 return 0;
1364}
1365
1366static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
17fb4f29 1367 int vl, int mode, u64 data)
77241056 1368{
a787bde8 1369 struct hfi1_devdata *dd = context;
1370
1371 u64 val = 0;
1372 u64 csr = entry->csr;
1373
1374 if (entry->flags & CNTR_VL) {
1375 if (vl == CNTR_INVALID_VL)
1376 return 0;
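		/* per-VL copies are consecutive 64-bit CSRs, 8 bytes apart */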
1377 csr += 8 * vl;
1378 } else {
1379 if (vl != CNTR_INVALID_VL)
1380 return 0;
1381 }
1382
1383 val = read_write_csr(dd, csr, mode, data);
1384 return val;
1385}
1386
1387static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
17fb4f29 1388 int vl, int mode, u64 data)
77241056 1389{
a787bde8 1390 struct hfi1_devdata *dd = context;
1391 u32 csr = entry->csr;
1392 int ret = 0;
1393
1394 if (vl != CNTR_INVALID_VL)
1395 return 0;
1396 if (mode == CNTR_MODE_R)
1397 ret = read_lcb_csr(dd, csr, &data);
1398 else if (mode == CNTR_MODE_W)
1399 ret = write_lcb_csr(dd, csr, data);
1400
1401 if (ret) {
1402 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1403 return 0;
1404 }
1405
1406 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1407 return data;
1408}
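/*
 * Note: read_lcb_csr()/write_lcb_csr() return nonzero when the LCB is not
 * currently accessible from the host, so a failed access is logged and the
 * counter reads as zero rather than propagating an error.
 */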
1409
1410/* Port Access */
1411static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
17fb4f29 1412 int vl, int mode, u64 data)
77241056 1413{
a787bde8 1414 struct hfi1_pportdata *ppd = context;
1415
1416 if (vl != CNTR_INVALID_VL)
1417 return 0;
1418 return read_write_csr(ppd->dd, entry->csr, mode, data);
1419}
1420
1421static u64 port_access_u64_csr(const struct cntr_entry *entry,
17fb4f29 1422 void *context, int vl, int mode, u64 data)
77241056 1423{
a787bde8 1424 struct hfi1_pportdata *ppd = context;
1425 u64 val;
1426 u64 csr = entry->csr;
1427
1428 if (entry->flags & CNTR_VL) {
1429 if (vl == CNTR_INVALID_VL)
1430 return 0;
1431 csr += 8 * vl;
1432 } else {
1433 if (vl != CNTR_INVALID_VL)
1434 return 0;
1435 }
1436 val = read_write_csr(ppd->dd, csr, mode, data);
1437 return val;
1438}
1439
1440/* Software defined */
1441static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1442 u64 data)
1443{
1444 u64 ret;
1445
1446 if (mode == CNTR_MODE_R) {
1447 ret = *cntr;
1448 } else if (mode == CNTR_MODE_W) {
1449 *cntr = data;
1450 ret = data;
1451 } else {
1452 dd_dev_err(dd, "Invalid cntr sw access mode");
1453 return 0;
1454 }
1455
1456 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1457
1458 return ret;
1459}
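/*
 * read_write_sw() gives plain 64-bit software counters the same read/write
 * semantics as the CSR helpers above, so hardware and software counters can
 * share one cntr_entry table.
 */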
1460
1461static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
17fb4f29 1462 int vl, int mode, u64 data)
77241056 1463{
a787bde8 1464 struct hfi1_pportdata *ppd = context;
1465
1466 if (vl != CNTR_INVALID_VL)
1467 return 0;
1468 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1469}
1470
1471static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
17fb4f29 1472 int vl, int mode, u64 data)
77241056 1473{
a787bde8 1474 struct hfi1_pportdata *ppd = context;
1475
1476 if (vl != CNTR_INVALID_VL)
1477 return 0;
1478 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1479}
1480
1481static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1482 void *context, int vl, int mode,
1483 u64 data)
1484{
1485 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1486
1487 if (vl != CNTR_INVALID_VL)
1488 return 0;
1489 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1490}
1491
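/*
 * Transmit discards are kept per VL and in aggregate; an out-of-range VL is
 * routed to a throwaway local so the caller needs no bounds check.
 */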
77241056 1492static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
17fb4f29 1493 void *context, int vl, int mode, u64 data)
77241056 1494{
1495 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1496 u64 zero = 0;
1497 u64 *counter;
77241056 1498
1499 if (vl == CNTR_INVALID_VL)
1500 counter = &ppd->port_xmit_discards;
1501 else if (vl >= 0 && vl < C_VL_COUNT)
1502 counter = &ppd->port_xmit_discards_vl[vl];
1503 else
1504 counter = &zero;
77241056 1505
69a00b8e 1506 return read_write_sw(ppd->dd, counter, mode, data);
1507}
1508
1509static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1510 void *context, int vl, int mode,
1511 u64 data)
77241056 1512{
a787bde8 1513 struct hfi1_pportdata *ppd = context;
1514
1515 if (vl != CNTR_INVALID_VL)
1516 return 0;
1517
1518 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1519 mode, data);
1520}
1521
1522static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
17fb4f29 1523 void *context, int vl, int mode, u64 data)
77241056 1524{
a787bde8 1525 struct hfi1_pportdata *ppd = context;
1526
1527 if (vl != CNTR_INVALID_VL)
1528 return 0;
1529
1530 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1531 mode, data);
1532}
1533
1534u64 get_all_cpu_total(u64 __percpu *cntr)
1535{
1536 int cpu;
1537 u64 counter = 0;
1538
1539 for_each_possible_cpu(cpu)
1540 counter += *per_cpu_ptr(cntr, cpu);
1541 return counter;
1542}
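/*
 * Per-CPU counters are bumped locklessly on whichever CPU takes the event;
 * a total only exists at read time.  A sketch of the update side (using the
 * int_counter field referenced below):
 *
 *	this_cpu_inc(*dd->int_counter);
 *
 * which is why the read path must sum over every possible CPU, as above.
 */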
1543
1544static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1545 u64 __percpu *cntr,
1546 int vl, int mode, u64 data)
1547{
1548 u64 ret = 0;
1549
1550 if (vl != CNTR_INVALID_VL)
1551 return 0;
1552
1553 if (mode == CNTR_MODE_R) {
1554 ret = get_all_cpu_total(cntr) - *z_val;
1555 } else if (mode == CNTR_MODE_W) {
1556 /* A write can only zero the counter */
1557 if (data == 0)
1558 *z_val = get_all_cpu_total(cntr);
1559 else
1560 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1561 } else {
1562 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1563 return 0;
1564 }
1565
1566 return ret;
1567}
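/*
 * Worked example of the zeroing scheme: with a per-CPU total of 100, a
 * write of 0 sets *z_val to 100; a later read with the total at 130 then
 * reports 130 - 100 = 30.  The per-CPU data itself is never touched, only
 * the baseline.
 */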
1568
1569static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1570 void *context, int vl, int mode, u64 data)
1571{
a787bde8 1572 struct hfi1_devdata *dd = context;
1573
1574 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1575 mode, data);
1576}
1577
1578static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
17fb4f29 1579 void *context, int vl, int mode, u64 data)
77241056 1580{
a787bde8 1581 struct hfi1_devdata *dd = context;
1582
1583 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1584 mode, data);
1585}
1586
1587static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1588 void *context, int vl, int mode, u64 data)
1589{
a787bde8 1590 struct hfi1_devdata *dd = context;
1591
1592 return dd->verbs_dev.n_piowait;
1593}
1594
1595static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1596 void *context, int vl, int mode, u64 data)
1597{
1598 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1599
1600 return dd->verbs_dev.n_piodrain;
1601}
1602
1603static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1604 void *context, int vl, int mode, u64 data)
1605{
a787bde8 1606 struct hfi1_devdata *dd = context;
1607
1608 return dd->verbs_dev.n_txwait;
1609}
1610
1611static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1612 void *context, int vl, int mode, u64 data)
1613{
a787bde8 1614 struct hfi1_devdata *dd = context;
1615
1616 return dd->verbs_dev.n_kmem_wait;
1617}
1618
b421922e 1619static u64 access_sw_send_schedule(const struct cntr_entry *entry,
17fb4f29 1620 void *context, int vl, int mode, u64 data)
1621{
1622 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1623
1624 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1625 mode, data);
1626}
1627
1628/* Software counters for the error status bits within MISC_ERR_STATUS */
1629static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1630 void *context, int vl, int mode,
1631 u64 data)
1632{
1633 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1634
1635 return dd->misc_err_status_cnt[12];
1636}
1637
1638static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1639 void *context, int vl, int mode,
1640 u64 data)
1641{
1642 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1643
1644 return dd->misc_err_status_cnt[11];
1645}
1646
1647static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1648 void *context, int vl, int mode,
1649 u64 data)
1650{
1651 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1652
1653 return dd->misc_err_status_cnt[10];
1654}
1655
1656static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1657 void *context, int vl,
1658 int mode, u64 data)
1659{
1660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1661
1662 return dd->misc_err_status_cnt[9];
1663}
1664
1665static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1666 void *context, int vl, int mode,
1667 u64 data)
1668{
1669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1670
1671 return dd->misc_err_status_cnt[8];
1672}
1673
1674static u64 access_misc_efuse_read_bad_addr_err_cnt(
1675 const struct cntr_entry *entry,
1676 void *context, int vl, int mode, u64 data)
1677{
1678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1679
1680 return dd->misc_err_status_cnt[7];
1681}
1682
1683static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1684 void *context, int vl,
1685 int mode, u64 data)
1686{
1687 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1688
1689 return dd->misc_err_status_cnt[6];
1690}
1691
1692static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1693 void *context, int vl, int mode,
1694 u64 data)
1695{
1696 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1697
1698 return dd->misc_err_status_cnt[5];
1699}
1700
1701static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1702 void *context, int vl, int mode,
1703 u64 data)
1704{
1705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1706
1707 return dd->misc_err_status_cnt[4];
1708}
1709
1710static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1711 void *context, int vl,
1712 int mode, u64 data)
1713{
1714 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1715
1716 return dd->misc_err_status_cnt[3];
1717}
1718
1719static u64 access_misc_csr_write_bad_addr_err_cnt(
1720 const struct cntr_entry *entry,
1721 void *context, int vl, int mode, u64 data)
1722{
1723 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1724
1725 return dd->misc_err_status_cnt[2];
1726}
1727
1728static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1729 void *context, int vl,
1730 int mode, u64 data)
1731{
1732 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1733
1734 return dd->misc_err_status_cnt[1];
1735}
1736
1737static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1738 void *context, int vl, int mode,
1739 u64 data)
1740{
1741 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1742
1743 return dd->misc_err_status_cnt[0];
1744}
1745
1746/*
1747 * Software counter for the aggregate of
1748 * individual CceErrStatus counters
1749 */
1750static u64 access_sw_cce_err_status_aggregated_cnt(
1751 const struct cntr_entry *entry,
1752 void *context, int vl, int mode, u64 data)
1753{
1754 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1755
1756 return dd->sw_cce_err_status_aggregate;
1757}
1758
1759/*
1760 * Software counters corresponding to each of the
1761 * error status bits within CceErrStatus
1762 */
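/*
 * The array index mirrors the bit position within CceErrStatus: bit 40 is
 * counted in cce_err_status_cnt[40], bit 39 in cce_err_status_cnt[39], and
 * so on down to bit 0.
 */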
1763static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1764 void *context, int vl, int mode,
1765 u64 data)
1766{
1767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1768
1769 return dd->cce_err_status_cnt[40];
1770}
1771
1772static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1773 void *context, int vl, int mode,
1774 u64 data)
1775{
1776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1777
1778 return dd->cce_err_status_cnt[39];
1779}
1780
1781static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1782 void *context, int vl, int mode,
1783 u64 data)
1784{
1785 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1786
1787 return dd->cce_err_status_cnt[38];
1788}
1789
1790static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1791 void *context, int vl, int mode,
1792 u64 data)
1793{
1794 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1795
1796 return dd->cce_err_status_cnt[37];
1797}
1798
1799static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1800 void *context, int vl, int mode,
1801 u64 data)
1802{
1803 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1804
1805 return dd->cce_err_status_cnt[36];
1806}
1807
1808static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1809 const struct cntr_entry *entry,
1810 void *context, int vl, int mode, u64 data)
1811{
1812 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1813
1814 return dd->cce_err_status_cnt[35];
1815}
1816
1817static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1818 const struct cntr_entry *entry,
1819 void *context, int vl, int mode, u64 data)
1820{
1821 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1822
1823 return dd->cce_err_status_cnt[34];
1824}
1825
1826static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1827 void *context, int vl,
1828 int mode, u64 data)
1829{
1830 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1831
1832 return dd->cce_err_status_cnt[33];
1833}
1834
1835static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1836 void *context, int vl, int mode,
1837 u64 data)
1838{
1839 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1840
1841 return dd->cce_err_status_cnt[32];
1842}
1843
1844static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1845 void *context, int vl, int mode, u64 data)
1846{
1847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1848
1849 return dd->cce_err_status_cnt[31];
1850}
1851
1852static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1853 void *context, int vl, int mode,
1854 u64 data)
1855{
1856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1857
1858 return dd->cce_err_status_cnt[30];
1859}
1860
1861static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1862 void *context, int vl, int mode,
1863 u64 data)
1864{
1865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1866
1867 return dd->cce_err_status_cnt[29];
1868}
1869
1870static u64 access_pcic_transmit_back_parity_err_cnt(
1871 const struct cntr_entry *entry,
1872 void *context, int vl, int mode, u64 data)
1873{
1874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1875
1876 return dd->cce_err_status_cnt[28];
1877}
1878
1879static u64 access_pcic_transmit_front_parity_err_cnt(
1880 const struct cntr_entry *entry,
1881 void *context, int vl, int mode, u64 data)
1882{
1883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1884
1885 return dd->cce_err_status_cnt[27];
1886}
1887
1888static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1889 void *context, int vl, int mode,
1890 u64 data)
1891{
1892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1893
1894 return dd->cce_err_status_cnt[26];
1895}
1896
1897static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1898 void *context, int vl, int mode,
1899 u64 data)
1900{
1901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1902
1903 return dd->cce_err_status_cnt[25];
1904}
1905
1906static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1907 void *context, int vl, int mode,
1908 u64 data)
1909{
1910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1911
1912 return dd->cce_err_status_cnt[24];
1913}
1914
1915static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1916 void *context, int vl, int mode,
1917 u64 data)
1918{
1919 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1920
1921 return dd->cce_err_status_cnt[23];
1922}
1923
1924static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1925 void *context, int vl,
1926 int mode, u64 data)
1927{
1928 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1929
1930 return dd->cce_err_status_cnt[22];
1931}
1932
1933static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1934 void *context, int vl, int mode,
1935 u64 data)
1936{
1937 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1938
1939 return dd->cce_err_status_cnt[21];
1940}
1941
1942static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1943 const struct cntr_entry *entry,
1944 void *context, int vl, int mode, u64 data)
1945{
1946 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1947
1948 return dd->cce_err_status_cnt[20];
1949}
1950
1951static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1952 void *context, int vl,
1953 int mode, u64 data)
1954{
1955 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1956
1957 return dd->cce_err_status_cnt[19];
1958}
1959
1960static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1961 void *context, int vl, int mode,
1962 u64 data)
1963{
1964 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1965
1966 return dd->cce_err_status_cnt[18];
1967}
1968
1969static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1970 void *context, int vl, int mode,
1971 u64 data)
1972{
1973 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1974
1975 return dd->cce_err_status_cnt[17];
1976}
1977
1978static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1979 void *context, int vl, int mode,
1980 u64 data)
1981{
1982 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1983
1984 return dd->cce_err_status_cnt[16];
1985}
1986
1987static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1988 void *context, int vl, int mode,
1989 u64 data)
1990{
1991 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1992
1993 return dd->cce_err_status_cnt[15];
1994}
1995
1996static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1997 void *context, int vl,
1998 int mode, u64 data)
1999{
2000 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2001
2002 return dd->cce_err_status_cnt[14];
2003}
2004
2005static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2006 void *context, int vl, int mode,
2007 u64 data)
2008{
2009 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2010
2011 return dd->cce_err_status_cnt[13];
2012}
2013
2014static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2015 const struct cntr_entry *entry,
2016 void *context, int vl, int mode, u64 data)
2017{
2018 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2019
2020 return dd->cce_err_status_cnt[12];
2021}
2022
2023static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2024 const struct cntr_entry *entry,
2025 void *context, int vl, int mode, u64 data)
2026{
2027 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2028
2029 return dd->cce_err_status_cnt[11];
2030}
2031
2032static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2033 const struct cntr_entry *entry,
2034 void *context, int vl, int mode, u64 data)
2035{
2036 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2037
2038 return dd->cce_err_status_cnt[10];
2039}
2040
2041static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2042 const struct cntr_entry *entry,
2043 void *context, int vl, int mode, u64 data)
2044{
2045 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2046
2047 return dd->cce_err_status_cnt[9];
2048}
2049
2050static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2051 const struct cntr_entry *entry,
2052 void *context, int vl, int mode, u64 data)
2053{
2054 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2055
2056 return dd->cce_err_status_cnt[8];
2057}
2058
2059static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2060 void *context, int vl,
2061 int mode, u64 data)
2062{
2063 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2064
2065 return dd->cce_err_status_cnt[7];
2066}
2067
2068static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2069 const struct cntr_entry *entry,
2070 void *context, int vl, int mode, u64 data)
2071{
2072 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2073
2074 return dd->cce_err_status_cnt[6];
2075}
2076
2077static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2078 void *context, int vl, int mode,
2079 u64 data)
2080{
2081 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2082
2083 return dd->cce_err_status_cnt[5];
2084}
2085
2086static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2087 void *context, int vl, int mode,
2088 u64 data)
2089{
2090 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2091
2092 return dd->cce_err_status_cnt[4];
2093}
2094
2095static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2096 const struct cntr_entry *entry,
2097 void *context, int vl, int mode, u64 data)
2098{
2099 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2100
2101 return dd->cce_err_status_cnt[3];
2102}
2103
2104static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2105 void *context, int vl,
2106 int mode, u64 data)
2107{
2108 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2109
2110 return dd->cce_err_status_cnt[2];
2111}
2112
2113static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2114 void *context, int vl,
2115 int mode, u64 data)
2116{
2117 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2118
2119 return dd->cce_err_status_cnt[1];
2120}
2121
2122static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2123 void *context, int vl, int mode,
2124 u64 data)
2125{
2126 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2127
2128 return dd->cce_err_status_cnt[0];
2129}
2130
2131/*
2132 * Software counters corresponding to each of the
2133 * error status bits within RcvErrStatus
2134 */
2135static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2136 void *context, int vl, int mode,
2137 u64 data)
2138{
2139 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140
2141 return dd->rcv_err_status_cnt[63];
2142}
2143
2144static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2145 void *context, int vl,
2146 int mode, u64 data)
2147{
2148 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149
2150 return dd->rcv_err_status_cnt[62];
2151}
2152
2153static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2154 void *context, int vl, int mode,
2155 u64 data)
2156{
2157 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158
2159 return dd->rcv_err_status_cnt[61];
2160}
2161
2162static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2163 void *context, int vl, int mode,
2164 u64 data)
2165{
2166 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167
2168 return dd->rcv_err_status_cnt[60];
2169}
2170
2171static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2172 void *context, int vl,
2173 int mode, u64 data)
2174{
2175 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176
2177 return dd->rcv_err_status_cnt[59];
2178}
2179
2180static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2181 void *context, int vl,
2182 int mode, u64 data)
2183{
2184 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185
2186 return dd->rcv_err_status_cnt[58];
2187}
2188
2189static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2190 void *context, int vl, int mode,
2191 u64 data)
2192{
2193 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194
2195 return dd->rcv_err_status_cnt[57];
2196}
2197
2198static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2199 void *context, int vl, int mode,
2200 u64 data)
2201{
2202 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203
2204 return dd->rcv_err_status_cnt[56];
2205}
2206
2207static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2208 void *context, int vl, int mode,
2209 u64 data)
2210{
2211 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212
2213 return dd->rcv_err_status_cnt[55];
2214}
2215
2216static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2217 const struct cntr_entry *entry,
2218 void *context, int vl, int mode, u64 data)
2219{
2220 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2221
2222 return dd->rcv_err_status_cnt[54];
2223}
2224
2225static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2226 const struct cntr_entry *entry,
2227 void *context, int vl, int mode, u64 data)
2228{
2229 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2230
2231 return dd->rcv_err_status_cnt[53];
2232}
2233
2234static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2235 void *context, int vl,
2236 int mode, u64 data)
2237{
2238 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2239
2240 return dd->rcv_err_status_cnt[52];
2241}
2242
2243static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2244 void *context, int vl,
2245 int mode, u64 data)
2246{
2247 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2248
2249 return dd->rcv_err_status_cnt[51];
2250}
2251
2252static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2253 void *context, int vl,
2254 int mode, u64 data)
2255{
2256 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2257
2258 return dd->rcv_err_status_cnt[50];
2259}
2260
2261static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2262 void *context, int vl,
2263 int mode, u64 data)
2264{
2265 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2266
2267 return dd->rcv_err_status_cnt[49];
2268}
2269
2270static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2271 void *context, int vl,
2272 int mode, u64 data)
2273{
2274 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2275
2276 return dd->rcv_err_status_cnt[48];
2277}
2278
2279static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2280 void *context, int vl,
2281 int mode, u64 data)
2282{
2283 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2284
2285 return dd->rcv_err_status_cnt[47];
2286}
2287
2288static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2289 void *context, int vl, int mode,
2290 u64 data)
2291{
2292 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2293
2294 return dd->rcv_err_status_cnt[46];
2295}
2296
2297static u64 access_rx_hq_intr_csr_parity_err_cnt(
2298 const struct cntr_entry *entry,
2299 void *context, int vl, int mode, u64 data)
2300{
2301 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2302
2303 return dd->rcv_err_status_cnt[45];
2304}
2305
2306static u64 access_rx_lookup_csr_parity_err_cnt(
2307 const struct cntr_entry *entry,
2308 void *context, int vl, int mode, u64 data)
2309{
2310 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2311
2312 return dd->rcv_err_status_cnt[44];
2313}
2314
2315static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2316 const struct cntr_entry *entry,
2317 void *context, int vl, int mode, u64 data)
2318{
2319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2320
2321 return dd->rcv_err_status_cnt[43];
2322}
2323
2324static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2325 const struct cntr_entry *entry,
2326 void *context, int vl, int mode, u64 data)
2327{
2328 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2329
2330 return dd->rcv_err_status_cnt[42];
2331}
2332
2333static u64 access_rx_lookup_des_part2_parity_err_cnt(
2334 const struct cntr_entry *entry,
2335 void *context, int vl, int mode, u64 data)
2336{
2337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2338
2339 return dd->rcv_err_status_cnt[41];
2340}
2341
2342static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2343 const struct cntr_entry *entry,
2344 void *context, int vl, int mode, u64 data)
2345{
2346 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2347
2348 return dd->rcv_err_status_cnt[40];
2349}
2350
2351static u64 access_rx_lookup_des_part1_unc_err_cnt(
2352 const struct cntr_entry *entry,
2353 void *context, int vl, int mode, u64 data)
2354{
2355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2356
2357 return dd->rcv_err_status_cnt[39];
2358}
2359
2360static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2361 const struct cntr_entry *entry,
2362 void *context, int vl, int mode, u64 data)
2363{
2364 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2365
2366 return dd->rcv_err_status_cnt[38];
2367}
2368
2369static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2370 const struct cntr_entry *entry,
2371 void *context, int vl, int mode, u64 data)
2372{
2373 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2374
2375 return dd->rcv_err_status_cnt[37];
2376}
2377
2378static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2379 const struct cntr_entry *entry,
2380 void *context, int vl, int mode, u64 data)
2381{
2382 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2383
2384 return dd->rcv_err_status_cnt[36];
2385}
2386
2387static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2388 const struct cntr_entry *entry,
2389 void *context, int vl, int mode, u64 data)
2390{
2391 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2392
2393 return dd->rcv_err_status_cnt[35];
2394}
2395
2396static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2397 const struct cntr_entry *entry,
2398 void *context, int vl, int mode, u64 data)
2399{
2400 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2401
2402 return dd->rcv_err_status_cnt[34];
2403}
2404
2405static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2406 const struct cntr_entry *entry,
2407 void *context, int vl, int mode, u64 data)
2408{
2409 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2410
2411 return dd->rcv_err_status_cnt[33];
2412}
2413
2414static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2415 void *context, int vl, int mode,
2416 u64 data)
2417{
2418 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2419
2420 return dd->rcv_err_status_cnt[32];
2421}
2422
2423static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2424 void *context, int vl, int mode,
2425 u64 data)
2426{
2427 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2428
2429 return dd->rcv_err_status_cnt[31];
2430}
2431
2432static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2433 void *context, int vl, int mode,
2434 u64 data)
2435{
2436 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2437
2438 return dd->rcv_err_status_cnt[30];
2439}
2440
2441static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2442 void *context, int vl, int mode,
2443 u64 data)
2444{
2445 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2446
2447 return dd->rcv_err_status_cnt[29];
2448}
2449
2450static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2451 void *context, int vl,
2452 int mode, u64 data)
2453{
2454 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2455
2456 return dd->rcv_err_status_cnt[28];
2457}
2458
2459static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2460 const struct cntr_entry *entry,
2461 void *context, int vl, int mode, u64 data)
2462{
2463 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2464
2465 return dd->rcv_err_status_cnt[27];
2466}
2467
2468static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2469 const struct cntr_entry *entry,
2470 void *context, int vl, int mode, u64 data)
2471{
2472 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2473
2474 return dd->rcv_err_status_cnt[26];
2475}
2476
2477static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2478 const struct cntr_entry *entry,
2479 void *context, int vl, int mode, u64 data)
2480{
2481 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2482
2483 return dd->rcv_err_status_cnt[25];
2484}
2485
2486static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2487 const struct cntr_entry *entry,
2488 void *context, int vl, int mode, u64 data)
2489{
2490 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2491
2492 return dd->rcv_err_status_cnt[24];
2493}
2494
2495static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2496 const struct cntr_entry *entry,
2497 void *context, int vl, int mode, u64 data)
2498{
2499 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2500
2501 return dd->rcv_err_status_cnt[23];
2502}
2503
2504static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2505 const struct cntr_entry *entry,
2506 void *context, int vl, int mode, u64 data)
2507{
2508 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2509
2510 return dd->rcv_err_status_cnt[22];
2511}
2512
2513static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2514 const struct cntr_entry *entry,
2515 void *context, int vl, int mode, u64 data)
2516{
2517 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2518
2519 return dd->rcv_err_status_cnt[21];
2520}
2521
2522static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2523 const struct cntr_entry *entry,
2524 void *context, int vl, int mode, u64 data)
2525{
2526 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2527
2528 return dd->rcv_err_status_cnt[20];
2529}
2530
2531static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2532 const struct cntr_entry *entry,
2533 void *context, int vl, int mode, u64 data)
2534{
2535 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2536
2537 return dd->rcv_err_status_cnt[19];
2538}
2539
2540static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2541 void *context, int vl,
2542 int mode, u64 data)
2543{
2544 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2545
2546 return dd->rcv_err_status_cnt[18];
2547}
2548
2549static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2550 void *context, int vl,
2551 int mode, u64 data)
2552{
2553 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2554
2555 return dd->rcv_err_status_cnt[17];
2556}
2557
2558static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2559 const struct cntr_entry *entry,
2560 void *context, int vl, int mode, u64 data)
2561{
2562 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2563
2564 return dd->rcv_err_status_cnt[16];
2565}
2566
2567static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2568 const struct cntr_entry *entry,
2569 void *context, int vl, int mode, u64 data)
2570{
2571 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2572
2573 return dd->rcv_err_status_cnt[15];
2574}
2575
2576static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2577 void *context, int vl,
2578 int mode, u64 data)
2579{
2580 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2581
2582 return dd->rcv_err_status_cnt[14];
2583}
2584
2585static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2586 void *context, int vl,
2587 int mode, u64 data)
2588{
2589 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2590
2591 return dd->rcv_err_status_cnt[13];
2592}
2593
2594static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2595 void *context, int vl, int mode,
2596 u64 data)
2597{
2598 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2599
2600 return dd->rcv_err_status_cnt[12];
2601}
2602
2603static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2604 void *context, int vl, int mode,
2605 u64 data)
2606{
2607 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2608
2609 return dd->rcv_err_status_cnt[11];
2610}
2611
2612static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2613 void *context, int vl, int mode,
2614 u64 data)
2615{
2616 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2617
2618 return dd->rcv_err_status_cnt[10];
2619}
2620
2621static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2622 void *context, int vl, int mode,
2623 u64 data)
2624{
2625 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2626
2627 return dd->rcv_err_status_cnt[9];
2628}
2629
2630static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2631 void *context, int vl, int mode,
2632 u64 data)
2633{
2634 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2635
2636 return dd->rcv_err_status_cnt[8];
2637}
2638
2639static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2640 const struct cntr_entry *entry,
2641 void *context, int vl, int mode, u64 data)
2642{
2643 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2644
2645 return dd->rcv_err_status_cnt[7];
2646}
2647
2648static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2649 const struct cntr_entry *entry,
2650 void *context, int vl, int mode, u64 data)
2651{
2652 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2653
2654 return dd->rcv_err_status_cnt[6];
2655}
2656
2657static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2658 void *context, int vl, int mode,
2659 u64 data)
2660{
2661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2662
2663 return dd->rcv_err_status_cnt[5];
2664}
2665
2666static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2667 void *context, int vl, int mode,
2668 u64 data)
2669{
2670 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2671
2672 return dd->rcv_err_status_cnt[4];
2673}
2674
2675static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2676 void *context, int vl, int mode,
2677 u64 data)
2678{
2679 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2680
2681 return dd->rcv_err_status_cnt[3];
2682}
2683
2684static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2685 void *context, int vl, int mode,
2686 u64 data)
2687{
2688 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2689
2690 return dd->rcv_err_status_cnt[2];
2691}
2692
2693static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2694 void *context, int vl, int mode,
2695 u64 data)
2696{
2697 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2698
2699 return dd->rcv_err_status_cnt[1];
2700}
2701
2702static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2703 void *context, int vl, int mode,
2704 u64 data)
2705{
2706 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2707
2708 return dd->rcv_err_status_cnt[0];
2709}
2710
2711/*
2712 * Software counters corresponding to each of the
2713 * error status bits within SendPioErrStatus
2714 */
2715static u64 access_pio_pec_sop_head_parity_err_cnt(
2716 const struct cntr_entry *entry,
2717 void *context, int vl, int mode, u64 data)
2718{
2719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720
2721 return dd->send_pio_err_status_cnt[35];
2722}
2723
2724static u64 access_pio_pcc_sop_head_parity_err_cnt(
2725 const struct cntr_entry *entry,
2726 void *context, int vl, int mode, u64 data)
2727{
2728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729
2730 return dd->send_pio_err_status_cnt[34];
2731}
2732
2733static u64 access_pio_last_returned_cnt_parity_err_cnt(
2734 const struct cntr_entry *entry,
2735 void *context, int vl, int mode, u64 data)
2736{
2737 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738
2739 return dd->send_pio_err_status_cnt[33];
2740}
2741
2742static u64 access_pio_current_free_cnt_parity_err_cnt(
2743 const struct cntr_entry *entry,
2744 void *context, int vl, int mode, u64 data)
2745{
2746 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747
2748 return dd->send_pio_err_status_cnt[32];
2749}
2750
2751static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2752 void *context, int vl, int mode,
2753 u64 data)
2754{
2755 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756
2757 return dd->send_pio_err_status_cnt[31];
2758}
2759
2760static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2761 void *context, int vl, int mode,
2762 u64 data)
2763{
2764 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765
2766 return dd->send_pio_err_status_cnt[30];
2767}
2768
2769static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2770 void *context, int vl, int mode,
2771 u64 data)
2772{
2773 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774
2775 return dd->send_pio_err_status_cnt[29];
2776}
2777
2778static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2779 const struct cntr_entry *entry,
2780 void *context, int vl, int mode, u64 data)
2781{
2782 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783
2784 return dd->send_pio_err_status_cnt[28];
2785}
2786
2787static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2788 void *context, int vl, int mode,
2789 u64 data)
2790{
2791 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792
2793 return dd->send_pio_err_status_cnt[27];
2794}
2795
2796static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2797 void *context, int vl, int mode,
2798 u64 data)
2799{
2800 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2801
2802 return dd->send_pio_err_status_cnt[26];
2803}
2804
2805static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2806 void *context, int vl,
2807 int mode, u64 data)
2808{
2809 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2810
2811 return dd->send_pio_err_status_cnt[25];
2812}
2813
2814static u64 access_pio_block_qw_count_parity_err_cnt(
2815 const struct cntr_entry *entry,
2816 void *context, int vl, int mode, u64 data)
2817{
2818 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2819
2820 return dd->send_pio_err_status_cnt[24];
2821}
2822
2823static u64 access_pio_write_qw_valid_parity_err_cnt(
2824 const struct cntr_entry *entry,
2825 void *context, int vl, int mode, u64 data)
2826{
2827 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2828
2829 return dd->send_pio_err_status_cnt[23];
2830}
2831
2832static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2833 void *context, int vl, int mode,
2834 u64 data)
2835{
2836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2837
2838 return dd->send_pio_err_status_cnt[22];
2839}
2840
2841static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2842 void *context, int vl,
2843 int mode, u64 data)
2844{
2845 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2846
2847 return dd->send_pio_err_status_cnt[21];
2848}
2849
2850static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2851 void *context, int vl,
2852 int mode, u64 data)
2853{
2854 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2855
2856 return dd->send_pio_err_status_cnt[20];
2857}
2858
2859static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2860 void *context, int vl,
2861 int mode, u64 data)
2862{
2863 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2864
2865 return dd->send_pio_err_status_cnt[19];
2866}
2867
2868static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2869 const struct cntr_entry *entry,
2870 void *context, int vl, int mode, u64 data)
2871{
2872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2873
2874 return dd->send_pio_err_status_cnt[18];
2875}
2876
2877static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2878 void *context, int vl, int mode,
2879 u64 data)
2880{
2881 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2882
2883 return dd->send_pio_err_status_cnt[17];
2884}
2885
2886static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2887 void *context, int vl, int mode,
2888 u64 data)
2889{
2890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2891
2892 return dd->send_pio_err_status_cnt[16];
2893}
2894
2895static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2896 const struct cntr_entry *entry,
2897 void *context, int vl, int mode, u64 data)
2898{
2899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2900
2901 return dd->send_pio_err_status_cnt[15];
2902}
2903
2904static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2905 const struct cntr_entry *entry,
2906 void *context, int vl, int mode, u64 data)
2907{
2908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2909
2910 return dd->send_pio_err_status_cnt[14];
2911}
2912
2913static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2914 const struct cntr_entry *entry,
2915 void *context, int vl, int mode, u64 data)
2916{
2917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2918
2919 return dd->send_pio_err_status_cnt[13];
2920}
2921
2922static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2923 const struct cntr_entry *entry,
2924 void *context, int vl, int mode, u64 data)
2925{
2926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2927
2928 return dd->send_pio_err_status_cnt[12];
2929}
2930
2931static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2932 const struct cntr_entry *entry,
2933 void *context, int vl, int mode, u64 data)
2934{
2935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2936
2937 return dd->send_pio_err_status_cnt[11];
2938}
2939
2940static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2941 const struct cntr_entry *entry,
2942 void *context, int vl, int mode, u64 data)
2943{
2944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2945
2946 return dd->send_pio_err_status_cnt[10];
2947}
2948
2949static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2950 const struct cntr_entry *entry,
2951 void *context, int vl, int mode, u64 data)
2952{
2953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2954
2955 return dd->send_pio_err_status_cnt[9];
2956}
2957
2958static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2959 const struct cntr_entry *entry,
2960 void *context, int vl, int mode, u64 data)
2961{
2962 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2963
2964 return dd->send_pio_err_status_cnt[8];
2965}
2966
2967static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2968 const struct cntr_entry *entry,
2969 void *context, int vl, int mode, u64 data)
2970{
2971 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2972
2973 return dd->send_pio_err_status_cnt[7];
2974}
2975
2976static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2977 void *context, int vl, int mode,
2978 u64 data)
2979{
2980 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2981
2982 return dd->send_pio_err_status_cnt[6];
2983}
2984
2985static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2986 void *context, int vl, int mode,
2987 u64 data)
2988{
2989 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2990
2991 return dd->send_pio_err_status_cnt[5];
2992}
2993
2994static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2995 void *context, int vl, int mode,
2996 u64 data)
2997{
2998 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2999
3000 return dd->send_pio_err_status_cnt[4];
3001}
3002
3003static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3004 void *context, int vl, int mode,
3005 u64 data)
3006{
3007 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3008
3009 return dd->send_pio_err_status_cnt[3];
3010}
3011
3012static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3013 void *context, int vl, int mode,
3014 u64 data)
3015{
3016 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3017
3018 return dd->send_pio_err_status_cnt[2];
3019}
3020
3021static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3022 void *context, int vl,
3023 int mode, u64 data)
3024{
3025 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3026
3027 return dd->send_pio_err_status_cnt[1];
3028}
3029
3030static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3031 void *context, int vl, int mode,
3032 u64 data)
3033{
3034 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3035
3036 return dd->send_pio_err_status_cnt[0];
3037}
3038
3039/*
3040 * Software counters corresponding to each of the
3041 * error status bits within SendDmaErrStatus
3042 */
3043static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3044 const struct cntr_entry *entry,
3045 void *context, int vl, int mode, u64 data)
3046{
3047 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048
3049 return dd->send_dma_err_status_cnt[3];
3050}
3051
3052static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3053 const struct cntr_entry *entry,
3054 void *context, int vl, int mode, u64 data)
3055{
3056 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3057
3058 return dd->send_dma_err_status_cnt[2];
3059}
3060
3061static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3062 void *context, int vl, int mode,
3063 u64 data)
3064{
3065 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3066
3067 return dd->send_dma_err_status_cnt[1];
3068}
3069
3070static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3071 void *context, int vl, int mode,
3072 u64 data)
3073{
3074 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3075
3076 return dd->send_dma_err_status_cnt[0];
3077}
3078
3079/*
3080 * Software counters corresponding to each of the
3081 * error status bits within SendEgressErrStatus
3082 */
3083static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3084 const struct cntr_entry *entry,
3085 void *context, int vl, int mode, u64 data)
3086{
3087 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3088
3089 return dd->send_egress_err_status_cnt[63];
3090}
3091
3092static u64 access_tx_read_sdma_memory_csr_err_cnt(
3093 const struct cntr_entry *entry,
3094 void *context, int vl, int mode, u64 data)
3095{
3096 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3097
3098 return dd->send_egress_err_status_cnt[62];
3099}
3100
3101static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3102 void *context, int vl, int mode,
3103 u64 data)
3104{
3105 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3106
3107 return dd->send_egress_err_status_cnt[61];
3108}
3109
3110static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3111 void *context, int vl,
3112 int mode, u64 data)
3113{
3114 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3115
3116 return dd->send_egress_err_status_cnt[60];
3117}
3118
3119static u64 access_tx_read_sdma_memory_cor_err_cnt(
3120 const struct cntr_entry *entry,
3121 void *context, int vl, int mode, u64 data)
3122{
3123 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3124
3125 return dd->send_egress_err_status_cnt[59];
3126}
3127
3128static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3129 void *context, int vl, int mode,
3130 u64 data)
3131{
3132 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3133
3134 return dd->send_egress_err_status_cnt[58];
3135}
3136
3137static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3138 void *context, int vl, int mode,
3139 u64 data)
3140{
3141 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142
3143 return dd->send_egress_err_status_cnt[57];
3144}
3145
3146static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3147 void *context, int vl, int mode,
3148 u64 data)
3149{
3150 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151
3152 return dd->send_egress_err_status_cnt[56];
3153}
3154
3155static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3156 void *context, int vl, int mode,
3157 u64 data)
3158{
3159 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160
3161 return dd->send_egress_err_status_cnt[55];
3162}
3163
3164static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3165 void *context, int vl, int mode,
3166 u64 data)
3167{
3168 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3169
3170 return dd->send_egress_err_status_cnt[54];
3171}
3172
3173static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3174 void *context, int vl, int mode,
3175 u64 data)
3176{
3177 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3178
3179 return dd->send_egress_err_status_cnt[53];
3180}
3181
3182static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3183 void *context, int vl, int mode,
3184 u64 data)
3185{
3186 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3187
3188 return dd->send_egress_err_status_cnt[52];
3189}
3190
3191static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3192 void *context, int vl, int mode,
3193 u64 data)
3194{
3195 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3196
3197 return dd->send_egress_err_status_cnt[51];
3198}
3199
3200static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3201 void *context, int vl, int mode,
3202 u64 data)
3203{
3204 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3205
3206 return dd->send_egress_err_status_cnt[50];
3207}
3208
3209static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3210 void *context, int vl, int mode,
3211 u64 data)
3212{
3213 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3214
3215 return dd->send_egress_err_status_cnt[49];
3216}
3217
3218static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3219 void *context, int vl, int mode,
3220 u64 data)
3221{
3222 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3223
3224 return dd->send_egress_err_status_cnt[48];
3225}
3226
3227static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3228 void *context, int vl, int mode,
3229 u64 data)
3230{
3231 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3232
3233 return dd->send_egress_err_status_cnt[47];
3234}
3235
3236static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3237 void *context, int vl, int mode,
3238 u64 data)
3239{
3240 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3241
3242 return dd->send_egress_err_status_cnt[46];
3243}
3244
3245static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3246 void *context, int vl, int mode,
3247 u64 data)
3248{
3249 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3250
3251 return dd->send_egress_err_status_cnt[45];
3252}
3253
3254static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3255 void *context, int vl,
3256 int mode, u64 data)
3257{
3258 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3259
3260 return dd->send_egress_err_status_cnt[44];
3261}
3262
3263static u64 access_tx_read_sdma_memory_unc_err_cnt(
3264 const struct cntr_entry *entry,
3265 void *context, int vl, int mode, u64 data)
3266{
3267 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3268
3269 return dd->send_egress_err_status_cnt[43];
3270}
3271
3272static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3273 void *context, int vl, int mode,
3274 u64 data)
3275{
3276 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3277
3278 return dd->send_egress_err_status_cnt[42];
3279}
3280
3281static u64 access_tx_credit_return_parity_err_cnt(
3282 const struct cntr_entry *entry,
3283 void *context, int vl, int mode, u64 data)
3284{
3285 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3286
3287 return dd->send_egress_err_status_cnt[41];
3288}
3289
3290static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3291 const struct cntr_entry *entry,
3292 void *context, int vl, int mode, u64 data)
3293{
3294 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3295
3296 return dd->send_egress_err_status_cnt[40];
3297}
3298
3299static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3300 const struct cntr_entry *entry,
3301 void *context, int vl, int mode, u64 data)
3302{
3303 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3304
3305 return dd->send_egress_err_status_cnt[39];
3306}
3307
3308static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3309 const struct cntr_entry *entry,
3310 void *context, int vl, int mode, u64 data)
3311{
3312 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3313
3314 return dd->send_egress_err_status_cnt[38];
3315}
3316
3317static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3318 const struct cntr_entry *entry,
3319 void *context, int vl, int mode, u64 data)
3320{
3321 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3322
3323 return dd->send_egress_err_status_cnt[37];
3324}
3325
3326static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3327 const struct cntr_entry *entry,
3328 void *context, int vl, int mode, u64 data)
3329{
3330 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3331
3332 return dd->send_egress_err_status_cnt[36];
3333}
3334
3335static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3336 const struct cntr_entry *entry,
3337 void *context, int vl, int mode, u64 data)
3338{
3339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3340
3341 return dd->send_egress_err_status_cnt[35];
3342}
3343
3344static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3345 const struct cntr_entry *entry,
3346 void *context, int vl, int mode, u64 data)
3347{
3348 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3349
3350 return dd->send_egress_err_status_cnt[34];
3351}
3352
3353static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3354 const struct cntr_entry *entry,
3355 void *context, int vl, int mode, u64 data)
3356{
3357 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3358
3359 return dd->send_egress_err_status_cnt[33];
3360}
3361
3362static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3363 const struct cntr_entry *entry,
3364 void *context, int vl, int mode, u64 data)
3365{
3366 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3367
3368 return dd->send_egress_err_status_cnt[32];
3369}
3370
3371static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3372 const struct cntr_entry *entry,
3373 void *context, int vl, int mode, u64 data)
3374{
3375 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3376
3377 return dd->send_egress_err_status_cnt[31];
3378}
3379
3380static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3381 const struct cntr_entry *entry,
3382 void *context, int vl, int mode, u64 data)
3383{
3384 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3385
3386 return dd->send_egress_err_status_cnt[30];
3387}
3388
3389static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3390 const struct cntr_entry *entry,
3391 void *context, int vl, int mode, u64 data)
3392{
3393 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3394
3395 return dd->send_egress_err_status_cnt[29];
3396}
3397
3398static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3399 const struct cntr_entry *entry,
3400 void *context, int vl, int mode, u64 data)
3401{
3402 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3403
3404 return dd->send_egress_err_status_cnt[28];
3405}
3406
3407static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3408 const struct cntr_entry *entry,
3409 void *context, int vl, int mode, u64 data)
3410{
3411 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3412
3413 return dd->send_egress_err_status_cnt[27];
3414}
3415
3416static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3417 const struct cntr_entry *entry,
3418 void *context, int vl, int mode, u64 data)
3419{
3420 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3421
3422 return dd->send_egress_err_status_cnt[26];
3423}
3424
3425static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3426 const struct cntr_entry *entry,
3427 void *context, int vl, int mode, u64 data)
3428{
3429 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3430
3431 return dd->send_egress_err_status_cnt[25];
3432}
3433
3434static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3435 const struct cntr_entry *entry,
3436 void *context, int vl, int mode, u64 data)
3437{
3438 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3439
3440 return dd->send_egress_err_status_cnt[24];
3441}
3442
3443static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3444 const struct cntr_entry *entry,
3445 void *context, int vl, int mode, u64 data)
3446{
3447 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3448
3449 return dd->send_egress_err_status_cnt[23];
3450}
3451
3452static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3453 const struct cntr_entry *entry,
3454 void *context, int vl, int mode, u64 data)
3455{
3456 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3457
3458 return dd->send_egress_err_status_cnt[22];
3459}
3460
3461static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3462 const struct cntr_entry *entry,
3463 void *context, int vl, int mode, u64 data)
3464{
3465 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3466
3467 return dd->send_egress_err_status_cnt[21];
3468}
3469
3470static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3471 const struct cntr_entry *entry,
3472 void *context, int vl, int mode, u64 data)
3473{
3474 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3475
3476 return dd->send_egress_err_status_cnt[20];
3477}
3478
3479static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3480 const struct cntr_entry *entry,
3481 void *context, int vl, int mode, u64 data)
3482{
3483 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3484
3485 return dd->send_egress_err_status_cnt[19];
3486}
3487
3488static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3489 const struct cntr_entry *entry,
3490 void *context, int vl, int mode, u64 data)
3491{
3492 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3493
3494 return dd->send_egress_err_status_cnt[18];
3495}
3496
3497static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3498 const struct cntr_entry *entry,
3499 void *context, int vl, int mode, u64 data)
3500{
3501 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3502
3503 return dd->send_egress_err_status_cnt[17];
3504}
3505
3506static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3507 const struct cntr_entry *entry,
3508 void *context, int vl, int mode, u64 data)
3509{
3510 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3511
3512 return dd->send_egress_err_status_cnt[16];
3513}
3514
3515static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3516 void *context, int vl, int mode,
3517 u64 data)
3518{
3519 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3520
3521 return dd->send_egress_err_status_cnt[15];
3522}
3523
3524static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3525 void *context, int vl,
3526 int mode, u64 data)
3527{
3528 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3529
3530 return dd->send_egress_err_status_cnt[14];
3531}
3532
3533static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3534 void *context, int vl, int mode,
3535 u64 data)
3536{
3537 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3538
3539 return dd->send_egress_err_status_cnt[13];
3540}
3541
3542static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3543 void *context, int vl, int mode,
3544 u64 data)
3545{
3546 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3547
3548 return dd->send_egress_err_status_cnt[12];
3549}
3550
3551static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3552 const struct cntr_entry *entry,
3553 void *context, int vl, int mode, u64 data)
3554{
3555 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3556
3557 return dd->send_egress_err_status_cnt[11];
3558}
3559
3560static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3561 void *context, int vl, int mode,
3562 u64 data)
3563{
3564 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3565
3566 return dd->send_egress_err_status_cnt[10];
3567}
3568
3569static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3570 void *context, int vl, int mode,
3571 u64 data)
3572{
3573 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3574
3575 return dd->send_egress_err_status_cnt[9];
3576}
3577
3578static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3579 const struct cntr_entry *entry,
3580 void *context, int vl, int mode, u64 data)
3581{
3582 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3583
3584 return dd->send_egress_err_status_cnt[8];
3585}
3586
3587static u64 access_tx_pio_launch_intf_parity_err_cnt(
3588 const struct cntr_entry *entry,
3589 void *context, int vl, int mode, u64 data)
3590{
3591 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3592
3593 return dd->send_egress_err_status_cnt[7];
3594}
3595
3596static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3597 void *context, int vl, int mode,
3598 u64 data)
3599{
3600 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3601
3602 return dd->send_egress_err_status_cnt[6];
3603}
3604
3605static u64 access_tx_incorrect_link_state_err_cnt(
3606 const struct cntr_entry *entry,
3607 void *context, int vl, int mode, u64 data)
3608{
3609 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3610
3611 return dd->send_egress_err_status_cnt[5];
3612}
3613
3614static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3615 void *context, int vl, int mode,
3616 u64 data)
3617{
3618 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3619
3620 return dd->send_egress_err_status_cnt[4];
3621}
3622
3623static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3624 const struct cntr_entry *entry,
3625 void *context, int vl, int mode, u64 data)
3626{
3627 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3628
3629 return dd->send_egress_err_status_cnt[3];
3630}
3631
3632static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3633 void *context, int vl, int mode,
3634 u64 data)
3635{
3636 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3637
3638 return dd->send_egress_err_status_cnt[2];
3639}
3640
3641static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3642 const struct cntr_entry *entry,
3643 void *context, int vl, int mode, u64 data)
3644{
3645 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3646
3647 return dd->send_egress_err_status_cnt[1];
3648}
3649
3650static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3651 const struct cntr_entry *entry,
3652 void *context, int vl, int mode, u64 data)
3653{
3654 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3655
3656 return dd->send_egress_err_status_cnt[0];
3657}
3658
3659/*
3660 * Software counters corresponding to each of the
3661 * error status bits within SendErrStatus
3662 */
3663static u64 access_send_csr_write_bad_addr_err_cnt(
3664 const struct cntr_entry *entry,
3665 void *context, int vl, int mode, u64 data)
3666{
3667 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3668
3669 return dd->send_err_status_cnt[2];
3670}
3671
3672static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3673 void *context, int vl,
3674 int mode, u64 data)
3675{
3676 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3677
3678 return dd->send_err_status_cnt[1];
3679}
3680
3681static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3682 void *context, int vl, int mode,
3683 u64 data)
3684{
3685 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3686
3687 return dd->send_err_status_cnt[0];
3688}
3689
3690/*
3691 * Software counters corresponding to each of the
3692 * error status bits within SendCtxtErrStatus
3693 */
3694static u64 access_pio_write_out_of_bounds_err_cnt(
3695 const struct cntr_entry *entry,
3696 void *context, int vl, int mode, u64 data)
3697{
3698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3699
3700 return dd->sw_ctxt_err_status_cnt[4];
3701}
3702
3703static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3704 void *context, int vl, int mode,
3705 u64 data)
3706{
3707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3708
3709 return dd->sw_ctxt_err_status_cnt[3];
3710}
3711
3712static u64 access_pio_write_crosses_boundary_err_cnt(
3713 const struct cntr_entry *entry,
3714 void *context, int vl, int mode, u64 data)
3715{
3716 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3717
3718 return dd->sw_ctxt_err_status_cnt[2];
3719}
3720
3721static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3722 void *context, int vl,
3723 int mode, u64 data)
3724{
3725 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3726
3727 return dd->sw_ctxt_err_status_cnt[1];
3728}
3729
3730static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3731 void *context, int vl, int mode,
3732 u64 data)
3733{
3734 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3735
3736 return dd->sw_ctxt_err_status_cnt[0];
3737}
3738
3739/*
3740 * Software counters corresponding to each of the
3741 * error status bits within SendDmaEngErrStatus
3742 */
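/*
 * As in the egress block above, dd->sw_send_dma_eng_err_status_cnt[N]
 * shadows bit N of the register (bits 23..0 follow).  SendDmaEngErrStatus
 * is a per-engine CSR, so these device-wide counts are presumably
 * accumulated across all SDMA engines by the error handling path.
 */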
3743static u64 access_sdma_header_request_fifo_cor_err_cnt(
3744 const struct cntr_entry *entry,
3745 void *context, int vl, int mode, u64 data)
3746{
3747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3748
3749 return dd->sw_send_dma_eng_err_status_cnt[23];
3750}
3751
3752static u64 access_sdma_header_storage_cor_err_cnt(
3753 const struct cntr_entry *entry,
3754 void *context, int vl, int mode, u64 data)
3755{
3756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3757
3758 return dd->sw_send_dma_eng_err_status_cnt[22];
3759}
3760
3761static u64 access_sdma_packet_tracking_cor_err_cnt(
3762 const struct cntr_entry *entry,
3763 void *context, int vl, int mode, u64 data)
3764{
3765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3766
3767 return dd->sw_send_dma_eng_err_status_cnt[21];
3768}
3769
3770static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3771 void *context, int vl, int mode,
3772 u64 data)
3773{
3774 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3775
3776 return dd->sw_send_dma_eng_err_status_cnt[20];
3777}
3778
3779static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3780 void *context, int vl, int mode,
3781 u64 data)
3782{
3783 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3784
3785 return dd->sw_send_dma_eng_err_status_cnt[19];
3786}
3787
3788static u64 access_sdma_header_request_fifo_unc_err_cnt(
3789 const struct cntr_entry *entry,
3790 void *context, int vl, int mode, u64 data)
3791{
3792 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793
3794 return dd->sw_send_dma_eng_err_status_cnt[18];
3795}
3796
3797static u64 access_sdma_header_storage_unc_err_cnt(
3798 const struct cntr_entry *entry,
3799 void *context, int vl, int mode, u64 data)
3800{
3801 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802
3803 return dd->sw_send_dma_eng_err_status_cnt[17];
3804}
3805
3806static u64 access_sdma_packet_tracking_unc_err_cnt(
3807 const struct cntr_entry *entry,
3808 void *context, int vl, int mode, u64 data)
3809{
3810 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811
3812 return dd->sw_send_dma_eng_err_status_cnt[16];
3813}
3814
3815static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3816 void *context, int vl, int mode,
3817 u64 data)
3818{
3819 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820
3821 return dd->sw_send_dma_eng_err_status_cnt[15];
3822}
3823
3824static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3825 void *context, int vl, int mode,
3826 u64 data)
3827{
3828 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3829
3830 return dd->sw_send_dma_eng_err_status_cnt[14];
3831}
3832
3833static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3834 void *context, int vl, int mode,
3835 u64 data)
3836{
3837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3838
3839 return dd->sw_send_dma_eng_err_status_cnt[13];
3840}
3841
3842static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3843 void *context, int vl, int mode,
3844 u64 data)
3845{
3846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3847
3848 return dd->sw_send_dma_eng_err_status_cnt[12];
3849}
3850
3851static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3852 void *context, int vl, int mode,
3853 u64 data)
3854{
3855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3856
3857 return dd->sw_send_dma_eng_err_status_cnt[11];
3858}
3859
3860static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3861 void *context, int vl, int mode,
3862 u64 data)
3863{
3864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3865
3866 return dd->sw_send_dma_eng_err_status_cnt[10];
3867}
3868
3869static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3870 void *context, int vl, int mode,
3871 u64 data)
3872{
3873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3874
3875 return dd->sw_send_dma_eng_err_status_cnt[9];
3876}
3877
3878static u64 access_sdma_packet_desc_overflow_err_cnt(
3879 const struct cntr_entry *entry,
3880 void *context, int vl, int mode, u64 data)
3881{
3882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3883
3884 return dd->sw_send_dma_eng_err_status_cnt[8];
3885}
3886
3887static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3888 void *context, int vl,
3889 int mode, u64 data)
3890{
3891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3892
3893 return dd->sw_send_dma_eng_err_status_cnt[7];
3894}
3895
3896static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3897 void *context, int vl, int mode, u64 data)
3898{
3899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3900
3901 return dd->sw_send_dma_eng_err_status_cnt[6];
3902}
3903
3904static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3905 void *context, int vl, int mode,
3906 u64 data)
3907{
3908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3909
3910 return dd->sw_send_dma_eng_err_status_cnt[5];
3911}
3912
3913static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3914 void *context, int vl, int mode,
3915 u64 data)
3916{
3917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3918
3919 return dd->sw_send_dma_eng_err_status_cnt[4];
3920}
3921
3922static u64 access_sdma_tail_out_of_bounds_err_cnt(
3923 const struct cntr_entry *entry,
3924 void *context, int vl, int mode, u64 data)
3925{
3926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3927
3928 return dd->sw_send_dma_eng_err_status_cnt[3];
3929}
3930
3931static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3932 void *context, int vl, int mode,
3933 u64 data)
3934{
3935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3936
3937 return dd->sw_send_dma_eng_err_status_cnt[2];
3938}
3939
3940static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3941 void *context, int vl, int mode,
3942 u64 data)
3943{
3944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3945
3946 return dd->sw_send_dma_eng_err_status_cnt[1];
3947}
3948
3949static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3950 void *context, int vl, int mode,
3951 u64 data)
3952{
3953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3954
3955 return dd->sw_send_dma_eng_err_status_cnt[0];
3956}
3957
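/*
 * access_dc_rcv_err_cnt - read or clear the DC receive error count.
 *
 * A read (CNTR_MODE_R) returns the CSR value (DCC_ERR_PORTRCV_ERR_CNT,
 * per the dev_cntrs[] entry below) combined with the software-maintained
 * bypass packet error count, saturating at CNTR_MAX instead of wrapping.
 * A write (CNTR_MODE_W) clears the software count alongside the CSR
 * write; any other mode is logged as an error and returns 0.
 */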
3958static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
3959 void *context, int vl, int mode,
3960 u64 data)
3961{
3962 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3963
3964 u64 val = 0;
3965 u64 csr = entry->csr;
3966
3967 val = read_write_csr(dd, csr, mode, data);
3968 if (mode == CNTR_MODE_R) {
3969 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
3970 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
3971 } else if (mode == CNTR_MODE_W) {
3972 dd->sw_rcv_bypass_packet_errors = 0;
3973 } else {
3974 dd_dev_err(dd, "Invalid cntr register access mode");
3975 return 0;
3976 }
3977 return val;
3978}
3979
3980#define def_access_sw_cpu(cntr) \
3981static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3982 void *context, int vl, int mode, u64 data) \
3983{ \
3984 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3985 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3986 ppd->ibport_data.rvp.cntr, vl, \
3987 mode, data); \
3988}
3989
3990def_access_sw_cpu(rc_acks);
3991def_access_sw_cpu(rc_qacks);
3992def_access_sw_cpu(rc_delayed_comp);
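/*
 * For reference, def_access_sw_cpu(rc_acks) above roughly expands to:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 *
 * i.e. a per-CPU counter read/write against the z_ baseline and plain
 * counter field pair in the port's rvp data.
 */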
3993
3994#define def_access_ibp_counter(cntr) \
3995static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3996 void *context, int vl, int mode, u64 data) \
3997{ \
3998 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3999 \
4000 if (vl != CNTR_INVALID_VL) \
4001 return 0; \
4002 \
 4003	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
4004 mode, data); \
4005}
4006
4007def_access_ibp_counter(loop_pkts);
4008def_access_ibp_counter(rc_resends);
4009def_access_ibp_counter(rnr_naks);
4010def_access_ibp_counter(other_naks);
4011def_access_ibp_counter(rc_timeouts);
4012def_access_ibp_counter(pkt_drops);
4013def_access_ibp_counter(dmawait);
4014def_access_ibp_counter(rc_seqnak);
4015def_access_ibp_counter(rc_dupreq);
4016def_access_ibp_counter(rdma_seq);
4017def_access_ibp_counter(unaligned);
4018def_access_ibp_counter(seq_naks);
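/*
 * The def_access_ibp_counter() accessors differ from the per-CPU ones
 * in two ways: they refuse per-VL queries (returning 0 unless
 * vl == CNTR_INVALID_VL) and they go through read_write_sw() on the
 * plain n_<cntr> software counter, e.g. access_ibp_loop_pkts()
 * operates on ppd->ibport_data.rvp.n_loop_pkts.
 */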
4019
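/*
 * Device counter table, indexed by the C_* enum values.  Each element
 * bundles a display name, a CSR address (or 0 for pure software
 * counters), flags (CNTR_NORMAL, CNTR_SYNTH, which appears to mark
 * counters synthesized to 64 bits in software, CNTR_VL for per-VL
 * counters, CNTR_32BIT, CNTR_SDMA) and, where the builder macro does
 * not supply a default, an explicit access callback such as the
 * helpers defined above.
 */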
4020static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4021[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4022[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4023 CNTR_NORMAL),
4024[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4025 CNTR_NORMAL),
4026[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4027 RCV_TID_FLOW_GEN_MISMATCH_CNT,
4028 CNTR_NORMAL),
4029[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4030 CNTR_NORMAL),
4031[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4032 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4033[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4034 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4035[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4036 CNTR_NORMAL),
4037[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4038 CNTR_NORMAL),
4039[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4040 CNTR_NORMAL),
4041[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4042 CNTR_NORMAL),
4043[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4044 CNTR_NORMAL),
4045[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4046 CNTR_NORMAL),
4047[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4048 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4049[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4050 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4051[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4052 CNTR_SYNTH),
4053[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4054 access_dc_rcv_err_cnt),
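/*
 * C_DC_RCV_ERR is the one DC entry here with a custom access callback:
 * reads fold dd->sw_rcv_bypass_packet_errors into the CSR count (see
 * access_dc_rcv_err_cnt above).  The remaining DC entries go through
 * the DC_PERF_CNTR()/DC_PERF_CNTR_LCB() default path.
 */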
4055[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4056 CNTR_SYNTH),
4057[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4058 CNTR_SYNTH),
4059[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4060 CNTR_SYNTH),
4061[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4062 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4063[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4064 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4065 CNTR_SYNTH),
4066[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4067 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4068[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4069 CNTR_SYNTH),
4070[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4071 CNTR_SYNTH),
4072[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4073 CNTR_SYNTH),
4074[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4075 CNTR_SYNTH),
4076[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4077 CNTR_SYNTH),
4078[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4079 CNTR_SYNTH),
4080[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4081 CNTR_SYNTH),
4082[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4083 CNTR_SYNTH | CNTR_VL),
4084[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4085 CNTR_SYNTH | CNTR_VL),
4086[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4087[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4088 CNTR_SYNTH | CNTR_VL),
4089[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4090[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4091 CNTR_SYNTH | CNTR_VL),
4092[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4093 CNTR_SYNTH),
4094[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4095 CNTR_SYNTH | CNTR_VL),
4096[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4097 CNTR_SYNTH),
4098[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4099 CNTR_SYNTH | CNTR_VL),
4100[C_DC_TOTAL_CRC] =
4101 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4102 CNTR_SYNTH),
4103[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4104 CNTR_SYNTH),
4105[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4106 CNTR_SYNTH),
4107[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4108 CNTR_SYNTH),
4109[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4110 CNTR_SYNTH),
4111[C_DC_CRC_MULT_LN] =
4112 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4113 CNTR_SYNTH),
4114[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4115 CNTR_SYNTH),
4116[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4117 CNTR_SYNTH),
4118[C_DC_SEQ_CRC_CNT] =
4119 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4120 CNTR_SYNTH),
4121[C_DC_ESC0_ONLY_CNT] =
4122 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4123 CNTR_SYNTH),
4124[C_DC_ESC0_PLUS1_CNT] =
4125 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4126 CNTR_SYNTH),
4127[C_DC_ESC0_PLUS2_CNT] =
4128 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4129 CNTR_SYNTH),
4130[C_DC_REINIT_FROM_PEER_CNT] =
4131 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4132 CNTR_SYNTH),
4133[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4134 CNTR_SYNTH),
4135[C_DC_MISC_FLG_CNT] =
4136 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4137 CNTR_SYNTH),
4138[C_DC_PRF_GOOD_LTP_CNT] =
4139 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4140[C_DC_PRF_ACCEPTED_LTP_CNT] =
4141 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4142 CNTR_SYNTH),
4143[C_DC_PRF_RX_FLIT_CNT] =
4144 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4145[C_DC_PRF_TX_FLIT_CNT] =
4146 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4147[C_DC_PRF_CLK_CNTR] =
4148 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4149[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4150 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4151[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4152 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4153 CNTR_SYNTH),
4154[C_DC_PG_STS_TX_SBE_CNT] =
4155 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4156[C_DC_PG_STS_TX_MBE_CNT] =
4157 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4158 CNTR_SYNTH),
4159[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4160 access_sw_cpu_intr),
4161[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4162 access_sw_cpu_rcv_limit),
4163[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4164 access_sw_vtx_wait),
4165[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4166 access_sw_pio_wait),
4167[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4168 access_sw_pio_drain),
4169[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4170 access_sw_kmem_wait),
4171[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4172 access_sw_send_schedule),
4173[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4174 SEND_DMA_DESC_FETCHED_CNT, 0,
4175 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4176 dev_access_u32_csr),
4177[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4178 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4179 access_sde_int_cnt),
4180[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4181 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4182 access_sde_err_cnt),
4183[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4184 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4185 access_sde_idle_int_cnt),
4186[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4187 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4188 access_sde_progress_int_cnt),
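/*
 * The five CNTR_SDMA entries above are per-SDMA-engine, 32-bit counts:
 * SDEDscFdCn reads the SEND_DMA_DESC_FETCHED_CNT CSR through
 * dev_access_u32_csr(), while the interrupt and error totals come from
 * software via the access_sde_*() helpers.
 */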
4189/* MISC_ERR_STATUS */
4190[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4191 CNTR_NORMAL,
4192 access_misc_pll_lock_fail_err_cnt),
4193[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4194 CNTR_NORMAL,
4195 access_misc_mbist_fail_err_cnt),
4196[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4197 CNTR_NORMAL,
4198 access_misc_invalid_eep_cmd_err_cnt),
4199[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4200 CNTR_NORMAL,
4201 access_misc_efuse_done_parity_err_cnt),
4202[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4203 CNTR_NORMAL,
4204 access_misc_efuse_write_err_cnt),
4205[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4206 0, CNTR_NORMAL,
4207 access_misc_efuse_read_bad_addr_err_cnt),
4208[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4209 CNTR_NORMAL,
4210 access_misc_efuse_csr_parity_err_cnt),
4211[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4212 CNTR_NORMAL,
4213 access_misc_fw_auth_failed_err_cnt),
4214[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4215 CNTR_NORMAL,
4216 access_misc_key_mismatch_err_cnt),
4217[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4218 CNTR_NORMAL,
4219 access_misc_sbus_write_failed_err_cnt),
4220[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4221 CNTR_NORMAL,
4222 access_misc_csr_write_bad_addr_err_cnt),
4223[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4224 CNTR_NORMAL,
4225 access_misc_csr_read_bad_addr_err_cnt),
4226[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4227 CNTR_NORMAL,
4228 access_misc_csr_parity_err_cnt),
4229/* CceErrStatus */
4230[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4231 CNTR_NORMAL,
4232 access_sw_cce_err_status_aggregated_cnt),
4233[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4234 CNTR_NORMAL,
4235 access_cce_msix_csr_parity_err_cnt),
4236[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4237 CNTR_NORMAL,
4238 access_cce_int_map_unc_err_cnt),
4239[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4240 CNTR_NORMAL,
4241 access_cce_int_map_cor_err_cnt),
4242[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4243 CNTR_NORMAL,
4244 access_cce_msix_table_unc_err_cnt),
4245[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4246 CNTR_NORMAL,
4247 access_cce_msix_table_cor_err_cnt),
4248[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4249 0, CNTR_NORMAL,
4250 access_cce_rxdma_conv_fifo_parity_err_cnt),
4251[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4252 0, CNTR_NORMAL,
4253 access_cce_rcpl_async_fifo_parity_err_cnt),
4254[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4255 CNTR_NORMAL,
4256 access_cce_seg_write_bad_addr_err_cnt),
4257[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4258 CNTR_NORMAL,
4259 access_cce_seg_read_bad_addr_err_cnt),
4260[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4261 CNTR_NORMAL,
4262 access_la_triggered_cnt),
4263[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4264 CNTR_NORMAL,
4265 access_cce_trgt_cpl_timeout_err_cnt),
4266[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4267 CNTR_NORMAL,
4268 access_pcic_receive_parity_err_cnt),
4269[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4270 CNTR_NORMAL,
4271 access_pcic_transmit_back_parity_err_cnt),
4272[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4273 0, CNTR_NORMAL,
4274 access_pcic_transmit_front_parity_err_cnt),
4275[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4276 CNTR_NORMAL,
4277 access_pcic_cpl_dat_q_unc_err_cnt),
4278[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4279 CNTR_NORMAL,
4280 access_pcic_cpl_hd_q_unc_err_cnt),
4281[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4282 CNTR_NORMAL,
4283 access_pcic_post_dat_q_unc_err_cnt),
4284[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4285 CNTR_NORMAL,
4286 access_pcic_post_hd_q_unc_err_cnt),
4287[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4288 CNTR_NORMAL,
4289 access_pcic_retry_sot_mem_unc_err_cnt),
4290[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4291 CNTR_NORMAL,
4292 access_pcic_retry_mem_unc_err),
4293[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4294 CNTR_NORMAL,
4295 access_pcic_n_post_dat_q_parity_err_cnt),
4296[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4297 CNTR_NORMAL,
4298 access_pcic_n_post_h_q_parity_err_cnt),
4299[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4300 CNTR_NORMAL,
4301 access_pcic_cpl_dat_q_cor_err_cnt),
4302[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4303 CNTR_NORMAL,
4304 access_pcic_cpl_hd_q_cor_err_cnt),
4305[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4306 CNTR_NORMAL,
4307 access_pcic_post_dat_q_cor_err_cnt),
4308[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4309 CNTR_NORMAL,
4310 access_pcic_post_hd_q_cor_err_cnt),
4311[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4312 CNTR_NORMAL,
4313 access_pcic_retry_sot_mem_cor_err_cnt),
4314[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4315 CNTR_NORMAL,
4316 access_pcic_retry_mem_cor_err_cnt),
4317[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4318 "CceCli1AsyncFifoDbgParityError", 0, 0,
4319 CNTR_NORMAL,
4320 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4321[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4322 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4323 CNTR_NORMAL,
4324 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4325 ),
4326[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4327 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4328 CNTR_NORMAL,
4329 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4330[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4331 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4332 CNTR_NORMAL,
4333 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4334[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4335 0, CNTR_NORMAL,
4336 access_cce_cli2_async_fifo_parity_err_cnt),
4337[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4338 CNTR_NORMAL,
4339 access_cce_csr_cfg_bus_parity_err_cnt),
4340[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4341 0, CNTR_NORMAL,
4342 access_cce_cli0_async_fifo_parity_err_cnt),
4343[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4344 CNTR_NORMAL,
4345 access_cce_rspd_data_parity_err_cnt),
4346[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4347 CNTR_NORMAL,
4348 access_cce_trgt_access_err_cnt),
4349[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4350 0, CNTR_NORMAL,
4351 access_cce_trgt_async_fifo_parity_err_cnt),
4352[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4353 CNTR_NORMAL,
4354 access_cce_csr_write_bad_addr_err_cnt),
4355[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4356 CNTR_NORMAL,
4357 access_cce_csr_read_bad_addr_err_cnt),
4358[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4359 CNTR_NORMAL,
4360 access_ccs_csr_parity_err_cnt),
4361
4362/* RcvErrStatus */
4363[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4364 CNTR_NORMAL,
4365 access_rx_csr_parity_err_cnt),
4366[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4367 CNTR_NORMAL,
4368 access_rx_csr_write_bad_addr_err_cnt),
4369[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4370 CNTR_NORMAL,
4371 access_rx_csr_read_bad_addr_err_cnt),
4372[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4373 CNTR_NORMAL,
4374 access_rx_dma_csr_unc_err_cnt),
4375[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4376 CNTR_NORMAL,
4377 access_rx_dma_dq_fsm_encoding_err_cnt),
4378[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4379 CNTR_NORMAL,
4380 access_rx_dma_eq_fsm_encoding_err_cnt),
4381[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4382 CNTR_NORMAL,
4383 access_rx_dma_csr_parity_err_cnt),
4384[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4385 CNTR_NORMAL,
4386 access_rx_rbuf_data_cor_err_cnt),
4387[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4388 CNTR_NORMAL,
4389 access_rx_rbuf_data_unc_err_cnt),
4390[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4391 CNTR_NORMAL,
4392 access_rx_dma_data_fifo_rd_cor_err_cnt),
4393[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4394 CNTR_NORMAL,
4395 access_rx_dma_data_fifo_rd_unc_err_cnt),
4396[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4397 CNTR_NORMAL,
4398 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4399[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4400 CNTR_NORMAL,
4401 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4402[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4403 CNTR_NORMAL,
4404 access_rx_rbuf_desc_part2_cor_err_cnt),
4405[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4406 CNTR_NORMAL,
4407 access_rx_rbuf_desc_part2_unc_err_cnt),
4408[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4409 CNTR_NORMAL,
4410 access_rx_rbuf_desc_part1_cor_err_cnt),
4411[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4412 CNTR_NORMAL,
4413 access_rx_rbuf_desc_part1_unc_err_cnt),
4414[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4415 CNTR_NORMAL,
4416 access_rx_hq_intr_fsm_err_cnt),
4417[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4418 CNTR_NORMAL,
4419 access_rx_hq_intr_csr_parity_err_cnt),
4420[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4421 CNTR_NORMAL,
4422 access_rx_lookup_csr_parity_err_cnt),
4423[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4424 CNTR_NORMAL,
4425 access_rx_lookup_rcv_array_cor_err_cnt),
4426[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4427 CNTR_NORMAL,
4428 access_rx_lookup_rcv_array_unc_err_cnt),
4429[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4430 0, CNTR_NORMAL,
4431 access_rx_lookup_des_part2_parity_err_cnt),
4432[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4433 0, CNTR_NORMAL,
4434 access_rx_lookup_des_part1_unc_cor_err_cnt),
4435[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4436 CNTR_NORMAL,
4437 access_rx_lookup_des_part1_unc_err_cnt),
4438[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4439 CNTR_NORMAL,
4440 access_rx_rbuf_next_free_buf_cor_err_cnt),
4441[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4442 CNTR_NORMAL,
4443 access_rx_rbuf_next_free_buf_unc_err_cnt),
4444[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4445 "RxRbufFlInitWrAddrParityErr", 0, 0,
4446 CNTR_NORMAL,
4447 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4448[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4449 0, CNTR_NORMAL,
4450 access_rx_rbuf_fl_initdone_parity_err_cnt),
4451[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4452 0, CNTR_NORMAL,
4453 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4454[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4455 CNTR_NORMAL,
4456 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4457[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4458 CNTR_NORMAL,
4459 access_rx_rbuf_empty_err_cnt),
4460[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4461 CNTR_NORMAL,
4462 access_rx_rbuf_full_err_cnt),
4463[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4464 CNTR_NORMAL,
4465 access_rbuf_bad_lookup_err_cnt),
4466[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4467 CNTR_NORMAL,
4468 access_rbuf_ctx_id_parity_err_cnt),
4469[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4470 CNTR_NORMAL,
4471 access_rbuf_csr_qeopdw_parity_err_cnt),
4472[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4473 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4474 CNTR_NORMAL,
4475 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4476[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4477 "RxRbufCsrQTlPtrParityErr", 0, 0,
4478 CNTR_NORMAL,
4479 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4480[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4481 0, CNTR_NORMAL,
4482 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4483[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4484 0, CNTR_NORMAL,
4485 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4486[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4487 0, 0, CNTR_NORMAL,
4488 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4489[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4490 0, CNTR_NORMAL,
4491 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4492[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4493 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4494 CNTR_NORMAL,
4495 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4496[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4497 0, CNTR_NORMAL,
4498 access_rx_rbuf_block_list_read_cor_err_cnt),
4499[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4500 0, CNTR_NORMAL,
4501 access_rx_rbuf_block_list_read_unc_err_cnt),
4502[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4503 CNTR_NORMAL,
4504 access_rx_rbuf_lookup_des_cor_err_cnt),
4505[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4506 CNTR_NORMAL,
4507 access_rx_rbuf_lookup_des_unc_err_cnt),
4508[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4509 "RxRbufLookupDesRegUncCorErr", 0, 0,
4510 CNTR_NORMAL,
4511 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4512[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4513 CNTR_NORMAL,
4514 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4515[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4516 CNTR_NORMAL,
4517 access_rx_rbuf_free_list_cor_err_cnt),
4518[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4519 CNTR_NORMAL,
4520 access_rx_rbuf_free_list_unc_err_cnt),
4521[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4522 CNTR_NORMAL,
4523 access_rx_rcv_fsm_encoding_err_cnt),
4524[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4525 CNTR_NORMAL,
4526 access_rx_dma_flag_cor_err_cnt),
4527[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4528 CNTR_NORMAL,
4529 access_rx_dma_flag_unc_err_cnt),
4530[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4531 CNTR_NORMAL,
4532 access_rx_dc_sop_eop_parity_err_cnt),
4533[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4534 CNTR_NORMAL,
4535 access_rx_rcv_csr_parity_err_cnt),
4536[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4537 CNTR_NORMAL,
4538 access_rx_rcv_qp_map_table_cor_err_cnt),
4539[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4540 CNTR_NORMAL,
4541 access_rx_rcv_qp_map_table_unc_err_cnt),
4542[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4543 CNTR_NORMAL,
4544 access_rx_rcv_data_cor_err_cnt),
4545[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4546 CNTR_NORMAL,
4547 access_rx_rcv_data_unc_err_cnt),
4548[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4549 CNTR_NORMAL,
4550 access_rx_rcv_hdr_cor_err_cnt),
4551[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4552 CNTR_NORMAL,
4553 access_rx_rcv_hdr_unc_err_cnt),
4554[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4555 CNTR_NORMAL,
4556 access_rx_dc_intf_parity_err_cnt),
4557[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4558 CNTR_NORMAL,
4559 access_rx_dma_csr_cor_err_cnt),
4560/* SendPioErrStatus */
4561[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4562 CNTR_NORMAL,
4563 access_pio_pec_sop_head_parity_err_cnt),
4564[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4565 CNTR_NORMAL,
4566 access_pio_pcc_sop_head_parity_err_cnt),
4567[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4568 0, 0, CNTR_NORMAL,
4569 access_pio_last_returned_cnt_parity_err_cnt),
4570[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4571 0, CNTR_NORMAL,
4572 access_pio_current_free_cnt_parity_err_cnt),
4573[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4574 CNTR_NORMAL,
4575 access_pio_reserved_31_err_cnt),
4576[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4577 CNTR_NORMAL,
4578 access_pio_reserved_30_err_cnt),
4579[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4580 CNTR_NORMAL,
4581 access_pio_ppmc_sop_len_err_cnt),
4582[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4583 CNTR_NORMAL,
4584 access_pio_ppmc_bqc_mem_parity_err_cnt),
4585[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4586 CNTR_NORMAL,
4587 access_pio_vl_fifo_parity_err_cnt),
4588[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4589 CNTR_NORMAL,
4590 access_pio_vlf_sop_parity_err_cnt),
4591[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4592 CNTR_NORMAL,
4593 access_pio_vlf_v1_len_parity_err_cnt),
4594[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4595 CNTR_NORMAL,
4596 access_pio_block_qw_count_parity_err_cnt),
4597[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4598 CNTR_NORMAL,
4599 access_pio_write_qw_valid_parity_err_cnt),
4600[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4601 CNTR_NORMAL,
4602 access_pio_state_machine_err_cnt),
4603[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4604 CNTR_NORMAL,
4605 access_pio_write_data_parity_err_cnt),
4606[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4607 CNTR_NORMAL,
4608 access_pio_host_addr_mem_cor_err_cnt),
4609[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4610 CNTR_NORMAL,
4611 access_pio_host_addr_mem_unc_err_cnt),
4612[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4613 CNTR_NORMAL,
4614 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4615[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4616 CNTR_NORMAL,
4617 access_pio_init_sm_in_err_cnt),
4618[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4619 CNTR_NORMAL,
4620 access_pio_ppmc_pbl_fifo_err_cnt),
4621[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4622 0, CNTR_NORMAL,
4623 access_pio_credit_ret_fifo_parity_err_cnt),
4624[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4625 CNTR_NORMAL,
4626 access_pio_v1_len_mem_bank1_cor_err_cnt),
4627[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4628 CNTR_NORMAL,
4629 access_pio_v1_len_mem_bank0_cor_err_cnt),
4630[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4631 CNTR_NORMAL,
4632 access_pio_v1_len_mem_bank1_unc_err_cnt),
4633[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4634 CNTR_NORMAL,
4635 access_pio_v1_len_mem_bank0_unc_err_cnt),
4636[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4637 CNTR_NORMAL,
4638 access_pio_sm_pkt_reset_parity_err_cnt),
4639[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4640 CNTR_NORMAL,
4641 access_pio_pkt_evict_fifo_parity_err_cnt),
4642[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4643 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4644 CNTR_NORMAL,
4645 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4646[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4647 CNTR_NORMAL,
4648 access_pio_sbrdctl_crrel_parity_err_cnt),
4649[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4650 CNTR_NORMAL,
4651 access_pio_pec_fifo_parity_err_cnt),
4652[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4653 CNTR_NORMAL,
4654 access_pio_pcc_fifo_parity_err_cnt),
4655[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4656 CNTR_NORMAL,
4657 access_pio_sb_mem_fifo1_err_cnt),
4658[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4659 CNTR_NORMAL,
4660 access_pio_sb_mem_fifo0_err_cnt),
4661[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4662 CNTR_NORMAL,
4663 access_pio_csr_parity_err_cnt),
4664[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4665 CNTR_NORMAL,
4666 access_pio_write_addr_parity_err_cnt),
4667[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4668 CNTR_NORMAL,
4669 access_pio_write_bad_ctxt_err_cnt),
4670/* SendDmaErrStatus */
4671[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4672 0, CNTR_NORMAL,
4673 access_sdma_pcie_req_tracking_cor_err_cnt),
4674[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4675 0, CNTR_NORMAL,
4676 access_sdma_pcie_req_tracking_unc_err_cnt),
4677[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4678 CNTR_NORMAL,
4679 access_sdma_csr_parity_err_cnt),
4680[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4681 CNTR_NORMAL,
4682 access_sdma_rpy_tag_err_cnt),
4683/* SendEgressErrStatus */
4684[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4685 CNTR_NORMAL,
4686 access_tx_read_pio_memory_csr_unc_err_cnt),
4687[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4688 0, CNTR_NORMAL,
4689 access_tx_read_sdma_memory_csr_err_cnt),
4690[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4691 CNTR_NORMAL,
4692 access_tx_egress_fifo_cor_err_cnt),
4693[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4694 CNTR_NORMAL,
4695 access_tx_read_pio_memory_cor_err_cnt),
4696[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4697 CNTR_NORMAL,
4698 access_tx_read_sdma_memory_cor_err_cnt),
4699[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4700 CNTR_NORMAL,
4701 access_tx_sb_hdr_cor_err_cnt),
4702[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4703 CNTR_NORMAL,
4704 access_tx_credit_overrun_err_cnt),
4705[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4706 CNTR_NORMAL,
4707 access_tx_launch_fifo8_cor_err_cnt),
4708[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4709 CNTR_NORMAL,
4710 access_tx_launch_fifo7_cor_err_cnt),
4711[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4712 CNTR_NORMAL,
4713 access_tx_launch_fifo6_cor_err_cnt),
4714[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4715 CNTR_NORMAL,
4716 access_tx_launch_fifo5_cor_err_cnt),
4717[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4718 CNTR_NORMAL,
4719 access_tx_launch_fifo4_cor_err_cnt),
4720[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4721 CNTR_NORMAL,
4722 access_tx_launch_fifo3_cor_err_cnt),
4723[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4724 CNTR_NORMAL,
4725 access_tx_launch_fifo2_cor_err_cnt),
4726[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4727 CNTR_NORMAL,
4728 access_tx_launch_fifo1_cor_err_cnt),
4729[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4730 CNTR_NORMAL,
4731 access_tx_launch_fifo0_cor_err_cnt),
4732[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4733 CNTR_NORMAL,
4734 access_tx_credit_return_vl_err_cnt),
4735[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4736 CNTR_NORMAL,
4737 access_tx_hcrc_insertion_err_cnt),
4738[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4739 CNTR_NORMAL,
4740 access_tx_egress_fifo_unc_err_cnt),
4741[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4742 CNTR_NORMAL,
4743 access_tx_read_pio_memory_unc_err_cnt),
4744[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4745 CNTR_NORMAL,
4746 access_tx_read_sdma_memory_unc_err_cnt),
4747[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4748 CNTR_NORMAL,
4749 access_tx_sb_hdr_unc_err_cnt),
4750[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4751 CNTR_NORMAL,
4752 access_tx_credit_return_partiy_err_cnt),
4753[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4754 0, 0, CNTR_NORMAL,
4755 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4756[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4757 0, 0, CNTR_NORMAL,
4758 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4759[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4760 0, 0, CNTR_NORMAL,
4761 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4762[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4763 0, 0, CNTR_NORMAL,
4764 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4765[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4766 0, 0, CNTR_NORMAL,
4767 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4768[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4769 0, 0, CNTR_NORMAL,
4770 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4771[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4772 0, 0, CNTR_NORMAL,
4773 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4774[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4775 0, 0, CNTR_NORMAL,
4776 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4777[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4778 0, 0, CNTR_NORMAL,
4779 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4780[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4781 0, 0, CNTR_NORMAL,
4782 access_tx_sdma15_disallowed_packet_err_cnt),
4783[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4784 0, 0, CNTR_NORMAL,
4785 access_tx_sdma14_disallowed_packet_err_cnt),
4786[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4787 0, 0, CNTR_NORMAL,
4788 access_tx_sdma13_disallowed_packet_err_cnt),
4789[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4790 0, 0, CNTR_NORMAL,
4791 access_tx_sdma12_disallowed_packet_err_cnt),
4792[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4793 0, 0, CNTR_NORMAL,
4794 access_tx_sdma11_disallowed_packet_err_cnt),
4795[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4796 0, 0, CNTR_NORMAL,
4797 access_tx_sdma10_disallowed_packet_err_cnt),
4798[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4799 0, 0, CNTR_NORMAL,
4800 access_tx_sdma9_disallowed_packet_err_cnt),
4801[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4802 0, 0, CNTR_NORMAL,
4803 access_tx_sdma8_disallowed_packet_err_cnt),
4804[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4805 0, 0, CNTR_NORMAL,
4806 access_tx_sdma7_disallowed_packet_err_cnt),
4807[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4808 0, 0, CNTR_NORMAL,
4809 access_tx_sdma6_disallowed_packet_err_cnt),
4810[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4811 0, 0, CNTR_NORMAL,
4812 access_tx_sdma5_disallowed_packet_err_cnt),
4813[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4814 0, 0, CNTR_NORMAL,
4815 access_tx_sdma4_disallowed_packet_err_cnt),
4816[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4817 0, 0, CNTR_NORMAL,
4818 access_tx_sdma3_disallowed_packet_err_cnt),
4819[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4820 0, 0, CNTR_NORMAL,
4821 access_tx_sdma2_disallowed_packet_err_cnt),
4822[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4823 0, 0, CNTR_NORMAL,
4824 access_tx_sdma1_disallowed_packet_err_cnt),
4825[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4826 0, 0, CNTR_NORMAL,
4827 access_tx_sdma0_disallowed_packet_err_cnt),
4828[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4829 CNTR_NORMAL,
4830 access_tx_config_parity_err_cnt),
4831[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4832 CNTR_NORMAL,
4833 access_tx_sbrd_ctl_csr_parity_err_cnt),
4834[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4835 CNTR_NORMAL,
4836 access_tx_launch_csr_parity_err_cnt),
4837[C_TX_ILLEGAL_VL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4838 CNTR_NORMAL,
4839 access_tx_illegal_vl_err_cnt),
4840[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4841 "TxSbrdCtlStateMachineParityErr", 0, 0,
4842 CNTR_NORMAL,
4843 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4844[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4845 CNTR_NORMAL,
4846 access_egress_reserved_10_err_cnt),
4847[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4848 CNTR_NORMAL,
4849 access_egress_reserved_9_err_cnt),
4850[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4851 0, 0, CNTR_NORMAL,
4852 access_tx_sdma_launch_intf_parity_err_cnt),
4853[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4854 CNTR_NORMAL,
4855 access_tx_pio_launch_intf_parity_err_cnt),
4856[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4857 CNTR_NORMAL,
4858 access_egress_reserved_6_err_cnt),
4859[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4860 CNTR_NORMAL,
4861 access_tx_incorrect_link_state_err_cnt),
4862[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4863 CNTR_NORMAL,
4864 access_tx_linkdown_err_cnt),
4865[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4866 "EgressFifoUnderrunOrParityErr", 0, 0,
4867 CNTR_NORMAL,
4868 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4869[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4870 CNTR_NORMAL,
4871 access_egress_reserved_2_err_cnt),
4872[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4873 CNTR_NORMAL,
4874 access_tx_pkt_integrity_mem_unc_err_cnt),
4875[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4876 CNTR_NORMAL,
4877 access_tx_pkt_integrity_mem_cor_err_cnt),
4878/* SendErrStatus */
4879[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4880 CNTR_NORMAL,
4881 access_send_csr_write_bad_addr_err_cnt),
4882[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4883 CNTR_NORMAL,
4884 access_send_csr_read_bad_addr_err_cnt),
4885[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4886 CNTR_NORMAL,
4887 access_send_csr_parity_cnt),
4888/* SendCtxtErrStatus */
4889[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4890 CNTR_NORMAL,
4891 access_pio_write_out_of_bounds_err_cnt),
4892[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4893 CNTR_NORMAL,
4894 access_pio_write_overflow_err_cnt),
4895[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4896 0, 0, CNTR_NORMAL,
4897 access_pio_write_crosses_boundary_err_cnt),
4898[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4899 CNTR_NORMAL,
4900 access_pio_disallowed_packet_err_cnt),
4901[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4902 CNTR_NORMAL,
4903 access_pio_inconsistent_sop_err_cnt),
4904/* SendDmaEngErrStatus */
4905[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4906 0, 0, CNTR_NORMAL,
4907 access_sdma_header_request_fifo_cor_err_cnt),
4908[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4909 CNTR_NORMAL,
4910 access_sdma_header_storage_cor_err_cnt),
4911[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4912 CNTR_NORMAL,
4913 access_sdma_packet_tracking_cor_err_cnt),
4914[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4915 CNTR_NORMAL,
4916 access_sdma_assembly_cor_err_cnt),
4917[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4918 CNTR_NORMAL,
4919 access_sdma_desc_table_cor_err_cnt),
4920[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4921 0, 0, CNTR_NORMAL,
4922 access_sdma_header_request_fifo_unc_err_cnt),
4923[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4924 CNTR_NORMAL,
4925 access_sdma_header_storage_unc_err_cnt),
4926[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4927 CNTR_NORMAL,
4928 access_sdma_packet_tracking_unc_err_cnt),
4929[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4930 CNTR_NORMAL,
4931 access_sdma_assembly_unc_err_cnt),
4932[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4933 CNTR_NORMAL,
4934 access_sdma_desc_table_unc_err_cnt),
4935[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4936 CNTR_NORMAL,
4937 access_sdma_timeout_err_cnt),
4938[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4939 CNTR_NORMAL,
4940 access_sdma_header_length_err_cnt),
4941[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4942 CNTR_NORMAL,
4943 access_sdma_header_address_err_cnt),
4944[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4945 CNTR_NORMAL,
4946 access_sdma_header_select_err_cnt),
4947[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4948 CNTR_NORMAL,
4949 access_sdma_reserved_9_err_cnt),
4950[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4951 CNTR_NORMAL,
4952 access_sdma_packet_desc_overflow_err_cnt),
4953[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4954 CNTR_NORMAL,
4955 access_sdma_length_mismatch_err_cnt),
4956[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4957 CNTR_NORMAL,
4958 access_sdma_halt_err_cnt),
4959[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4960 CNTR_NORMAL,
4961 access_sdma_mem_read_err_cnt),
4962[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4963 CNTR_NORMAL,
4964 access_sdma_first_desc_err_cnt),
4965[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4966 CNTR_NORMAL,
4967 access_sdma_tail_out_of_bounds_err_cnt),
4968[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4969 CNTR_NORMAL,
4970 access_sdma_too_long_err_cnt),
4971[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4972 CNTR_NORMAL,
4973 access_sdma_gen_mismatch_err_cnt),
4974[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4975 CNTR_NORMAL,
4976 access_sdma_wrong_dw_err_cnt),
4977};
4978
4979static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4980[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4981 CNTR_NORMAL),
4982[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4983 CNTR_NORMAL),
4984[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4985 CNTR_NORMAL),
4986[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4987 CNTR_NORMAL),
4988[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4989 CNTR_NORMAL),
4990[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4991 CNTR_NORMAL),
4992[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4993 CNTR_NORMAL),
4994[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4995[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4996[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4997[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4998 CNTR_SYNTH | CNTR_VL),
4999[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5000 CNTR_SYNTH | CNTR_VL),
5001[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5002 CNTR_SYNTH | CNTR_VL),
5003[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5004[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5005[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5006 access_sw_link_dn_cnt),
5007[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5008 access_sw_link_up_cnt),
5009[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5010 access_sw_unknown_frame_cnt),
5011[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5012 access_sw_xmit_discards),
5013[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5014 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5015 access_sw_xmit_discards),
5016[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5017 access_xmit_constraint_errs),
5018[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5019 access_rcv_constraint_errs),
5020[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5021[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5022[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5023[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5024[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5025[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5026[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5027[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5028[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5029[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5030[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5031[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5032[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5033 access_sw_cpu_rc_acks),
5034[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5035 access_sw_cpu_rc_qacks),
5036[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5037 access_sw_cpu_rc_delayed_comp),
5038[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5039[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5040[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5041[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5042[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5043[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5044[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5045[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5046[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5047[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5048[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5049[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5050[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5051[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5052[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5053[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5054[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5055[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5056[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5057[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5058[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5059[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5060[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5061[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5062[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5063[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5064[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5065[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5066[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5067[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5068[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5069[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5070[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5071[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5072[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5073[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5074[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5075[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5076[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5077[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5078[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5079[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5080[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5081[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5082[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5083[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5084[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5085[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5086[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5087[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5088[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5089[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5090[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5091[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5092[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5093[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5094[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5095[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5096[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5097[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5098[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5099[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5100[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5101[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5102[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5103[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5104[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5105[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5106[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5107[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5108[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5109[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5110[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5111[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5112[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5113[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5114[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5115[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5116[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5117[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5118};
5119
5120/* ======================================================================== */
5121
5122/* return true if this is chip revision A */
5123int is_ax(struct hfi1_devdata *dd)
5124{
5125 u8 chip_rev_minor =
5126 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5127 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5128 return (chip_rev_minor & 0xf0) == 0;
5129}
5130
5131/* return true if this is chip revision B */
5132int is_bx(struct hfi1_devdata *dd)
5133{
5134 u8 chip_rev_minor =
5135 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5136 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5137 return (chip_rev_minor & 0xF0) == 0x10;
5138}
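/*
 * Editorial worked example (not in the original source): the major
 * step of the chip revision lives in the upper nibble of the
 * chip-rev-minor field. A value of 0x00..0x0f makes is_ax() true,
 * while 0x10..0x1f makes is_bx() true, e.g. (0x12 & 0xf0) == 0x10.
 */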
5139
5140/*
5141 * Append string s to buffer buf. Arguments curp and len are the current
5142 * position and remaining length, respectively.
5143 *
5144 * return 0 on success, 1 on out of room
5145 */
5146static int append_str(char *buf, char **curp, int *lenp, const char *s)
5147{
5148 char *p = *curp;
5149 int len = *lenp;
5150 int result = 0; /* success */
5151 char c;
5152
5153 /* add a comma, if not first in the buffer */
5154 if (p != buf) {
5155 if (len == 0) {
5156 result = 1; /* out of room */
5157 goto done;
5158 }
5159 *p++ = ',';
5160 len--;
5161 }
5162
5163 /* copy the string */
5164 while ((c = *s++) != 0) {
5165 if (len == 0) {
5166 result = 1; /* out of room */
5167 goto done;
5168 }
5169 *p++ = c;
5170 len--;
5171 }
5172
5173done:
5174 /* write return values */
5175 *curp = p;
5176 *lenp = len;
5177
5178 return result;
5179}
5180
5181/*
5182 * Using the given flag table, print a comma separated string into
5183 * the buffer. End in '*' if the buffer is too short.
5184 */
5185static char *flag_string(char *buf, int buf_len, u64 flags,
5186 struct flag_table *table, int table_size)
5187{
5188 char extra[32];
5189 char *p = buf;
5190 int len = buf_len;
5191 int no_room = 0;
5192 int i;
5193
5194 /* make sure there are at least 2 bytes so we can form "*" */
5195 if (len < 2)
5196 return "";
5197
5198 len--; /* leave room for a nul */
5199 for (i = 0; i < table_size; i++) {
5200 if (flags & table[i].flag) {
5201 no_room = append_str(buf, &p, &len, table[i].str);
5202 if (no_room)
5203 break;
5204 flags &= ~table[i].flag;
5205 }
5206 }
5207
5208 /* any undocumented bits left? */
5209 if (!no_room && flags) {
5210 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5211 no_room = append_str(buf, &p, &len, extra);
5212 }
5213
5214 /* add * if ran out of room */
5215 if (no_room) {
5216 /* may need to back up to add space for a '*' */
5217 if (len == 0)
5218 --p;
5219 *p++ = '*';
5220 }
5221
5222 /* add final nul - space already allocated above */
5223 *p = 0;
5224 return buf;
5225}
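/*
 * Editorial sketch of how the flag_table decoders above are used.
 * The table and function below are illustrative only; they assume
 * the struct flag_table layout and FLAG_ENTRY0() initializer defined
 * earlier in this file.
 */
static struct flag_table example_err_flags[] __maybe_unused = {
 FLAG_ENTRY0("ErrA", 0x1ull),
 FLAG_ENTRY0("ErrB", 0x2ull),
};

static void __maybe_unused example_decode_flags(void)
{
 char buf[64];

 /* 0x5 decodes to "ErrA,bits 0x4": bit 2 has no table entry */
 flag_string(buf, sizeof(buf), 0x5ull, example_err_flags,
 ARRAY_SIZE(example_err_flags));
}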
5226
5227/* first 8 CCE error interrupt source names */
5228static const char * const cce_misc_names[] = {
5229 "CceErrInt", /* 0 */
5230 "RxeErrInt", /* 1 */
5231 "MiscErrInt", /* 2 */
5232 "Reserved3", /* 3 */
5233 "PioErrInt", /* 4 */
5234 "SDmaErrInt", /* 5 */
5235 "EgressErrInt", /* 6 */
5236 "TxeErrInt" /* 7 */
5237};
5238
5239/*
5240 * Return the miscellaneous error interrupt name.
5241 */
5242static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5243{
5244 if (source < ARRAY_SIZE(cce_misc_names))
5245 strncpy(buf, cce_misc_names[source], bsize);
5246 else
5247 snprintf(buf, bsize, "Reserved%u",
5248 source + IS_GENERAL_ERR_START);
5249
5250 return buf;
5251}
5252
5253/*
5254 * Return the SDMA engine error interrupt name.
5255 */
5256static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5257{
5258 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5259 return buf;
5260}
5261
5262/*
5263 * Return the send context error interrupt name.
5264 */
5265static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5266{
5267 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5268 return buf;
5269}
5270
5271static const char * const various_names[] = {
5272 "PbcInt",
5273 "GpioAssertInt",
5274 "Qsfp1Int",
5275 "Qsfp2Int",
5276 "TCritInt"
5277};
5278
5279/*
5280 * Return the various interrupt name.
5281 */
5282static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5283{
5284 if (source < ARRAY_SIZE(various_names))
5285 strncpy(buf, various_names[source], bsize);
5286 else
5287 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5288 return buf;
5289}
5290
5291/*
5292 * Return the DC interrupt name.
5293 */
5294static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5295{
5296 static const char * const dc_int_names[] = {
5297 "common",
5298 "lcb",
5299 "8051",
5300 "lbm" /* local block merge */
5301 };
5302
5303 if (source < ARRAY_SIZE(dc_int_names))
5304 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5305 else
5306 snprintf(buf, bsize, "DCInt%u", source);
5307 return buf;
5308}
5309
5310static const char * const sdma_int_names[] = {
5311 "SDmaInt",
5312 "SdmaIdleInt",
5313 "SdmaProgressInt",
5314};
5315
5316/*
5317 * Return the SDMA engine interrupt name.
5318 */
5319static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5320{
5321 /* what interrupt */
5322 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5323 /* which engine */
5324 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5325
5326 if (likely(what < 3))
5327 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5328 else
5329 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5330 return buf;
5331}
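/*
 * Editorial worked example: the SDMA interrupt source space is laid
 * out as (what * TXE_NUM_SDMA_ENGINES) + which. With 16 engines,
 * source 17 decodes as what = 1, which = 1, i.e. "SdmaIdleInt1".
 */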
5332
5333/*
5334 * Return the receive available interrupt name.
5335 */
5336static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5337{
5338 snprintf(buf, bsize, "RcvAvailInt%u", source);
5339 return buf;
5340}
5341
5342/*
5343 * Return the receive urgent interrupt name.
5344 */
5345static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5346{
5347 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5348 return buf;
5349}
5350
5351/*
5352 * Return the send credit interrupt name.
5353 */
5354static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5355{
5356 snprintf(buf, bsize, "SendCreditInt%u", source);
5357 return buf;
5358}
5359
5360/*
5361 * Return the reserved interrupt name.
5362 */
5363static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5364{
5365 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5366 return buf;
5367}
5368
5369static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5370{
5371 return flag_string(buf, buf_len, flags,
5372 cce_err_status_flags,
5373 ARRAY_SIZE(cce_err_status_flags));
5374}
5375
5376static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5377{
5378 return flag_string(buf, buf_len, flags,
5379 rxe_err_status_flags,
5380 ARRAY_SIZE(rxe_err_status_flags));
5381}
5382
5383static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5384{
5385 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5386 ARRAY_SIZE(misc_err_status_flags));
5387}
5388
5389static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5390{
5391 return flag_string(buf, buf_len, flags,
5392 pio_err_status_flags,
5393 ARRAY_SIZE(pio_err_status_flags));
5394}
5395
5396static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5397{
5398 return flag_string(buf, buf_len, flags,
5399 sdma_err_status_flags,
5400 ARRAY_SIZE(sdma_err_status_flags));
5401}
5402
5403static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5404{
5405 return flag_string(buf, buf_len, flags,
5406 egress_err_status_flags,
5407 ARRAY_SIZE(egress_err_status_flags));
5408}
5409
5410static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5411{
5412 return flag_string(buf, buf_len, flags,
5413 egress_err_info_flags,
5414 ARRAY_SIZE(egress_err_info_flags));
5415}
5416
5417static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5418{
5419 return flag_string(buf, buf_len, flags,
5420 send_err_status_flags,
5421 ARRAY_SIZE(send_err_status_flags));
5422}
5423
5424static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5425{
5426 char buf[96];
5427 int i = 0;
5428
5429 /*
5430 * For most these errors, there is nothing that can be done except
5431 * report or record it.
5432 */
5433 dd_dev_info(dd, "CCE Error: %s\n",
5434 cce_err_status_string(buf, sizeof(buf), reg));
5435
5436 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5437 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5438 /* this error requires a manual drop into SPC freeze mode */
5439 /* then a fix up */
5440 start_freeze_handling(dd->pport, FREEZE_SELF);
5441 }
5442
5443 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5444 if (reg & (1ull << i)) {
5445 incr_cntr64(&dd->cce_err_status_cnt[i]);
5446 /* maintain a counter over all cce_err_status errors */
5447 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5448 }
5449 }
5450}
5451
5452/*
5453 * Check counters for receive errors that do not have an interrupt
5454 * associated with them.
5455 */
5456#define RCVERR_CHECK_TIME 10
5457static void update_rcverr_timer(unsigned long opaque)
5458{
5459 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5460 struct hfi1_pportdata *ppd = dd->pport;
5461 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5462
5463 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5464 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5465 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5466 set_link_down_reason(
5467 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5468 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5469 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5470 }
5471 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5472
5473 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5474}
5475
5476static int init_rcverr(struct hfi1_devdata *dd)
5477{
5478 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5479 /* Assume the hardware counter has been reset */
5480 dd->rcv_ovfl_cnt = 0;
5481 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5482}
5483
5484static void free_rcverr(struct hfi1_devdata *dd)
5485{
5486 if (dd->rcverr_timer.data)
5487 del_timer_sync(&dd->rcverr_timer);
5488 dd->rcverr_timer.data = 0;
5489}
5490
5491static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5492{
5493 char buf[96];
5494 int i = 0;
5495
5496 dd_dev_info(dd, "Receive Error: %s\n",
5497 rxe_err_status_string(buf, sizeof(buf), reg));
5498
5499 if (reg & ALL_RXE_FREEZE_ERR) {
5500 int flags = 0;
5501
5502 /*
5503 * Freeze mode recovery is disabled for the errors
5504 * in RXE_FREEZE_ABORT_MASK
5505 */
5506 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5507 flags = FREEZE_ABORT;
5508
5509 start_freeze_handling(dd->pport, flags);
5510 }
5511
5512 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5513 if (reg & (1ull << i))
5514 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5515 }
5516}
5517
5518static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5519{
5520 char buf[96];
5521 int i = 0;
5522
5523 dd_dev_info(dd, "Misc Error: %s",
5524 misc_err_status_string(buf, sizeof(buf), reg));
5525 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5526 if (reg & (1ull << i))
5527 incr_cntr64(&dd->misc_err_status_cnt[i]);
5528 }
5529}
5530
5531static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5532{
5533 char buf[96];
5534 int i = 0;
5535
5536 dd_dev_info(dd, "PIO Error: %s\n",
5537 pio_err_status_string(buf, sizeof(buf), reg));
5538
5539 if (reg & ALL_PIO_FREEZE_ERR)
5540 start_freeze_handling(dd->pport, 0);
5541
5542 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5543 if (reg & (1ull << i))
5544 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5545 }
5546}
5547
5548static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5549{
5550 char buf[96];
5551 int i = 0;
5552
5553 dd_dev_info(dd, "SDMA Error: %s\n",
5554 sdma_err_status_string(buf, sizeof(buf), reg));
5555
5556 if (reg & ALL_SDMA_FREEZE_ERR)
5557 start_freeze_handling(dd->pport, 0);
5558
5559 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5560 if (reg & (1ull << i))
5561 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5562 }
5563}
5564
5565static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5566{
5567 incr_cntr64(&ppd->port_xmit_discards);
5568}
5569
5570static void count_port_inactive(struct hfi1_devdata *dd)
5571{
5572 __count_port_discards(dd->pport);
5573}
5574
5575/*
5576 * We have had a "disallowed packet" error during egress. Determine the
5577 * integrity check which failed, and update relevant error counter, etc.
5578 *
5579 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5580 * bit of state per integrity check, and so we can miss the reason for an
5581 * egress error if more than one packet fails the same integrity check
5582 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5583 */
5584static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5585 int vl)
5586{
5587 struct hfi1_pportdata *ppd = dd->pport;
5588 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5589 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5590 char buf[96];
5591
5592 /* clear down all observed info as quickly as possible after read */
5593 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5594
5595 dd_dev_info(dd,
5596 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5597 info, egress_err_info_string(buf, sizeof(buf), info), src);
5598
5599 /* Eventually add other counters for each bit */
5600 if (info & PORT_DISCARD_EGRESS_ERRS) {
5601 int weight, i;
5602
5603 /*
5604 * Count all applicable bits as individual errors and
5605 * attribute them to the packet that triggered this handler.
5606 * This may not be completely accurate due to limitations
5607 * on the available hardware error information. There is
5608 * a single information register and any number of error
5609 * packets may have occurred and contributed to it before
5610 * this routine is called. This means that:
5611 * a) If multiple packets with the same error occur before
5612 * this routine is called, earlier packets are missed.
5613 * There is only a single bit for each error type.
5614 * b) Errors may not be attributed to the correct VL.
5615 * The driver is attributing all bits in the info register
5616 * to the packet that triggered this call, but bits
5617 * could be an accumulation of different packets with
5618 * different VLs.
5619 * c) A single error packet may have multiple counts attached
5620 * to it. There is no way for the driver to know if
5621 * multiple bits set in the info register are due to a
5622 * single packet or multiple packets. The driver assumes
5623 * multiple packets.
5624 */
5625 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5626 for (i = 0; i < weight; i++) {
5627 __count_port_discards(ppd);
5628 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5629 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5630 else if (vl == 15)
5631 incr_cntr64(&ppd->port_xmit_discards_vl
5632 [C_VL_15]);
5633 }
5634 }
5635}
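/*
 * Editorial worked example: if two discard-class bits are set, e.g.
 * info & PORT_DISCARD_EGRESS_ERRS == 0x5, then hweight64() returns 2
 * and two discards are counted, all attributed to the VL of the
 * packet that triggered this handler (see caveats a-c above).
 */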
5636
5637/*
5638 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5639 * register. Does it represent a 'port inactive' error?
5640 */
5641static inline int port_inactive_err(u64 posn)
5642{
5643 return (posn >= SEES(TX_LINKDOWN) &&
5644 posn <= SEES(TX_INCORRECT_LINK_STATE));
5645}
5646
5647/*
5648 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5649 * register. Does it represent a 'disallowed packet' error?
5650 */
5651static inline int disallowed_pkt_err(int posn)
5652{
5653 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5654 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5655}
5656
5657/*
5658 * Input value is a bit position of one of the SDMA engine disallowed
5659 * packet errors. Return which engine. Use of this must be guarded by
5660 * disallowed_pkt_err().
5661 */
5662static inline int disallowed_pkt_engine(int posn)
5663{
5664 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5665}
5666
5667/*
5668 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5669 * be done.
5670 */
5671static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5672{
5673 struct sdma_vl_map *m;
5674 int vl;
5675
5676 /* range check */
5677 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5678 return -1;
5679
5680 rcu_read_lock();
5681 m = rcu_dereference(dd->sdma_map);
5682 vl = m->engine_to_vl[engine];
5683 rcu_read_unlock();
5684
5685 return vl;
5686}
5687
5688/*
5689 * Translate the send context (software index) into a VL. Return -1 if the
5690 * translation cannot be done.
5691 */
5692static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5693{
5694 struct send_context_info *sci;
5695 struct send_context *sc;
5696 int i;
5697
5698 sci = &dd->send_contexts[sw_index];
5699
5700 /* there is no information for user (PSM) and ack contexts */
5701 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5702 return -1;
5703
5704 sc = sci->sc;
5705 if (!sc)
5706 return -1;
5707 if (dd->vld[15].sc == sc)
5708 return 15;
5709 for (i = 0; i < num_vls; i++)
5710 if (dd->vld[i].sc == sc)
5711 return i;
5712
5713 return -1;
5714}
5715
5716static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5717{
5718 u64 reg_copy = reg, handled = 0;
5719 char buf[96];
5720 int i = 0;
5721
5722 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5723 start_freeze_handling(dd->pport, 0);
5724 else if (is_ax(dd) &&
5725 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5726 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5727 start_freeze_handling(dd->pport, 0);
5728
5729 while (reg_copy) {
5730 int posn = fls64(reg_copy);
5731 /* fls64() returns a 1-based offset, we want it zero based */
5732 int shift = posn - 1;
5733 u64 mask = 1ULL << shift;
5734
5735 if (port_inactive_err(shift)) {
5736 count_port_inactive(dd);
5737 handled |= mask;
5738 } else if (disallowed_pkt_err(shift)) {
5739 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5740
5741 handle_send_egress_err_info(dd, vl);
5742 handled |= mask;
5743 }
5744 reg_copy &= ~mask;
5745 }
5746
5747 reg &= ~handled;
5748
5749 if (reg)
5750 dd_dev_info(dd, "Egress Error: %s\n",
5751 egress_err_status_string(buf, sizeof(buf), reg));
5752
5753 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5754 if (reg & (1ull << i))
5755 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5756 }
5757}
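/*
 * Editorial worked example of the bit walk above: for reg_copy = 0x9,
 * fls64() returns 4, so shift = 3 is examined first, then shift = 0.
 * Each examined bit is cleared from reg_copy, and only bits that were
 * actually handled are removed from "reg" before it is reported.
 */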
5758
5759static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5760{
5761 char buf[96];
5762 int i = 0;
5763
5764 dd_dev_info(dd, "Send Error: %s\n",
5765 send_err_status_string(buf, sizeof(buf), reg));
5766
5767 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5768 if (reg & (1ull << i))
5769 incr_cntr64(&dd->send_err_status_cnt[i]);
5770 }
5771}
5772
5773/*
5774 * The maximum number of times the error clear down will loop before
5775 * blocking a repeating error. This value is arbitrary.
5776 */
5777#define MAX_CLEAR_COUNT 20
5778
5779/*
5780 * Clear and handle an error register. All error interrupts are funneled
5781 * through here to have a central location to correctly handle single-
5782 * or multi-shot errors.
5783 *
5784 * For non per-context registers, call this routine with a context value
5785 * of 0 so the per-context offset is zero.
5786 *
5787 * If the handler loops too many times, assume that something is wrong
5788 * and can't be fixed, so mask the error bits.
5789 */
5790static void interrupt_clear_down(struct hfi1_devdata *dd,
5791 u32 context,
5792 const struct err_reg_info *eri)
5793{
5794 u64 reg;
5795 u32 count;
5796
5797 /* read in a loop until no more errors are seen */
5798 count = 0;
5799 while (1) {
5800 reg = read_kctxt_csr(dd, context, eri->status);
5801 if (reg == 0)
5802 break;
5803 write_kctxt_csr(dd, context, eri->clear, reg);
5804 if (likely(eri->handler))
5805 eri->handler(dd, context, reg);
5806 count++;
5807 if (count > MAX_CLEAR_COUNT) {
5808 u64 mask;
5809
5810 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5811 eri->desc, reg);
5812 /*
5813 * Read-modify-write so any other masked bits
5814 * remain masked.
5815 */
5816 mask = read_kctxt_csr(dd, context, eri->mask);
5817 mask &= ~reg;
5818 write_kctxt_csr(dd, context, eri->mask, mask);
5819 break;
5820 }
5821 }
5822}
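/*
 * Editorial sketch: interrupt_clear_down() is table-driven. Each
 * err_reg_info entry pairs status/clear/mask CSRs with a handler;
 * assuming the EE() initializer used by the error tables earlier in
 * this file, a hypothetical entry looks like:
 *
 * EE(CCE_ERR, handle_cce_err, "CceErr")
 *
 * so every second-tier error source funnels through the same
 * read/clear/handle loop with automatic masking of stuck bits.
 */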
5823
5824/*
5825 * CCE block "misc" interrupt. Source is < 16.
5826 */
5827static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5828{
5829 const struct err_reg_info *eri = &misc_errs[source];
5830
5831 if (eri->handler) {
5832 interrupt_clear_down(dd, 0, eri);
5833 } else {
5834 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5835 source);
5836 }
5837}
5838
5839static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5840{
5841 return flag_string(buf, buf_len, flags,
5842 sc_err_status_flags,
5843 ARRAY_SIZE(sc_err_status_flags));
5844}
5845
5846/*
5847 * Send context error interrupt. Source (hw_context) is < 160.
5848 *
5849 * All send context errors cause the send context to halt. The normal
5850 * clear-down mechanism cannot be used because we cannot clear the
5851 * error bits until several other long-running items are done first.
5852 * This is OK because with the context halted, nothing else is going
5853 * to happen on it anyway.
5854 */
5855static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5856 unsigned int hw_context)
5857{
5858 struct send_context_info *sci;
5859 struct send_context *sc;
5860 char flags[96];
5861 u64 status;
5862 u32 sw_index;
5863 int i = 0;
5864
5865 sw_index = dd->hw_to_sw[hw_context];
5866 if (sw_index >= dd->num_send_contexts) {
5867 dd_dev_err(dd,
5868 "out of range sw index %u for send context %u\n",
5869 sw_index, hw_context);
5870 return;
5871 }
5872 sci = &dd->send_contexts[sw_index];
5873 sc = sci->sc;
5874 if (!sc) {
5875 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5876 sw_index, hw_context);
5877 return;
5878 }
5879
5880 /* tell the software that a halt has begun */
5881 sc_stop(sc, SCF_HALTED);
5882
5883 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5884
5885 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5886 send_context_err_status_string(flags, sizeof(flags),
5887 status));
5888
5889 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5890 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5891
5892 /*
5893 * Automatically restart halted kernel contexts out of interrupt
5894 * context. User contexts must ask the driver to restart the context.
5895 */
5896 if (sc->type != SC_USER)
5897 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5898
5899 /*
5900 * Update the counters for the corresponding status bits.
5901 * Note that these particular counters are aggregated over all
5902 * 160 contexts.
5903 */
5904 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5905 if (status & (1ull << i))
5906 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5907 }
5908}
5909
5910static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5911 unsigned int source, u64 status)
5912{
5913 struct sdma_engine *sde;
5914 int i = 0;
5915
5916 sde = &dd->per_sdma[source];
5917#ifdef CONFIG_SDMA_VERBOSITY
5918 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5919 slashstrip(__FILE__), __LINE__, __func__);
5920 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5921 sde->this_idx, source, (unsigned long long)status);
5922#endif
5923 sde->err_cnt++;
5924 sdma_engine_error(sde, status);
5925
5926 /*
5927 * Update the counters for the corresponding status bits.
5928 * Note that these particular counters are aggregated over
5929 * all 16 DMA engines.
5930 */
5931 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5932 if (status & (1ull << i))
5933 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5934 }
5935}
5936
5937/*
5938 * CCE block SDMA error interrupt. Source is < 16.
5939 */
5940static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5941{
5942#ifdef CONFIG_SDMA_VERBOSITY
5943 struct sdma_engine *sde = &dd->per_sdma[source];
5944
5945 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5946 slashstrip(__FILE__), __LINE__, __func__);
5947 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5948 source);
5949 sdma_dumpstate(sde);
5950#endif
5951 interrupt_clear_down(dd, source, &sdma_eng_err);
5952}
5953
5954/*
5955 * CCE block "various" interrupt. Source is < 8.
5956 */
5957static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5958{
5959 const struct err_reg_info *eri = &various_err[source];
5960
5961 /*
5962 * TCritInt cannot go through interrupt_clear_down()
5963 * because it is not a second tier interrupt. The handler
5964 * should be called directly.
5965 */
5966 if (source == TCRIT_INT_SOURCE)
5967 handle_temp_err(dd);
5968 else if (eri->handler)
5969 interrupt_clear_down(dd, 0, eri);
5970 else
5971 dd_dev_info(dd,
5972 "%s: Unimplemented/reserved interrupt %d\n",
5973 __func__, source);
5974}
5975
5976static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5977{
5978 /* src_ctx is always zero */
5979 struct hfi1_pportdata *ppd = dd->pport;
5980 unsigned long flags;
5981 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5982
5983 if (reg & QSFP_HFI0_MODPRST_N) {
5984 if (!qsfp_mod_present(ppd)) {
5985 dd_dev_info(dd, "%s: QSFP module removed\n",
5986 __func__);
5987
5988 ppd->driver_link_ready = 0;
5989 /*
5990 * Cable removed, reset all our information about the
5991 * cache and cable capabilities
5992 */
5993
5994 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5995 /*
5996 * We don't set cache_refresh_required here as we expect
5997 * an interrupt when a cable is inserted
5998 */
5999 ppd->qsfp_info.cache_valid = 0;
6000 ppd->qsfp_info.reset_needed = 0;
6001 ppd->qsfp_info.limiting_active = 0;
6002 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6003 flags);
6004 /* Invert the ModPresent pin now to detect plug-in */
6005 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6006 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6007
6008 if ((ppd->offline_disabled_reason >
6009 HFI1_ODR_MASK(
6010 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6011 (ppd->offline_disabled_reason ==
6012 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6013 ppd->offline_disabled_reason =
6014 HFI1_ODR_MASK(
6015 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6016
6017 if (ppd->host_link_state == HLS_DN_POLL) {
6018 /*
6019 * The link is still in POLL. This means
6020 * that the normal link down processing
6021 * will not happen. We have to do it here
6022 * before turning the DC off.
6023 */
6024 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
6025 }
6026 } else {
6027 dd_dev_info(dd, "%s: QSFP module inserted\n",
6028 __func__);
6029
6030 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6031 ppd->qsfp_info.cache_valid = 0;
6032 ppd->qsfp_info.cache_refresh_required = 1;
6033 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6034 flags);
6035
6036 /*
6037 * Stop inversion of ModPresent pin to detect
6038 * removal of the cable
6039 */
6040 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6041 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6042 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6043
6044 ppd->offline_disabled_reason =
6045 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6046 }
6047 }
6048
6049 if (reg & QSFP_HFI0_INT_N) {
6050 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6051 __func__);
6052 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6053 ppd->qsfp_info.check_interrupt_flags = 1;
6054 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6055 }
6056
6057 /* Schedule the QSFP work only if there is a cable attached. */
6058 if (qsfp_mod_present(ppd))
6059 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6060}
6061
6062static int request_host_lcb_access(struct hfi1_devdata *dd)
6063{
6064 int ret;
6065
6066 ret = do_8051_command(dd, HCMD_MISC,
6067 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6068 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6069 if (ret != HCMD_SUCCESS) {
6070 dd_dev_err(dd, "%s: command failed with error %d\n",
6071 __func__, ret);
6072 }
6073 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6074}
6075
6076static int request_8051_lcb_access(struct hfi1_devdata *dd)
6077{
6078 int ret;
6079
6080 ret = do_8051_command(dd, HCMD_MISC,
6081 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6082 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6083 if (ret != HCMD_SUCCESS) {
6084 dd_dev_err(dd, "%s: command failed with error %d\n",
6085 __func__, ret);
6086 }
6087 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6088}
6089
6090/*
6091 * Set the LCB selector - allow host access. The DCC selector always
6092 * points to the host.
6093 */
6094static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6095{
6096 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6097 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6098 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6099}
6100
6101/*
6102 * Clear the LCB selector - allow 8051 access. The DCC selector always
6103 * points to the host.
6104 */
6105static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6106{
6107 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6108 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6109}
6110
6111/*
6112 * Acquire LCB access from the 8051. If the host already has access,
6113 * just increment a counter. Otherwise, inform the 8051 that the
6114 * host is taking access.
6115 *
6116 * Returns:
6117 * 0 on success
6118 * -EBUSY if the 8051 has control and cannot be disturbed
6119 * -errno if unable to acquire access from the 8051
6120 */
6121int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6122{
6123 struct hfi1_pportdata *ppd = dd->pport;
6124 int ret = 0;
6125
6126 /*
6127 * Use the host link state lock so the operation of this routine
6128 * { link state check, selector change, count increment } can occur
6129 * as a unit against a link state change. Otherwise there is a
6130 * race between the state change and the count increment.
6131 */
6132 if (sleep_ok) {
6133 mutex_lock(&ppd->hls_lock);
6134 } else {
6135 while (!mutex_trylock(&ppd->hls_lock))
6136 udelay(1);
6137 }
6138
6139 /* this access is valid only when the link is up */
6140 if (ppd->host_link_state & HLS_DOWN) {
6141 dd_dev_info(dd, "%s: link state %s not up\n",
6142 __func__, link_state_name(ppd->host_link_state));
6143 ret = -EBUSY;
6144 goto done;
6145 }
6146
6147 if (dd->lcb_access_count == 0) {
6148 ret = request_host_lcb_access(dd);
6149 if (ret) {
6150 dd_dev_err(dd,
6151 "%s: unable to acquire LCB access, err %d\n",
6152 __func__, ret);
6153 goto done;
6154 }
6155 set_host_lcb_access(dd);
6156 }
6157 dd->lcb_access_count++;
6158done:
6159 mutex_unlock(&ppd->hls_lock);
6160 return ret;
6161}
6162
6163/*
6164 * Release LCB access by decrementing the use count. If the count is moving
6165 * from 1 to 0, inform 8051 that it has control back.
6166 *
6167 * Returns:
6168 * 0 on success
6169 * -errno if unable to release access to the 8051
6170 */
6171int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6172{
6173 int ret = 0;
6174
6175 /*
6176 * Use the host link state lock because the acquire needed it.
6177 * Here, we only need to keep { selector change, count decrement }
6178 * as a unit.
6179 */
6180 if (sleep_ok) {
6181 mutex_lock(&dd->pport->hls_lock);
6182 } else {
6183 while (!mutex_trylock(&dd->pport->hls_lock))
6184 udelay(1);
6185 }
6186
6187 if (dd->lcb_access_count == 0) {
6188 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6189 __func__);
6190 goto done;
6191 }
6192
6193 if (dd->lcb_access_count == 1) {
6194 set_8051_lcb_access(dd);
6195 ret = request_8051_lcb_access(dd);
6196 if (ret) {
6197 dd_dev_err(dd,
6198 "%s: unable to release LCB access, err %d\n",
6199 __func__, ret);
6200 /* restore host access if the grant didn't work */
6201 set_host_lcb_access(dd);
6202 goto done;
6203 }
6204 }
6205 dd->lcb_access_count--;
6206done:
6207 mutex_unlock(&dd->pport->hls_lock);
6208 return ret;
6209}
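/*
 * Editorial sketch (illustrative only): typical paired use of the
 * acquire/release routines above when the host needs to read an LCB
 * CSR. The register chosen here is an arbitrary example.
 */
static void __maybe_unused example_peek_lcb(struct hfi1_devdata *dd)
{
 u64 val;

 if (acquire_lcb_access(dd, 1)) /* sleeping is allowed here */
 return;
 val = read_csr(dd, DC_LCB_CFG_RUN);
 release_lcb_access(dd, 1);
 (void)val; /* a real caller would consume the value */
}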
6210
6211/*
6212 * Initialize LCB access variables and state. Called during driver load,
6213 * after most of the initialization is finished.
6214 *
6215 * The DC default is LCB access on for the host. The driver defaults to
6216 * leaving access to the 8051. Assign access now - this constrains the call
6217 * to this routine to be after all LCB set-up is done. In particular, after
6218 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6219 */
6220static void init_lcb_access(struct hfi1_devdata *dd)
6221{
6222 dd->lcb_access_count = 0;
6223}
6224
6225/*
6226 * Write a response back to an 8051 request.
6227 */
6228static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6229{
6230 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6231 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6232 (u64)return_code <<
6233 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6234 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6235}
6236
6237/*
6238 * Handle host requests from the 8051.
6239 */
6240static void handle_8051_request(struct hfi1_pportdata *ppd)
6241{
6242 struct hfi1_devdata *dd = ppd->dd;
6243 u64 reg;
6244 u16 data = 0;
6245 u8 type;
6246
6247 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6248 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6249 return; /* no request */
6250
6251 /* zero out COMPLETED so the response is seen */
6252 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6253
6254 /* extract request details */
6255 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6256 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6257 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6258 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6259
6260 switch (type) {
6261 case HREQ_LOAD_CONFIG:
6262 case HREQ_SAVE_CONFIG:
6263 case HREQ_READ_CONFIG:
6264 case HREQ_SET_TX_EQ_ABS:
6265 case HREQ_SET_TX_EQ_REL:
6266 case HREQ_ENABLE:
6267 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6268 type);
6269 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6270 break;
6271 case HREQ_CONFIG_DONE:
6272 hreq_response(dd, HREQ_SUCCESS, 0);
6273 break;
6274
6275 case HREQ_INTERFACE_TEST:
6276 hreq_response(dd, HREQ_SUCCESS, data);
6277 break;
6278 default:
6279 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6280 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6281 break;
6282 }
6283}
6284
6285static void write_global_credit(struct hfi1_devdata *dd,
6286 u8 vau, u16 total, u16 shared)
6287{
6288 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6289 ((u64)total <<
6290 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6291 ((u64)shared <<
6292 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6293 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6294}
6295
6296/*
6297 * Set up initial VL15 credits of the remote. Assumes the rest of
6298 * the CM credit registers are zero from a previous global or credit reset.
6299 */
6300void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6301{
6302 /* leave shared count at zero for both global and VL15 */
6303 write_global_credit(dd, vau, vl15buf, 0);
6304
6305 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6306 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6307}
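/*
 * Editorial note (not in the original source): handle_verify_cap() below
 * is the caller of set_up_vl15(), passing the peer's advertised vAU and
 * VL15 credit count after bumping a peer vAU of 0 up to 1. Only the
 * dedicated VL15 limit is programmed; the shared limit stays zero.
 */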
6308
6309/*
6310 * Zero all credit details from the previous connection and
6311 * reset the CM manager's internal counters.
6312 */
6313void reset_link_credits(struct hfi1_devdata *dd)
6314{
6315 int i;
6316
6317 /* remove all previous VL credit limits */
6318 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6319 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6320 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6321 write_global_credit(dd, 0, 0, 0);
6322 /* reset the CM block */
6323 pio_send_control(dd, PSC_CM_RESET);
6324}
6325
6326/* convert a vCU to a CU */
6327static u32 vcu_to_cu(u8 vcu)
6328{
6329 return 1 << vcu;
6330}
6331
6332/* convert a CU to a vCU */
6333static u8 cu_to_vcu(u32 cu)
6334{
6335 return ilog2(cu);
6336}
6337
6338/* convert a vAU to an AU */
6339static u32 vau_to_au(u8 vau)
6340{
6341 return 8 * (1 << vau);
6342}
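/*
 * Editorial sketch (not part of the original driver): worked examples of
 * the credit-unit encodings above. The virtual forms are log2-encoded,
 * so CU = 2^vCU and AU = 8 * 2^vAU. The hypothetical self-check below is
 * illustrative only and never called.
 */
static void __maybe_unused credit_unit_examples(void)
{
	WARN_ON(vau_to_au(0) != 8);		/* vAU 0 -> AU of 8 bytes */
	WARN_ON(vau_to_au(2) != 32);		/* vAU 2 -> AU of 32 bytes */
	WARN_ON(vcu_to_cu(3) != 8);		/* vCU 3 -> CU of 8 */
	WARN_ON(cu_to_vcu(vcu_to_cu(5)) != 5);	/* exact round trip */
}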
6343
6344static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6345{
6346 ppd->sm_trap_qp = 0x0;
6347 ppd->sa_qp = 0x1;
6348}
6349
6350/*
6351 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6352 */
6353static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6354{
6355 u64 reg;
6356
6357 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6358 write_csr(dd, DC_LCB_CFG_RUN, 0);
6359 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6360 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6361 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6362 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6363 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6364 reg = read_csr(dd, DCC_CFG_RESET);
17fb4f29
JJ
6365 write_csr(dd, DCC_CFG_RESET, reg |
6366 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6367 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6368 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6369 if (!abort) {
6370 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6371 write_csr(dd, DCC_CFG_RESET, reg);
6372 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6373 }
6374}
6375
6376/*
6377 * This routine should be called after the link has been transitioned to
6378 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6379 * reset).
6380 *
6381 * The expectation is that the caller of this routine would have taken
6382 * care of properly transitioning the link into the correct state.
6383 */
6384static void dc_shutdown(struct hfi1_devdata *dd)
6385{
6386 unsigned long flags;
6387
6388 spin_lock_irqsave(&dd->dc8051_lock, flags);
6389 if (dd->dc_shutdown) {
6390 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6391 return;
6392 }
6393 dd->dc_shutdown = 1;
6394 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6395 /* Shutdown the LCB */
6396 lcb_shutdown(dd, 1);
6397 /*
6398 * Going to OFFLINE would have caused the 8051 to put the
6399 * SerDes into reset already. Just need to shut down the
6400 * 8051 itself.
6401 */
6402 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6403}
6404
6405/*
6406 * Calling this after the DC has been brought out of reset should not
6407 * do any damage.
6408 */
6409static void dc_start(struct hfi1_devdata *dd)
6410{
6411 unsigned long flags;
6412 int ret;
6413
6414 spin_lock_irqsave(&dd->dc8051_lock, flags);
6415 if (!dd->dc_shutdown)
6416 goto done;
6417 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6418 /* Take the 8051 out of reset */
6419 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6420 /* Wait until 8051 is ready */
6421 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6422 if (ret) {
6423 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6424 __func__);
6425 }
6426 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6427 write_csr(dd, DCC_CFG_RESET, 0x10);
6428 /* lcb_shutdown() with abort=1 does not restore these */
6429 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6430 spin_lock_irqsave(&dd->dc8051_lock, flags);
6431 dd->dc_shutdown = 0;
6432done:
6433 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6434}
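/*
 * Editorial note (not in the original source): dc_shutdown() and
 * dc_start() each test dd->dc_shutdown under dc8051_lock and return
 * early when there is nothing to do, so repeated calls to either are
 * harmless no-ops.
 */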
6435
6436/*
6437 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6438 */
6439static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6440{
6441 u64 rx_radr, tx_radr;
6442 u32 version;
6443
6444 if (dd->icode != ICODE_FPGA_EMULATION)
6445 return;
6446
6447 /*
6448 * These LCB defaults on emulator _s are good, nothing to do here:
6449 * LCB_CFG_TX_FIFOS_RADR
6450 * LCB_CFG_RX_FIFOS_RADR
6451 * LCB_CFG_LN_DCLK
6452 * LCB_CFG_IGNORE_LOST_RCLK
6453 */
6454 if (is_emulator_s(dd))
6455 return;
6456 /* else this is _p */
6457
6458 version = emulator_rev(dd);
6459 if (!is_ax(dd))
6460 version = 0x2d; /* all B0 use 0x2d or higher settings */
6461
6462 if (version <= 0x12) {
6463 /* release 0x12 and below */
6464
6465 /*
6466 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6467 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6468 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6469 */
6470 rx_radr =
6471 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6472 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6473 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6474 /*
6475 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6476 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6477 */
6478 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6479 } else if (version <= 0x18) {
6480 /* release 0x13 up to 0x18 */
6481 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6482 rx_radr =
6483 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6484 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6485 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6486 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6487 } else if (version == 0x19) {
6488 /* release 0x19 */
6489 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6490 rx_radr =
6491 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6492 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6493 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6494 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6495 } else if (version == 0x1a) {
6496 /* release 0x1a */
6497 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6498 rx_radr =
6499 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6500 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6501 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6502 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6503 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6504 } else {
6505 /* release 0x1b and higher */
6506 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6507 rx_radr =
6508 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6509 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6510 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6511 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6512 }
6513
6514 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6515 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6516 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6517 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6518 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6519}
6520
6521/*
6522 * Handle an SMA idle message
6523 *
6524 * This is a work-queue function outside of the interrupt.
6525 */
6526void handle_sma_message(struct work_struct *work)
6527{
6528 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6529 sma_message_work);
6530 struct hfi1_devdata *dd = ppd->dd;
6531 u64 msg;
6532 int ret;
6533
6534 /*
6535 * msg is bytes 1-4 of the 40-bit idle message - the command code
6536 * is stripped off
6537 */
6538 ret = read_idle_sma(dd, &msg);
6539 if (ret)
6540 return;
6541 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6542 /*
6543 * React to the SMA message. Byte[1] (0 for us) is the command.
6544 */
6545 switch (msg & 0xff) {
6546 case SMA_IDLE_ARM:
6547 /*
6548 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6549 * State Transitions
6550 *
6551 * Only expected in INIT or ARMED, discard otherwise.
6552 */
6553 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6554 ppd->neighbor_normal = 1;
6555 break;
6556 case SMA_IDLE_ACTIVE:
6557 /*
6558 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6559 * State Transitions
6560 *
6561 * Can activate the node. Discard otherwise.
6562 */
6563 if (ppd->host_link_state == HLS_UP_ARMED &&
6564 ppd->is_active_optimize_enabled) {
6565 ppd->neighbor_normal = 1;
6566 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6567 if (ret)
6568 dd_dev_err(
6569 dd,
6570 "%s: received Active SMA idle message, couldn't set link to Active\n",
6571 __func__);
6572 }
6573 break;
6574 default:
6575 dd_dev_err(dd,
6576 "%s: received unexpected SMA idle message 0x%llx\n",
6577 __func__, msg);
6578 break;
6579 }
6580}
6581
6582static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6583{
6584 u64 rcvctrl;
6585 unsigned long flags;
6586
6587 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6588 rcvctrl = read_csr(dd, RCV_CTRL);
6589 rcvctrl |= add;
6590 rcvctrl &= ~clear;
6591 write_csr(dd, RCV_CTRL, rcvctrl);
6592 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6593}
6594
6595static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6596{
6597 adjust_rcvctrl(dd, add, 0);
6598}
6599
6600static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6601{
6602 adjust_rcvctrl(dd, 0, clear);
6603}
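/*
 * Editorial sketch (not part of the original driver): a hypothetical
 * wrapper showing the intended use of the helpers above. The
 * read-modify-write of RCV_CTRL is serialized by rcvctrl_lock inside
 * adjust_rcvctrl(), so concurrent callers cannot lose each other's bit
 * updates. The driver itself calls add_rcvctrl()/clear_rcvctrl()
 * directly, e.g. in rxe_freeze() and rxe_kernel_unfreeze() below.
 */
static void __maybe_unused set_rcv_port_enable(struct hfi1_devdata *dd,
					       bool enable)
{
	if (enable)
		add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
	else
		clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}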
6604
6605/*
6606 * Called from all interrupt handlers to start handling an SPC freeze.
6607 */
6608void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6609{
6610 struct hfi1_devdata *dd = ppd->dd;
6611 struct send_context *sc;
6612 int i;
6613
6614 if (flags & FREEZE_SELF)
6615 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6616
6617 /* enter frozen mode */
6618 dd->flags |= HFI1_FROZEN;
6619
6620 /* notify all SDMA engines that they are going into a freeze */
6621 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6622
6623 /* do halt pre-handling on all enabled send contexts */
6624 for (i = 0; i < dd->num_send_contexts; i++) {
6625 sc = dd->send_contexts[i].sc;
6626 if (sc && (sc->flags & SCF_ENABLED))
6627 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6628 }
6629
6630 /* Send contexts are frozen. Notify user space */
6631 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6632
6633 if (flags & FREEZE_ABORT) {
6634 dd_dev_err(dd,
6635 "Aborted freeze recovery. Please REBOOT system\n");
6636 return;
6637 }
6638 /* queue non-interrupt handler */
6639 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6640}
6641
6642/*
6643 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6644 * depending on the "freeze" parameter.
6645 *
6646 * No need to return an error if it times out, our only option
6647 * is to proceed anyway.
6648 */
6649static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6650{
6651 unsigned long timeout;
6652 u64 reg;
6653
6654 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6655 while (1) {
6656 reg = read_csr(dd, CCE_STATUS);
6657 if (freeze) {
6658 /* waiting until all indicators are set */
6659 if ((reg & ALL_FROZE) == ALL_FROZE)
6660 return; /* all done */
6661 } else {
6662 /* waiting until all indicators are clear */
6663 if ((reg & ALL_FROZE) == 0)
6664 return; /* all done */
6665 }
6666
6667 if (time_after(jiffies, timeout)) {
6668 dd_dev_err(dd,
6669 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6670 freeze ? "" : "un", reg & ALL_FROZE,
6671 freeze ? ALL_FROZE : 0ull);
6672 return;
6673 }
6674 usleep_range(80, 120);
6675 }
6676}
6677
6678/*
6679 * Do all freeze handling for the RXE block.
6680 */
6681static void rxe_freeze(struct hfi1_devdata *dd)
6682{
6683 int i;
6684
6685 /* disable port */
6686 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6687
6688 /* disable all receive contexts */
6689 for (i = 0; i < dd->num_rcv_contexts; i++)
6690 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6691}
6692
6693/*
6694 * Unfreeze handling for the RXE block - kernel contexts only.
6695 * This will also enable the port. User contexts will do unfreeze
6696 * handling on a per-context basis as they call into the driver.
6697 *
6698 */
6699static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6700{
566c157c 6701 u32 rcvmask;
6702 int i;
6703
6704 /* enable all kernel contexts */
6705 for (i = 0; i < dd->n_krcv_queues; i++) {
6706 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6707 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6708 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6709 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6710 hfi1_rcvctrl(dd, rcvmask, i);
6711 }
6712
6713 /* enable port */
6714 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6715}
6716
6717/*
6718 * Non-interrupt SPC freeze handling.
6719 *
6720 * This is a work-queue function outside of the triggering interrupt.
6721 */
6722void handle_freeze(struct work_struct *work)
6723{
6724 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6725 freeze_work);
6726 struct hfi1_devdata *dd = ppd->dd;
6727
6728 /* wait for freeze indicators on all affected blocks */
6729 wait_for_freeze_status(dd, 1);
6730
6731 /* SPC is now frozen */
6732
6733 /* do send PIO freeze steps */
6734 pio_freeze(dd);
6735
6736 /* do send DMA freeze steps */
6737 sdma_freeze(dd);
6738
6739 /* do send egress freeze steps - nothing to do */
6740
6741 /* do receive freeze steps */
6742 rxe_freeze(dd);
6743
6744 /*
6745 * Unfreeze the hardware - clear the freeze, wait for each
6746 * block's frozen bit to clear, then clear the frozen flag.
6747 */
6748 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6749 wait_for_freeze_status(dd, 0);
6750
6751 if (is_ax(dd)) {
6752 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6753 wait_for_freeze_status(dd, 1);
6754 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6755 wait_for_freeze_status(dd, 0);
6756 }
6757
6758 /* do send PIO unfreeze steps for kernel contexts */
6759 pio_kernel_unfreeze(dd);
6760
6761 /* do send DMA unfreeze steps */
6762 sdma_unfreeze(dd);
6763
6764 /* do send egress unfreeze steps - nothing to do */
6765
6766 /* do receive unfreeze steps for kernel contexts */
6767 rxe_kernel_unfreeze(dd);
6768
6769 /*
6770 * The unfreeze procedure touches global device registers when
6771 * it disables and re-enables RXE. Mark the device unfrozen
6772 * after all that is done so other parts of the driver waiting
6773 * for the device to unfreeze don't do things out of order.
6774 *
6775 * The above implies that the meaning of HFI1_FROZEN flag is
6776 * "Device has gone into freeze mode and freeze mode handling
6777 * is still in progress."
6778 *
6779 * The flag will be removed when freeze mode processing has
6780 * completed.
6781 */
6782 dd->flags &= ~HFI1_FROZEN;
6783 wake_up(&dd->event_queue);
6784
6785 /* no longer frozen */
6786}
6787
6788/*
6789 * Handle a link up interrupt from the 8051.
6790 *
6791 * This is a work-queue function outside of the interrupt.
6792 */
6793void handle_link_up(struct work_struct *work)
6794{
6795 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6796 link_up_work);
6797 set_link_state(ppd, HLS_UP_INIT);
6798
6799 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6800 read_ltp_rtt(ppd->dd);
6801 /*
6802 * OPA specifies that certain counters are cleared on a transition
6803 * to link up, so do that.
6804 */
6805 clear_linkup_counters(ppd->dd);
6806 /*
6807 * And (re)set link up default values.
6808 */
6809 set_linkup_defaults(ppd);
6810
6811 /* enforce link speed enabled */
6812 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6813 /* oops - current speed is not enabled, bounce */
6814 dd_dev_err(ppd->dd,
6815 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6816 ppd->link_speed_active, ppd->link_speed_enabled);
6817 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6818 OPA_LINKDOWN_REASON_SPEED_POLICY);
6819 set_link_state(ppd, HLS_DN_OFFLINE);
6820 start_link(ppd);
6821 }
6822}
6823
6824/*
6825 * Several pieces of LNI information were cached for SMA in ppd.
6826 * Reset these on link down
6827 */
6828static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6829{
6830 ppd->neighbor_guid = 0;
6831 ppd->neighbor_port_number = 0;
6832 ppd->neighbor_type = 0;
6833 ppd->neighbor_fm_security = 0;
6834}
6835
6836static const char * const link_down_reason_strs[] = {
6837 [OPA_LINKDOWN_REASON_NONE] = "None",
6838 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6839 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6840 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6841 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6842 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6843 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6844 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6845 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6846 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6847 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6848 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6849 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6850 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt VL15",
6851 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6852 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6853 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6854 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6855 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6856 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6857 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6858 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6859 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6860 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6861 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6862 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6863 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6864 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6865 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6866 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6867 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6868 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6869 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6870 "Excessive buffer overrun",
6871 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6872 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6873 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6874 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6875 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6876 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6877 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6878 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6879 "Local media not installed",
6880 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6881 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6882 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6883 "End to end not installed",
6884 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6885 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6886 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6887 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6888 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6889 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6890};
6891
6892/* return the neighbor link down reason string */
6893static const char *link_down_reason_str(u8 reason)
6894{
6895 const char *str = NULL;
6896
6897 if (reason < ARRAY_SIZE(link_down_reason_strs))
6898 str = link_down_reason_strs[reason];
6899 if (!str)
6900 str = "(invalid)";
6901
6902 return str;
6903}
6904
6905/*
6906 * Handle a link down interrupt from the 8051.
6907 *
6908 * This is a work-queue function outside of the interrupt.
6909 */
6910void handle_link_down(struct work_struct *work)
6911{
6912 u8 lcl_reason, neigh_reason = 0;
6913 u8 link_down_reason;
6914 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6915 link_down_work);
6916 int was_up;
6917 static const char ldr_str[] = "Link down reason: ";
6918
6919 if ((ppd->host_link_state &
6920 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6921 ppd->port_type == PORT_TYPE_FIXED)
6922 ppd->offline_disabled_reason =
6923 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6924
6925 /* Go offline first, then deal with reading/writing through 8051 */
6926 was_up = !!(ppd->host_link_state & HLS_UP);
6927 set_link_state(ppd, HLS_DN_OFFLINE);
6928
6929 if (was_up) {
6930 lcl_reason = 0;
6931 /* link down reason is only valid if the link was up */
6932 read_link_down_reason(ppd->dd, &link_down_reason);
6933 switch (link_down_reason) {
6934 case LDR_LINK_TRANSFER_ACTIVE_LOW:
6935 /* the link went down, no idle message reason */
6936 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6937 ldr_str);
6938 break;
6939 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6940 /*
6941 * The neighbor reason is only valid if an idle message
6942 * was received for it.
6943 */
6944 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6945 dd_dev_info(ppd->dd,
6946 "%sNeighbor link down message %d, %s\n",
6947 ldr_str, neigh_reason,
6948 link_down_reason_str(neigh_reason));
6949 break;
6950 case LDR_RECEIVED_HOST_OFFLINE_REQ:
6951 dd_dev_info(ppd->dd,
6952 "%sHost requested link to go offline\n",
6953 ldr_str);
6954 break;
6955 default:
6956 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6957 ldr_str, link_down_reason);
6958 break;
6959 }
6960
6961 /*
6962 * If no reason, assume peer-initiated but missed
6963 * LinkGoingDown idle flits.
6964 */
6965 if (neigh_reason == 0)
6966 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6967 } else {
6968 /* went down while polling or going up */
6969 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6970 }
6971
6972 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6973
6974 /* inform the SMA when the link transitions from up to down */
6975 if (was_up && ppd->local_link_down_reason.sma == 0 &&
6976 ppd->neigh_link_down_reason.sma == 0) {
6977 ppd->local_link_down_reason.sma =
6978 ppd->local_link_down_reason.latest;
6979 ppd->neigh_link_down_reason.sma =
6980 ppd->neigh_link_down_reason.latest;
6981 }
6982
6983 reset_neighbor_info(ppd);
6984
6985 /* disable the port */
6986 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6987
6988 /*
6989 * If there is no cable attached, turn the DC off. Otherwise,
6990 * start the link bring up.
6991 */
6992 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
6993 dc_shutdown(ppd->dd);
6994 else
6995 start_link(ppd);
6996}
6997
6998void handle_link_bounce(struct work_struct *work)
6999{
7000 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7001 link_bounce_work);
7002
7003 /*
7004 * Only do something if the link is currently up.
7005 */
7006 if (ppd->host_link_state & HLS_UP) {
7007 set_link_state(ppd, HLS_DN_OFFLINE);
7008 start_link(ppd);
7009 } else {
7010 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7011 __func__, link_state_name(ppd->host_link_state));
7012 }
7013}
7014
7015/*
7016 * Mask conversion: Capability exchange to Port LTP. The capability
7017 * exchange has an implicit 16b CRC that is mandatory.
7018 */
7019static int cap_to_port_ltp(int cap)
7020{
7021 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7022
7023 if (cap & CAP_CRC_14B)
7024 port_ltp |= PORT_LTP_CRC_MODE_14;
7025 if (cap & CAP_CRC_48B)
7026 port_ltp |= PORT_LTP_CRC_MODE_48;
7027 if (cap & CAP_CRC_12B_16B_PER_LANE)
7028 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7029
7030 return port_ltp;
7031}
7032
7033/*
7034 * Convert an OPA Port LTP mask to capability mask
7035 */
7036int port_ltp_to_cap(int port_ltp)
7037{
7038 int cap_mask = 0;
7039
7040 if (port_ltp & PORT_LTP_CRC_MODE_14)
7041 cap_mask |= CAP_CRC_14B;
7042 if (port_ltp & PORT_LTP_CRC_MODE_48)
7043 cap_mask |= CAP_CRC_48B;
7044 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7045 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7046
7047 return cap_mask;
7048}
7049
7050/*
7051 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7052 */
7053static int lcb_to_port_ltp(int lcb_crc)
7054{
7055 int port_ltp = 0;
7056
7057 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7058 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7059 else if (lcb_crc == LCB_CRC_48B)
7060 port_ltp = PORT_LTP_CRC_MODE_48;
7061 else if (lcb_crc == LCB_CRC_14B)
7062 port_ltp = PORT_LTP_CRC_MODE_14;
7063 else
7064 port_ltp = PORT_LTP_CRC_MODE_16;
7065
7066 return port_ltp;
7067}
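/*
 * Editorial sketch (not part of the original driver): how the three CRC
 * mask spaces relate. cap_to_port_ltp() always includes the mandatory
 * 16b mode and port_ltp_to_cap() drops it again, so the round trip is
 * lossy only for that implicit mode. The hypothetical self-check below
 * is illustrative only and never called.
 */
static void __maybe_unused crc_mode_examples(void)
{
	int ltp = cap_to_port_ltp(CAP_CRC_14B | CAP_CRC_48B);

	WARN_ON(!(ltp & PORT_LTP_CRC_MODE_16));	/* mandatory mode present */
	WARN_ON(port_ltp_to_cap(ltp) != (CAP_CRC_14B | CAP_CRC_48B));
	/* one negotiated LCB mode maps back to a single LTP bit */
	WARN_ON(lcb_to_port_ltp(LCB_CRC_48B) != PORT_LTP_CRC_MODE_48);
}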
7068
7069/*
7070 * Our neighbor has indicated that we are allowed to act as a fabric
7071 * manager, so place the full management partition key in the second
7072 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7073 * that we should already have the limited management partition key in
7074 * array element 1, and also that the port is not yet up when
7075 * add_full_mgmt_pkey() is invoked.
7076 */
7077static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7078{
7079 struct hfi1_devdata *dd = ppd->dd;
7080
7081 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7082 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7083 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7084 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7085 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7086 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7087 hfi1_event_pkey_change(ppd->dd, ppd->port);
7088}
7089
7090 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7091 {
7092 if (ppd->pkeys[2] != 0) {
7093 ppd->pkeys[2] = 0;
7094 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7095 hfi1_event_pkey_change(ppd->dd, ppd->port);
7096 }
7097}
7098
7099/*
7100 * Convert the given link width to the OPA link width bitmask.
7101 */
7102static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7103{
7104 switch (width) {
7105 case 0:
7106 /*
7107 * Simulator and quick linkup do not set the width.
7108 * Just set it to 4x without complaint.
7109 */
7110 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7111 return OPA_LINK_WIDTH_4X;
7112 return 0; /* no lanes up */
7113 case 1: return OPA_LINK_WIDTH_1X;
7114 case 2: return OPA_LINK_WIDTH_2X;
7115 case 3: return OPA_LINK_WIDTH_3X;
7116 default:
7117 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7118 __func__, width);
7119 /* fall through */
7120 case 4: return OPA_LINK_WIDTH_4X;
7121 }
7122}
7123
7124/*
7125 * Do a population count on the bottom nibble.
7126 */
7127static const u8 bit_counts[16] = {
7128 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7129};
7130
7131static inline u8 nibble_to_count(u8 nibble)
7132{
7133 return bit_counts[nibble & 0xf];
7134}
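/*
 * Editorial note (not in the original source): nibble_to_count() is a
 * 4-bit population count, equivalent to hweight8(nibble & 0xf) from
 * <linux/bitops.h>, written as a table lookup. get_link_widths() below
 * uses it to turn the per-lane enable masks into lane counts.
 */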
7135
7136/*
7137 * Read the active lane information from the 8051 registers and return
7138 * their widths.
7139 *
7140 * Active lane information is found in these 8051 registers:
7141 * enable_lane_tx
7142 * enable_lane_rx
7143 */
7144static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7145 u16 *rx_width)
7146{
7147 u16 tx, rx;
7148 u8 enable_lane_rx;
7149 u8 enable_lane_tx;
7150 u8 tx_polarity_inversion;
7151 u8 rx_polarity_inversion;
7152 u8 max_rate;
7153
7154 /* read the active lanes */
7155 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7156 &rx_polarity_inversion, &max_rate);
7157 read_local_lni(dd, &enable_lane_rx);
7158
7159 /* convert to counts */
7160 tx = nibble_to_count(enable_lane_tx);
7161 rx = nibble_to_count(enable_lane_rx);
7162
7163 /*
7164 * Set link_speed_active here, overriding what was set in
7165 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7166 * set the max_rate field in handle_verify_cap until v0.19.
7167 */
7168 if ((dd->icode == ICODE_RTL_SILICON) &&
7169 (dd->dc8051_ver < dc8051_ver(0, 19))) {
7170 /* max_rate: 0 = 12.5G, 1 = 25G */
7171 switch (max_rate) {
7172 case 0:
7173 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7174 break;
7175 default:
7176 dd_dev_err(dd,
7177 "%s: unexpected max rate %d, using 25Gb\n",
7178 __func__, (int)max_rate);
7179 /* fall through */
7180 case 1:
7181 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7182 break;
7183 }
7184 }
7185
7186 dd_dev_info(dd,
7187 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7188 enable_lane_tx, tx, enable_lane_rx, rx);
7189 *tx_width = link_width_to_bits(dd, tx);
7190 *rx_width = link_width_to_bits(dd, rx);
7191}
7192
7193/*
7194 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7195 * Valid after the end of VerifyCap and during LinkUp. Does not change
7196 * after link up. I.e. look elsewhere for downgrade information.
7197 *
7198 * Bits are:
7199 * + bits [7:4] contain the number of active transmitters
7200 * + bits [3:0] contain the number of active receivers
7201 * These are numbers 1 through 4 and can be different values if the
7202 * link is asymmetric.
7203 *
7204 * verify_cap_local_fm_link_width[0] retains its original value.
7205 */
7206static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7207 u16 *rx_width)
7208{
7209 u16 widths, tx, rx;
7210 u8 misc_bits, local_flags;
7211 u16 active_tx, active_rx;
7212
7213 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7214 tx = widths >> 12;
7215 rx = (widths >> 8) & 0xf;
7216
7217 *tx_width = link_width_to_bits(dd, tx);
7218 *rx_width = link_width_to_bits(dd, rx);
7219
7220 /* print the active widths */
7221 get_link_widths(dd, &active_tx, &active_rx);
7222}
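/*
 * Editorial example (not in the original source; the value is
 * hypothetical): for widths == 0x4400,
 *   tx = 0x4400 >> 12        = 4 active transmitters
 *   rx = (0x4400 >> 8) & 0xf = 4 active receivers
 * and link_width_to_bits() maps each count of 4 to OPA_LINK_WIDTH_4X.
 */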
7223
7224/*
7225 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7226 * hardware information when the link first comes up.
7227 *
7228 * The link width is not available until after VerifyCap.AllFramesReceived
7229 * (the trigger for handle_verify_cap), so this is outside that routine
7230 * and should be called when the 8051 signals linkup.
7231 */
7232void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7233{
7234 u16 tx_width, rx_width;
7235
7236 /* get end-of-LNI link widths */
7237 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7238
7239 /* use tx_width as the link is supposed to be symmetric on link up */
7240 ppd->link_width_active = tx_width;
7241 /* link width downgrade active (LWD.A) starts out matching LW.A */
7242 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7243 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7244 /* per OPA spec, on link up LWD.E resets to LWD.S */
7245 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7246 /* cache the active egress rate (units [10^6 bits/sec]) */
7247 ppd->current_egress_rate = active_egress_rate(ppd);
7248}
7249
7250/*
7251 * Handle a verify capabilities interrupt from the 8051.
7252 *
7253 * This is a work-queue function outside of the interrupt.
7254 */
7255void handle_verify_cap(struct work_struct *work)
7256{
7257 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7258 link_vc_work);
7259 struct hfi1_devdata *dd = ppd->dd;
7260 u64 reg;
7261 u8 power_management;
7262 u8 continuous;
7263 u8 vcu;
7264 u8 vau;
7265 u8 z;
7266 u16 vl15buf;
7267 u16 link_widths;
7268 u16 crc_mask;
7269 u16 crc_val;
7270 u16 device_id;
7271 u16 active_tx, active_rx;
7272 u8 partner_supported_crc;
7273 u8 remote_tx_rate;
7274 u8 device_rev;
7275
7276 set_link_state(ppd, HLS_VERIFY_CAP);
7277
7278 lcb_shutdown(dd, 0);
7279 adjust_lcb_for_fpga_serdes(dd);
7280
7281 /*
7282 * These are now valid:
7283 * remote VerifyCap fields in the general LNI config
7284 * CSR DC8051_STS_REMOTE_GUID
7285 * CSR DC8051_STS_REMOTE_NODE_TYPE
7286 * CSR DC8051_STS_REMOTE_FM_SECURITY
7287 * CSR DC8051_STS_REMOTE_PORT_NO
7288 */
7289
7290 read_vc_remote_phy(dd, &power_management, &continuous);
7291 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7292 &partner_supported_crc);
7293 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7294 read_remote_device_id(dd, &device_id, &device_rev);
7295 /*
7296 * And the 'MgmtAllowed' information, which is exchanged during
7297 * LNI, is also available at this point.
7298 */
7299 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7300 /* print the active widths */
7301 get_link_widths(dd, &active_tx, &active_rx);
7302 dd_dev_info(dd,
7303 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7304 (int)power_management, (int)continuous);
77241056 7305 dd_dev_info(dd,
7306 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7307 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7308 (int)partner_supported_crc);
77241056 7309 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
17fb4f29 7310 (u32)remote_tx_rate, (u32)link_widths);
77241056 7311 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
17fb4f29 7312 (u32)device_id, (u32)device_rev);
7313 /*
7314 * The peer vAU value just read is the peer receiver value. HFI does
7315 * not support a transmit vAU of 0 (AU == 8). We advertised that
7316 * with Z=1 in the fabric capabilities sent to the peer. The peer
7317 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7318 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7319 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7320 * subject to the Z value exception.
7321 */
7322 if (vau == 0)
7323 vau = 1;
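	/*
	 * Editorial note (not in the original source): vAU is
	 * log2-encoded with AU = 8 * 2^vAU (see vau_to_au() above), so
	 * bumping vAU from 0 to 1 moves the AU from 8 to 16 bytes.
	 */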
7324 set_up_vl15(dd, vau, vl15buf);
7325
7326 /* set up the LCB CRC mode */
7327 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7328
7329 /* order is important: use the lowest bit in common */
7330 if (crc_mask & CAP_CRC_14B)
7331 crc_val = LCB_CRC_14B;
7332 else if (crc_mask & CAP_CRC_48B)
7333 crc_val = LCB_CRC_48B;
7334 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7335 crc_val = LCB_CRC_12B_16B_PER_LANE;
7336 else
7337 crc_val = LCB_CRC_16B;
7338
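	/*
	 * Editorial example (not in the original source): if we enable
	 * 14b and 48b CRC but the peer supports only 48b, crc_mask has
	 * just CAP_CRC_48B set and the chain above selects LCB_CRC_48B.
	 */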
7339 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7340 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7341 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7342
7343 /* set (14b only) or clear sideband credit */
7344 reg = read_csr(dd, SEND_CM_CTRL);
7345 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7346 write_csr(dd, SEND_CM_CTRL,
7347 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7348 } else {
7349 write_csr(dd, SEND_CM_CTRL,
7350 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7351 }
7352
7353 ppd->link_speed_active = 0; /* invalid value */
7354 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7355 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7356 switch (remote_tx_rate) {
7357 case 0:
7358 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7359 break;
7360 case 1:
7361 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7362 break;
7363 }
7364 } else {
7365 /* actual rate is highest bit of the ANDed rates */
7366 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7367
7368 if (rate & 2)
7369 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7370 else if (rate & 1)
7371 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7372 }
7373 if (ppd->link_speed_active == 0) {
7374 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7375 __func__, (int)remote_tx_rate);
7376 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7377 }
7378
7379 /*
7380 * Cache the values of the supported, enabled, and active
7381 * LTP CRC modes to return in 'portinfo' queries. But the bit
7382 * flags that are returned in the portinfo query differ from
7383 * what's in the link_crc_mask, crc_sizes, and crc_val
7384 * variables. Convert these here.
7385 */
7386 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7387 /* supported crc modes */
7388 ppd->port_ltp_crc_mode |=
7389 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7390 /* enabled crc modes */
7391 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7392 /* active crc mode */
7393
7394 /* set up the remote credit return table */
7395 assign_remote_cm_au_table(dd, vcu);
7396
7397 /*
7398 * The LCB is reset on entry to handle_verify_cap(), so this must
7399 * be applied on every link up.
7400 *
7401 * Adjust LCB error kill enable to kill the link if
7402 * these RBUF errors are seen:
7403 * REPLAY_BUF_MBE_SMASK
7404 * FLIT_INPUT_BUF_MBE_SMASK
7405 */
7406 if (is_ax(dd)) { /* fixed in B0 */
7407 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7408 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7409 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7410 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7411 }
7412
7413 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7414 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7415
7416 /* give 8051 access to the LCB CSRs */
7417 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7418 set_8051_lcb_access(dd);
7419
7420 ppd->neighbor_guid =
7421 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7422 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7423 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7424 ppd->neighbor_type =
7425 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7426 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7427 ppd->neighbor_fm_security =
7428 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7429 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7430 dd_dev_info(dd,
7431 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7432 ppd->neighbor_guid, ppd->neighbor_type,
7433 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7434 if (ppd->mgmt_allowed)
7435 add_full_mgmt_pkey(ppd);
7436
7437 /* tell the 8051 to go to LinkUp */
7438 set_link_state(ppd, HLS_GOING_UP);
7439}
7440
7441/*
7442 * Apply the link width downgrade enabled policy against the current active
7443 * link widths.
7444 *
7445 * Called when the enabled policy changes or the active link widths change.
7446 */
7447void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7448{
7449 int do_bounce = 0;
7450 int tries;
7451 u16 lwde;
7452 u16 tx, rx;
7453
7454 /* use the hls lock to avoid a race with actual link up */
7455 tries = 0;
7456retry:
7457 mutex_lock(&ppd->hls_lock);
7458 /* only apply if the link is up */
7459 if (ppd->host_link_state & HLS_DOWN) {
7460 /* still going up..wait and retry */
7461 if (ppd->host_link_state & HLS_GOING_UP) {
7462 if (++tries < 1000) {
7463 mutex_unlock(&ppd->hls_lock);
7464 usleep_range(100, 120); /* arbitrary */
7465 goto retry;
7466 }
7467 dd_dev_err(ppd->dd,
7468 "%s: giving up waiting for link state change\n",
7469 __func__);
7470 }
7471 goto done;
7472 }
7473
7474 lwde = ppd->link_width_downgrade_enabled;
7475
7476 if (refresh_widths) {
7477 get_link_widths(ppd->dd, &tx, &rx);
7478 ppd->link_width_downgrade_tx_active = tx;
7479 ppd->link_width_downgrade_rx_active = rx;
7480 }
7481
7482 if (ppd->link_width_downgrade_tx_active == 0 ||
7483 ppd->link_width_downgrade_rx_active == 0) {
7484 /* the 8051 reported a dead link as a downgrade */
7485 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7486 } else if (lwde == 0) {
7487 /* downgrade is disabled */
7488
7489 /* bounce if not at starting active width */
7490 if ((ppd->link_width_active !=
7491 ppd->link_width_downgrade_tx_active) ||
7492 (ppd->link_width_active !=
7493 ppd->link_width_downgrade_rx_active)) {
7494 dd_dev_err(ppd->dd,
7495 "Link downgrade is disabled and link has downgraded, downing link\n");
7496 dd_dev_err(ppd->dd,
7497 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7498 ppd->link_width_active,
7499 ppd->link_width_downgrade_tx_active,
7500 ppd->link_width_downgrade_rx_active);
7501 do_bounce = 1;
7502 }
7503 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7504 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7505 /* Tx or Rx is outside the enabled policy */
7506 dd_dev_err(ppd->dd,
17fb4f29 7507 "Link is outside of downgrade allowed, downing link\n");
7508 dd_dev_err(ppd->dd,
7509 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7510 lwde, ppd->link_width_downgrade_tx_active,
7511 ppd->link_width_downgrade_rx_active);
7512 do_bounce = 1;
7513 }
7514
7515done:
7516 mutex_unlock(&ppd->hls_lock);
7517
7518 if (do_bounce) {
7519 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7520 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7521 set_link_state(ppd, HLS_DN_OFFLINE);
7522 start_link(ppd);
7523 }
7524}
7525
7526/*
7527 * Handle a link downgrade interrupt from the 8051.
7528 *
7529 * This is a work-queue function outside of the interrupt.
7530 */
7531void handle_link_downgrade(struct work_struct *work)
7532{
7533 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7534 link_downgrade_work);
7535
7536 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7537 apply_link_downgrade_policy(ppd, 1);
7538}
7539
7540static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7541{
7542 return flag_string(buf, buf_len, flags, dcc_err_flags,
7543 ARRAY_SIZE(dcc_err_flags));
7544}
7545
7546static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7547{
7548 return flag_string(buf, buf_len, flags, lcb_err_flags,
7549 ARRAY_SIZE(lcb_err_flags));
7550}
7551
7552static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7553{
7554 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7555 ARRAY_SIZE(dc8051_err_flags));
7556}
7557
7558static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7559{
7560 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7561 ARRAY_SIZE(dc8051_info_err_flags));
7562}
7563
7564static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7565{
7566 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7567 ARRAY_SIZE(dc8051_info_host_msg_flags));
7568}
7569
7570static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7571{
7572 struct hfi1_pportdata *ppd = dd->pport;
7573 u64 info, err, host_msg;
7574 int queue_link_down = 0;
7575 char buf[96];
7576
7577 /* look at the flags */
7578 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7579 /* 8051 information set by firmware */
7580 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7581 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7582 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7583 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7584 host_msg = (info >>
7585 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7586 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7587
7588 /*
7589 * Handle error flags.
7590 */
7591 if (err & FAILED_LNI) {
7592 /*
7593 * LNI error indications are cleared by the 8051
7594 * only when starting polling. Only pay attention
7595 * to them when in the states that occur during
7596 * LNI.
7597 */
7598 if (ppd->host_link_state
7599 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7600 queue_link_down = 1;
7601 dd_dev_info(dd, "Link error: %s\n",
7602 dc8051_info_err_string(buf,
7603 sizeof(buf),
7604 err &
7605 FAILED_LNI));
7606 }
7607 err &= ~(u64)FAILED_LNI;
7608 }
7609 /* unknown frames can happen during LNI, just count */
7610 if (err & UNKNOWN_FRAME) {
7611 ppd->unknown_frame_count++;
7612 err &= ~(u64)UNKNOWN_FRAME;
7613 }
7614 if (err) {
7615 /* report remaining errors, but do not do anything */
7616 dd_dev_err(dd, "8051 info error: %s\n",
7617 dc8051_info_err_string(buf, sizeof(buf),
7618 err));
7619 }
7620
7621 /*
7622 * Handle host message flags.
7623 */
7624 if (host_msg & HOST_REQ_DONE) {
7625 /*
7626 * Presently, the driver does a busy wait for
7627 * host requests to complete. This is only an
7628 * informational message.
7629 * NOTE: The 8051 clears the host message
7630 * information *on the next 8051 command*.
7631 * Therefore, when linkup is achieved,
7632 * this flag will still be set.
7633 */
7634 host_msg &= ~(u64)HOST_REQ_DONE;
7635 }
7636 if (host_msg & BC_SMA_MSG) {
7637 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7638 host_msg &= ~(u64)BC_SMA_MSG;
7639 }
7640 if (host_msg & LINKUP_ACHIEVED) {
7641 dd_dev_info(dd, "8051: Link up\n");
7642 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7643 host_msg &= ~(u64)LINKUP_ACHIEVED;
7644 }
7645 if (host_msg & EXT_DEVICE_CFG_REQ) {
7646 handle_8051_request(ppd);
7647 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7648 }
7649 if (host_msg & VERIFY_CAP_FRAME) {
7650 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7651 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7652 }
7653 if (host_msg & LINK_GOING_DOWN) {
7654 const char *extra = "";
7655 /* no downgrade action needed if going down */
7656 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7657 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7658 extra = " (ignoring downgrade)";
7659 }
7660 dd_dev_info(dd, "8051: Link down%s\n", extra);
7661 queue_link_down = 1;
7662 host_msg &= ~(u64)LINK_GOING_DOWN;
7663 }
7664 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7665 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7666 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7667 }
7668 if (host_msg) {
7669 /* report remaining messages, but do not do anything */
7670 dd_dev_info(dd, "8051 info host message: %s\n",
7671 dc8051_info_host_msg_string(buf,
7672 sizeof(buf),
7673 host_msg));
7674 }
7675
7676 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7677 }
7678 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7679 /*
7680 * Lost the 8051 heartbeat. If this happens, we
7681 * receive constant interrupts about it. Disable
7682 * the interrupt after the first.
7683 */
7684 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7685 write_csr(dd, DC_DC8051_ERR_EN,
7686 read_csr(dd, DC_DC8051_ERR_EN) &
7687 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7688
7689 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7690 }
7691 if (reg) {
7692 /* report the error, but do not do anything */
7693 dd_dev_err(dd, "8051 error: %s\n",
7694 dc8051_err_string(buf, sizeof(buf), reg));
7695 }
7696
7697 if (queue_link_down) {
7698 /*
7699 * if the link is already going down or disabled, do not
7700 * queue another
7701 */
7702 if ((ppd->host_link_state &
7703 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7704 ppd->link_enabled == 0) {
7705 dd_dev_info(dd, "%s: not queuing link down\n",
7706 __func__);
7707 } else {
7708 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7709 }
7710 }
7711}
7712
7713static const char * const fm_config_txt[] = {
7714[0] =
7715 "BadHeadDist: Distance violation between two head flits",
7716[1] =
7717 "BadTailDist: Distance violation between two tail flits",
7718[2] =
7719 "BadCtrlDist: Distance violation between two credit control flits",
7720[3] =
7721 "BadCrdAck: Credits return for unsupported VL",
7722[4] =
7723 "UnsupportedVLMarker: Received VL Marker",
7724[5] =
7725 "BadPreempt: Exceeded the preemption nesting level",
7726[6] =
7727 "BadControlFlit: Received unsupported control flit",
7728/* no 7 */
7729[8] =
7730 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7731};
7732
7733static const char * const port_rcv_txt[] = {
7734[1] =
7735 "BadPktLen: Illegal PktLen",
7736[2] =
7737 "PktLenTooLong: Packet longer than PktLen",
7738[3] =
7739 "PktLenTooShort: Packet shorter than PktLen",
7740[4] =
7741 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7742[5] =
7743 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7744[6] =
7745 "BadL2: Illegal L2 opcode",
7746[7] =
7747 "BadSC: Unsupported SC",
7748[9] =
7749 "BadRC: Illegal RC",
7750[11] =
7751 "PreemptError: Preempting with same VL",
7752[12] =
7753 "PreemptVL15: Preempting a VL15 packet",
7754};
7755
7756#define OPA_LDR_FMCONFIG_OFFSET 16
7757#define OPA_LDR_PORTRCV_OFFSET 0
7758static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7759{
7760 u64 info, hdr0, hdr1;
7761 const char *extra;
7762 char buf[96];
7763 struct hfi1_pportdata *ppd = dd->pport;
7764 u8 lcl_reason = 0;
7765 int do_bounce = 0;
7766
7767 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7768 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7769 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7770 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7771 /* set status bit */
7772 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7773 }
7774 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7775 }
7776
7777 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7778 struct hfi1_pportdata *ppd = dd->pport;
7779 /* this counter saturates at (2^32) - 1 */
7780 if (ppd->link_downed < (u32)UINT_MAX)
7781 ppd->link_downed++;
7782 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7783 }
7784
7785 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7786 u8 reason_valid = 1;
7787
7788 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7789 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7790 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7791 /* set status bit */
7792 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7793 }
7794 switch (info) {
7795 case 0:
7796 case 1:
7797 case 2:
7798 case 3:
7799 case 4:
7800 case 5:
7801 case 6:
7802 extra = fm_config_txt[info];
7803 break;
7804 case 8:
7805 extra = fm_config_txt[info];
7806 if (ppd->port_error_action &
7807 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7808 do_bounce = 1;
7809 /*
7810 * lcl_reason cannot be derived from info
7811 * for this error
7812 */
7813 lcl_reason =
7814 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7815 }
7816 break;
7817 default:
7818 reason_valid = 0;
7819 snprintf(buf, sizeof(buf), "reserved%lld", info);
7820 extra = buf;
7821 break;
7822 }
7823
7824 if (reason_valid && !do_bounce) {
7825 do_bounce = ppd->port_error_action &
7826 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7827 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7828 }
7829
7830 /* just report this */
7831 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7832 extra);
7833 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7834 }
7835
7836 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7837 u8 reason_valid = 1;
7838
7839 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7840 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7841 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7842 if (!(dd->err_info_rcvport.status_and_code &
7843 OPA_EI_STATUS_SMASK)) {
7844 dd->err_info_rcvport.status_and_code =
7845 info & OPA_EI_CODE_SMASK;
7846 /* set status bit */
7847 dd->err_info_rcvport.status_and_code |=
7848 OPA_EI_STATUS_SMASK;
7849 /*
7850 * save first 2 flits in the packet that caused
7851 * the error
7852 */
7853 dd->err_info_rcvport.packet_flit1 = hdr0;
7854 dd->err_info_rcvport.packet_flit2 = hdr1;
7855 }
7856 switch (info) {
7857 case 1:
7858 case 2:
7859 case 3:
7860 case 4:
7861 case 5:
7862 case 6:
7863 case 7:
7864 case 9:
7865 case 11:
7866 case 12:
7867 extra = port_rcv_txt[info];
7868 break;
7869 default:
7870 reason_valid = 0;
7871 snprintf(buf, sizeof(buf), "reserved%lld", info);
7872 extra = buf;
7873 break;
7874 }
7875
7876 if (reason_valid && !do_bounce) {
7877 do_bounce = ppd->port_error_action &
7878 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7879 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7880 }
7881
7882 /* just report this */
7883 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
7884 " hdr0 0x%llx, hdr1 0x%llx\n",
7885 extra, hdr0, hdr1);
7886
7887 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7888 }
7889
7890 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7891 /* informative only */
7892 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
7893 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7894 }
7895 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7896 /* informative only */
7897 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
7898 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7899 }
7900
7901 /* report any remaining errors */
7902 if (reg)
7903 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
7904 dcc_err_string(buf, sizeof(buf), reg));
7905
7906 if (lcl_reason == 0)
7907 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7908
7909 if (do_bounce) {
7910 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
7911 __func__);
7912 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7913 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7914 }
7915}
7916
7917static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7918{
7919 char buf[96];
7920
7921 dd_dev_info(dd, "LCB Error: %s\n",
7922 lcb_err_string(buf, sizeof(buf), reg));
7923}
7924
7925/*
7926 * CCE block DC interrupt. Source is < 8.
7927 */
7928static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7929{
7930 const struct err_reg_info *eri = &dc_errs[source];
7931
7932 if (eri->handler) {
7933 interrupt_clear_down(dd, 0, eri);
7934 } else if (source == 3 /* dc_lbm_int */) {
7935 /*
7936 * This indicates that a parity error has occurred on the
7937 * address/control lines presented to the LBM. The error
7938 * is a single pulse, there is no associated error flag,
7939 * and it is non-maskable. This is because if a parity
7940 * error occurs on the request, the request is dropped.
7941 * This should never occur, but it is nice to know if it
7942 * ever does.
7943 */
7944 dd_dev_err(dd, "Parity error in DC LBM block\n");
7945 } else {
7946 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7947 }
7948}
7949
7950/*
7951 * TX block send credit interrupt. Source is < 160.
7952 */
7953static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7954{
7955 sc_group_release_update(dd, source);
7956}
7957
7958/*
7959 * TX block SDMA interrupt. Source is < 48.
7960 *
7961 * SDMA interrupts are grouped by type:
7962 *
7963 * 0 - N-1 = SDma
7964 * N - 2N-1 = SDmaProgress
7965 * 2N - 3N-1 = SDmaIdle
7966 */
7967static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7968{
7969 /* what interrupt */
7970 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7971 /* which engine */
7972 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7973
7974#ifdef CONFIG_SDMA_VERBOSITY
7975 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7976 slashstrip(__FILE__), __LINE__, __func__);
7977 sdma_dumpstate(&dd->per_sdma[which]);
7978#endif
7979
7980 if (likely(what < 3 && which < dd->num_sdma)) {
7981 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7982 } else {
7983 /* should not happen */
7984 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7985 }
7986}
7987
7988/*
7989 * RX block receive available interrupt. Source is < 160.
7990 */
7991static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7992{
7993 struct hfi1_ctxtdata *rcd;
7994 char *err_detail;
7995
7996 if (likely(source < dd->num_rcv_contexts)) {
7997 rcd = dd->rcd[source];
7998 if (rcd) {
7999 if (source < dd->first_user_ctxt)
8000 rcd->do_interrupt(rcd, 0);
8001 else
8002 handle_user_interrupt(rcd);
8003 return; /* OK */
8004 }
8005 /* received an interrupt, but no rcd */
8006 err_detail = "dataless";
8007 } else {
8008 /* received an interrupt, but are not using that context */
8009 err_detail = "out of range";
8010 }
8011 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8012 err_detail, source);
8013}
8014
8015/*
8016 * RX block receive urgent interrupt. Source is < 160.
8017 */
8018static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8019{
8020 struct hfi1_ctxtdata *rcd;
8021 char *err_detail;
8022
8023 if (likely(source < dd->num_rcv_contexts)) {
8024 rcd = dd->rcd[source];
8025 if (rcd) {
8026 /* only pay attention to user urgent interrupts */
8027 if (source >= dd->first_user_ctxt)
8028 handle_user_interrupt(rcd);
8029 return; /* OK */
8030 }
8031 /* received an interrupt, but no rcd */
8032 err_detail = "dataless";
8033 } else {
8034 /* received an interrupt, but are not using that context */
8035 err_detail = "out of range";
8036 }
8037 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
17fb4f29 8038 err_detail, source);
8039}
8040
8041/*
8042 * Reserved range interrupt. Should not be called in normal operation.
8043 */
8044static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8045{
8046 char name[64];
8047
8048 dd_dev_err(dd, "unexpected %s interrupt\n",
17fb4f29 8049 is_reserved_name(name, sizeof(name), source));
8050}
8051
8052static const struct is_table is_table[] = {
8053/*
8054 * start end
8055 * name func interrupt func
8056 */
8057{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8058 is_misc_err_name, is_misc_err_int },
8059{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8060 is_sdma_eng_err_name, is_sdma_eng_err_int },
8061{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8062 is_sendctxt_err_name, is_sendctxt_err_int },
8063{ IS_SDMA_START, IS_SDMA_END,
8064 is_sdma_eng_name, is_sdma_eng_int },
8065{ IS_VARIOUS_START, IS_VARIOUS_END,
8066 is_various_name, is_various_int },
8067{ IS_DC_START, IS_DC_END,
8068 is_dc_name, is_dc_int },
8069{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8070 is_rcv_avail_name, is_rcv_avail_int },
8071{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8072 is_rcv_urgent_name, is_rcv_urgent_int },
8073{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8074 is_send_credit_name, is_send_credit_int},
8075{ IS_RESERVED_START, IS_RESERVED_END,
8076 is_reserved_name, is_reserved_int},
8077};
8078
8079/*
8080 * Interrupt source interrupt - called when the given source has an interrupt.
8081 * Source is a bit index into an array of 64-bit integers.
8082 */
8083static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8084{
8085 const struct is_table *entry;
8086
8087 /* avoids a double compare by walking the table in-order */
8088 for (entry = &is_table[0]; entry->is_name; entry++) {
8089 if (source < entry->end) {
8090 trace_hfi1_interrupt(dd, entry, source);
8091 entry->is_int(dd, source - entry->start);
8092 return;
8093 }
8094 }
8095 /* fell off the end */
8096 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8097}
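/*
 * Sketch of the range walk above (illustrative numbers only): if the
 * SDMA entry were { start = 128, end = 176 }, source 130 would fail
 * the "source < end" test for every earlier range, match this entry,
 * and invoke is_sdma_eng_int() with the range-relative source
 * 130 - 128 = 2.
 */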
8098
8099/*
8100 * General interrupt handler. This is able to correctly handle
8101 * all interrupts in case INTx is used.
8102 */
8103static irqreturn_t general_interrupt(int irq, void *data)
8104{
8105 struct hfi1_devdata *dd = data;
8106 u64 regs[CCE_NUM_INT_CSRS];
8107 u32 bit;
8108 int i;
8109
8110 this_cpu_inc(*dd->int_counter);
8111
8112 /* phase 1: scan and clear all handled interrupts */
8113 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8114 if (dd->gi_mask[i] == 0) {
8115 regs[i] = 0; /* used later */
8116 continue;
8117 }
8118 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8119 dd->gi_mask[i];
8120 /* only clear if anything is set */
8121 if (regs[i])
8122 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8123 }
8124
8125 /* phase 2: call the appropriate handler */
8126 for_each_set_bit(bit, (unsigned long *)&regs[0],
17fb4f29 8127 CCE_NUM_INT_CSRS * 64) {
8128 is_interrupt(dd, bit);
8129 }
8130
8131 return IRQ_HANDLED;
8132}
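/*
 * Note on phase 2 above: regs[] is scanned as one contiguous bitmap
 * of CCE_NUM_INT_CSRS * 64 bits, so a bit's index is its global
 * interrupt source number; e.g. bit 5 of CSR 2 is reported as source
 * 2 * 64 + 5 = 133 and dispatched through is_interrupt().
 */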
8133
8134static irqreturn_t sdma_interrupt(int irq, void *data)
8135{
8136 struct sdma_engine *sde = data;
8137 struct hfi1_devdata *dd = sde->dd;
8138 u64 status;
8139
8140#ifdef CONFIG_SDMA_VERBOSITY
8141 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8142 slashstrip(__FILE__), __LINE__, __func__);
8143 sdma_dumpstate(sde);
8144#endif
8145
8146 this_cpu_inc(*dd->int_counter);
8147
8148 /* This read_csr is really bad in the hot path */
8149 status = read_csr(dd,
8150 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8151 & sde->imask;
8152 if (likely(status)) {
8153 /* clear the interrupt(s) */
8154 write_csr(dd,
8155 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8156 status);
8157
8158 /* handle the interrupt(s) */
8159 sdma_engine_interrupt(sde, status);
8160 } else
8161 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
17fb4f29 8162 sde->this_idx);
8163
8164 return IRQ_HANDLED;
8165}
8166
8167/*
8168 * Clear the receive interrupt. Use a read of the interrupt clear CSR
 8169 * to ensure that the write completed. This does NOT guarantee that
8170 * queued DMA writes to memory from the chip are pushed.
8171 */
8172static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8173{
8174 struct hfi1_devdata *dd = rcd->dd;
8175 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8176
8177 mmiowb(); /* make sure everything before is written */
8178 write_csr(dd, addr, rcd->imask);
8179 /* force the above write on the chip and get a value back */
8180 (void)read_csr(dd, addr);
8181}
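/*
 * The write-then-read-back pair above is the usual PCIe posted-write
 * flush idiom. A minimal sketch of the pattern (illustrative only;
 * flush_posted_csr_write() is not a driver entry point):
 */
static inline void flush_posted_csr_write(struct hfi1_devdata *dd,
					  u32 addr, u64 val)
{
	write_csr(dd, addr, val);  /* posted write may sit in buffers */
	(void)read_csr(dd, addr);  /* non-posted read forces completion */
}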
8182
8183/* force the receive interrupt */
fb9036dd 8184void force_recv_intr(struct hfi1_ctxtdata *rcd)
8185{
8186 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8187}
8188
8189/*
8190 * Return non-zero if a packet is present.
8191 *
8192 * This routine is called when rechecking for packets after the RcvAvail
8193 * interrupt has been cleared down. First, do a quick check of memory for
8194 * a packet present. If not found, use an expensive CSR read of the context
8195 * tail to determine the actual tail. The CSR read is necessary because there
 8196 * is no method to push pending DMAs to memory other than an interrupt, and we
8197 * are trying to determine if we need to force an interrupt.
8198 */
8199static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8200{
8201 u32 tail;
8202 int present;
8203
f4f30031 8204 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
ecd42f8d 8205 present = (rcd->seq_cnt ==
f4f30031 8206 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8207 else /* is RDMA rtail */
8208 present = (rcd->head != get_rcvhdrtail(rcd));
8209
8210 if (present)
8211 return 1;
f4f30031 8212
 8213	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8214 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8215 return rcd->head != tail;
8216}
8217
8218/*
8219 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8220 * This routine will try to handle packets immediately (latency), but if
 8221 * it finds too many, it will invoke the thread handler (bandwidth). The
16733b88 8222 * chip receive interrupt is *not* cleared down until this or the thread (if
8223 * invoked) is finished. The intent is to avoid extra interrupts while we
8224 * are processing packets anyway.
8225 */
8226static irqreturn_t receive_context_interrupt(int irq, void *data)
8227{
8228 struct hfi1_ctxtdata *rcd = data;
8229 struct hfi1_devdata *dd = rcd->dd;
8230 int disposition;
8231 int present;
8232
8233 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8234 this_cpu_inc(*dd->int_counter);
affa48de 8235 aspm_ctx_disable(rcd);
77241056 8236
8237 /* receive interrupt remains blocked while processing packets */
8238 disposition = rcd->do_interrupt(rcd, 0);
77241056 8239
8240 /*
8241 * Too many packets were seen while processing packets in this
8242 * IRQ handler. Invoke the handler thread. The receive interrupt
8243 * remains blocked.
8244 */
8245 if (disposition == RCV_PKT_LIMIT)
8246 return IRQ_WAKE_THREAD;
8247
8248 /*
8249 * The packet processor detected no more packets. Clear the receive
 8250	 * interrupt and recheck for a packet that may have arrived
8251 * after the previous check and interrupt clear. If a packet arrived,
8252 * force another interrupt.
8253 */
8254 clear_recv_intr(rcd);
8255 present = check_packet_present(rcd);
8256 if (present)
8257 force_recv_intr(rcd);
8258
8259 return IRQ_HANDLED;
8260}
8261
8262/*
8263 * Receive packet thread handler. This expects to be invoked with the
8264 * receive interrupt still blocked.
8265 */
8266static irqreturn_t receive_context_thread(int irq, void *data)
8267{
8268 struct hfi1_ctxtdata *rcd = data;
8269 int present;
8270
8271 /* receive interrupt is still blocked from the IRQ handler */
8272 (void)rcd->do_interrupt(rcd, 1);
8273
8274 /*
8275 * The packet processor will only return if it detected no more
8276 * packets. Hold IRQs here so we can safely clear the interrupt and
8277 * recheck for a packet that may have arrived after the previous
8278 * check and the interrupt clear. If a packet arrived, force another
8279 * interrupt.
8280 */
8281 local_irq_disable();
8282 clear_recv_intr(rcd);
8283 present = check_packet_present(rcd);
8284 if (present)
8285 force_recv_intr(rcd);
8286 local_irq_enable();
8287
8288 return IRQ_HANDLED;
8289}
8290
8291/* ========================================================================= */
8292
8293u32 read_physical_state(struct hfi1_devdata *dd)
8294{
8295 u64 reg;
8296
8297 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8298 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8299 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8300}
8301
fb9036dd 8302u32 read_logical_state(struct hfi1_devdata *dd)
8303{
8304 u64 reg;
8305
8306 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8307 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8308 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8309}
8310
8311static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8312{
8313 u64 reg;
8314
8315 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8316 /* clear current state, set new state */
8317 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8318 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8319 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8320}
8321
8322/*
8323 * Use the 8051 to read a LCB CSR.
8324 */
8325static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8326{
8327 u32 regno;
8328 int ret;
8329
8330 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8331 if (acquire_lcb_access(dd, 0) == 0) {
8332 *data = read_csr(dd, addr);
8333 release_lcb_access(dd, 0);
8334 return 0;
8335 }
8336 return -EBUSY;
8337 }
8338
8339 /* register is an index of LCB registers: (offset - base) / 8 */
8340 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8341 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8342 if (ret != HCMD_SUCCESS)
8343 return -EBUSY;
8344 return 0;
8345}
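/*
 * Example of the index math above (illustrative): an LCB CSR at
 * offset DC_LCB_CFG_RUN + 0x18 yields regno = 0x18 >> 3 = 3, the
 * fourth 8-byte register counting from the LCB base.
 */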
8346
8347/*
8348 * Read an LCB CSR. Access may not be in host control, so check.
8349 * Return 0 on success, -EBUSY on failure.
8350 */
8351int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8352{
8353 struct hfi1_pportdata *ppd = dd->pport;
8354
8355 /* if up, go through the 8051 for the value */
8356 if (ppd->host_link_state & HLS_UP)
8357 return read_lcb_via_8051(dd, addr, data);
8358 /* if going up or down, no access */
8359 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8360 return -EBUSY;
8361 /* otherwise, host has access */
8362 *data = read_csr(dd, addr);
8363 return 0;
8364}
8365
8366/*
8367 * Use the 8051 to write a LCB CSR.
8368 */
8369static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8370{
8371 u32 regno;
8372 int ret;
77241056 8373
8374 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8375 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8376 if (acquire_lcb_access(dd, 0) == 0) {
8377 write_csr(dd, addr, data);
8378 release_lcb_access(dd, 0);
8379 return 0;
8380 }
8381 return -EBUSY;
77241056 8382 }
8383
8384 /* register is an index of LCB registers: (offset - base) / 8 */
8385 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8386 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8387 if (ret != HCMD_SUCCESS)
8388 return -EBUSY;
8389 return 0;
8390}
8391
8392/*
8393 * Write an LCB CSR. Access may not be in host control, so check.
8394 * Return 0 on success, -EBUSY on failure.
8395 */
8396int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8397{
8398 struct hfi1_pportdata *ppd = dd->pport;
8399
8400 /* if up, go through the 8051 for the value */
8401 if (ppd->host_link_state & HLS_UP)
8402 return write_lcb_via_8051(dd, addr, data);
8403 /* if going up or down, no access */
8404 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8405 return -EBUSY;
8406 /* otherwise, host has access */
8407 write_csr(dd, addr, data);
8408 return 0;
8409}
8410
8411/*
8412 * Returns:
8413 * < 0 = Linux error, not able to get access
8414 * > 0 = 8051 command RETURN_CODE
8415 */
8416static int do_8051_command(
8417 struct hfi1_devdata *dd,
8418 u32 type,
8419 u64 in_data,
8420 u64 *out_data)
8421{
8422 u64 reg, completed;
8423 int return_code;
8424 unsigned long flags;
8425 unsigned long timeout;
8426
8427 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8428
8429 /*
8430 * Alternative to holding the lock for a long time:
8431 * - keep busy wait - have other users bounce off
8432 */
8433 spin_lock_irqsave(&dd->dc8051_lock, flags);
8434
8435 /* We can't send any commands to the 8051 if it's in reset */
8436 if (dd->dc_shutdown) {
8437 return_code = -ENODEV;
8438 goto fail;
8439 }
8440
8441 /*
8442 * If an 8051 host command timed out previously, then the 8051 is
8443 * stuck.
8444 *
8445 * On first timeout, attempt to reset and restart the entire DC
8446 * block (including 8051). (Is this too big of a hammer?)
8447 *
8448 * If the 8051 times out a second time, the reset did not bring it
8449 * back to healthy life. In that case, fail any subsequent commands.
8450 */
8451 if (dd->dc8051_timed_out) {
8452 if (dd->dc8051_timed_out > 1) {
8453 dd_dev_err(dd,
8454 "Previous 8051 host command timed out, skipping command %u\n",
8455 type);
8456 return_code = -ENXIO;
8457 goto fail;
8458 }
8459 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8460 dc_shutdown(dd);
8461 dc_start(dd);
8462 spin_lock_irqsave(&dd->dc8051_lock, flags);
8463 }
8464
8465 /*
8466 * If there is no timeout, then the 8051 command interface is
8467 * waiting for a command.
8468 */
8469
8470 /*
 8471	 * When writing an LCB CSR, out_data contains the full value
 8472	 * to be written, while in_data contains the relative LCB
 8473	 * address in 7:0. Do the work here, rather than in the caller,
 8474	 * of distributing the write data to where it needs to go:
8475 *
8476 * Write data
8477 * 39:00 -> in_data[47:8]
8478 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8479 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8480 */
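	/*
	 * Worked split (illustrative): for *out_data = 0xAABBCCDDEEFF1122,
	 * bits 39:0 (0xDDEEFF1122) are folded into in_data[47:8], bits
	 * 47:40 (0xCC) go to RETURN_CODE, and bits 63:48 (0xAABB) go to
	 * RSP_DATA.
	 */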
8481 if (type == HCMD_WRITE_LCB_CSR) {
8482 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8483 /* must preserve COMPLETED - it is tied to hardware */
8484 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8485 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8486 reg |= ((((*out_data) >> 40) & 0xff) <<
8487 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8488 | ((((*out_data) >> 48) & 0xffff) <<
8489 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8490 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8491 }
8492
8493 /*
8494 * Do two writes: the first to stabilize the type and req_data, the
8495 * second to activate.
8496 */
8497 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8498 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8499 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8500 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8501 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8502 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8503 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8504
8505 /* wait for completion, alternate: interrupt */
8506 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8507 while (1) {
8508 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8509 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8510 if (completed)
8511 break;
8512 if (time_after(jiffies, timeout)) {
8513 dd->dc8051_timed_out++;
8514 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8515 if (out_data)
8516 *out_data = 0;
8517 return_code = -ETIMEDOUT;
8518 goto fail;
8519 }
8520 udelay(2);
8521 }
8522
8523 if (out_data) {
8524 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8525 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8526 if (type == HCMD_READ_LCB_CSR) {
8527 /* top 16 bits are in a different register */
8528 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8529 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8530 << (48
8531 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8532 }
8533 }
8534 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8535 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8536 dd->dc8051_timed_out = 0;
8537 /*
8538 * Clear command for next user.
8539 */
8540 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8541
8542fail:
8543 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8544
8545 return return_code;
8546}
8547
8548static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8549{
8550 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8551}
8552
8553int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8554 u8 lane_id, u32 config_data)
8555{
8556 u64 data;
8557 int ret;
8558
8559 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8560 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8561 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8562 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8563 if (ret != HCMD_SUCCESS) {
8564 dd_dev_err(dd,
8565 "load 8051 config: field id %d, lane %d, err %d\n",
8566 (int)field_id, (int)lane_id, ret);
8567 }
8568 return ret;
8569}
8570
8571/*
8572 * Read the 8051 firmware "registers". Use the RAM directly. Always
8573 * set the result, even on error.
8574 * Return 0 on success, -errno on failure
8575 */
8576int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8577 u32 *result)
8578{
8579 u64 big_data;
8580 u32 addr;
8581 int ret;
8582
8583 /* address start depends on the lane_id */
8584 if (lane_id < 4)
8585 addr = (4 * NUM_GENERAL_FIELDS)
8586 + (lane_id * 4 * NUM_LANE_FIELDS);
8587 else
8588 addr = 0;
8589 addr += field_id * 4;
8590
8591 /* read is in 8-byte chunks, hardware will truncate the address down */
8592 ret = read_8051_data(dd, addr, 8, &big_data);
8593
8594 if (ret == 0) {
8595 /* extract the 4 bytes we want */
8596 if (addr & 0x4)
8597 *result = (u32)(big_data >> 32);
8598 else
8599 *result = (u32)big_data;
8600 } else {
8601 *result = 0;
8602 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
17fb4f29 8603 __func__, lane_id, field_id);
8604 }
8605
8606 return ret;
8607}
8608
8609static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8610 u8 continuous)
8611{
8612 u32 frame;
8613
8614 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8615 | power_management << POWER_MANAGEMENT_SHIFT;
8616 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8617 GENERAL_CONFIG, frame);
8618}
8619
8620static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8621 u16 vl15buf, u8 crc_sizes)
8622{
8623 u32 frame;
8624
8625 frame = (u32)vau << VAU_SHIFT
8626 | (u32)z << Z_SHIFT
8627 | (u32)vcu << VCU_SHIFT
8628 | (u32)vl15buf << VL15BUF_SHIFT
8629 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8630 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8631 GENERAL_CONFIG, frame);
8632}
8633
8634static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8635 u8 *flag_bits, u16 *link_widths)
8636{
8637 u32 frame;
8638
8639 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
17fb4f29 8640 &frame);
8641 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8642 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8643 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8644}
8645
8646static int write_vc_local_link_width(struct hfi1_devdata *dd,
8647 u8 misc_bits,
8648 u8 flag_bits,
8649 u16 link_widths)
8650{
8651 u32 frame;
8652
8653 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8654 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8655 | (u32)link_widths << LINK_WIDTH_SHIFT;
8656 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8657 frame);
8658}
8659
8660static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8661 u8 device_rev)
8662{
8663 u32 frame;
8664
8665 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8666 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8667 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8668}
8669
8670static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8671 u8 *device_rev)
8672{
8673 u32 frame;
8674
8675 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8676 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8677 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8678 & REMOTE_DEVICE_REV_MASK;
8679}
8680
8681void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8682{
8683 u32 frame;
8684
8685 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8686 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8687 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8688}
8689
8690static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8691 u8 *continuous)
8692{
8693 u32 frame;
8694
8695 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8696 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8697 & POWER_MANAGEMENT_MASK;
8698 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8699 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8700}
8701
8702static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8703 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8704{
8705 u32 frame;
8706
8707 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8708 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8709 *z = (frame >> Z_SHIFT) & Z_MASK;
8710 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8711 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8712 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8713}
8714
8715static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8716 u8 *remote_tx_rate,
8717 u16 *link_widths)
8718{
8719 u32 frame;
8720
8721 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
17fb4f29 8722 &frame);
8723 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8724 & REMOTE_TX_RATE_MASK;
8725 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8726}
8727
8728static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8729{
8730 u32 frame;
8731
8732 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8733 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8734}
8735
8736static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8737{
8738 u32 frame;
8739
8740 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8741 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8742}
8743
8744static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8745{
8746 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8747}
8748
8749static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8750{
8751 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8752}
8753
8754void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8755{
8756 u32 frame;
8757 int ret;
8758
8759 *link_quality = 0;
8760 if (dd->pport->host_link_state & HLS_UP) {
8761 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
17fb4f29 8762 &frame);
8763 if (ret == 0)
8764 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8765 & LINK_QUALITY_MASK;
8766 }
8767}
8768
8769static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8770{
8771 u32 frame;
8772
8773 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8774 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8775}
8776
8777static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8778{
8779 u32 frame;
8780
8781 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8782 *ldr = (frame & 0xff);
8783}
8784
8785static int read_tx_settings(struct hfi1_devdata *dd,
8786 u8 *enable_lane_tx,
8787 u8 *tx_polarity_inversion,
8788 u8 *rx_polarity_inversion,
8789 u8 *max_rate)
8790{
8791 u32 frame;
8792 int ret;
8793
8794 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8795 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8796 & ENABLE_LANE_TX_MASK;
8797 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8798 & TX_POLARITY_INVERSION_MASK;
8799 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8800 & RX_POLARITY_INVERSION_MASK;
8801 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8802 return ret;
8803}
8804
8805static int write_tx_settings(struct hfi1_devdata *dd,
8806 u8 enable_lane_tx,
8807 u8 tx_polarity_inversion,
8808 u8 rx_polarity_inversion,
8809 u8 max_rate)
8810{
8811 u32 frame;
8812
8813 /* no need to mask, all variable sizes match field widths */
8814 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8815 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8816 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8817 | max_rate << MAX_RATE_SHIFT;
8818 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8819}
8820
8821/*
8822 * Read an idle LCB message.
8823 *
8824 * Returns 0 on success, -EINVAL on error
8825 */
8826static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8827{
8828 int ret;
8829
17fb4f29 8830 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
8831 if (ret != HCMD_SUCCESS) {
8832 dd_dev_err(dd, "read idle message: type %d, err %d\n",
17fb4f29 8833 (u32)type, ret);
8834 return -EINVAL;
8835 }
8836 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8837 /* return only the payload as we already know the type */
8838 *data_out >>= IDLE_PAYLOAD_SHIFT;
8839 return 0;
8840}
8841
8842/*
8843 * Read an idle SMA message. To be done in response to a notification from
8844 * the 8051.
8845 *
8846 * Returns 0 on success, -EINVAL on error
8847 */
8848static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8849{
8850 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8851 data);
8852}
8853
8854/*
8855 * Send an idle LCB message.
8856 *
8857 * Returns 0 on success, -EINVAL on error
8858 */
8859static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8860{
8861 int ret;
8862
8863 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8864 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8865 if (ret != HCMD_SUCCESS) {
8866 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
17fb4f29 8867 data, ret);
8868 return -EINVAL;
8869 }
8870 return 0;
8871}
8872
8873/*
8874 * Send an idle SMA message.
8875 *
8876 * Returns 0 on success, -EINVAL on error
8877 */
8878int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8879{
8880 u64 data;
8881
8882 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8883 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8884 return send_idle_message(dd, data);
8885}
8886
8887/*
8888 * Initialize the LCB then do a quick link up. This may or may not be
8889 * in loopback.
8890 *
8891 * return 0 on success, -errno on error
8892 */
8893static int do_quick_linkup(struct hfi1_devdata *dd)
8894{
8895 int ret;
8896
8897 lcb_shutdown(dd, 0);
8898
8899 if (loopback) {
8900 /* LCB_CFG_LOOPBACK.VAL = 2 */
8901 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8902 write_csr(dd, DC_LCB_CFG_LOOPBACK,
17fb4f29 8903 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8904 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8905 }
8906
8907 /* start the LCBs */
8908 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8909 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8910
8911 /* simulator only loopback steps */
8912 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8913 /* LCB_CFG_RUN.EN = 1 */
8914 write_csr(dd, DC_LCB_CFG_RUN,
17fb4f29 8915 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
77241056 8916
8917 ret = wait_link_transfer_active(dd, 10);
8918 if (ret)
8919 return ret;
8920
8921 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
17fb4f29 8922 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8923 }
8924
8925 if (!loopback) {
8926 /*
8927 * When doing quick linkup and not in loopback, both
8928 * sides must be done with LCB set-up before either
8929 * starts the quick linkup. Put a delay here so that
8930 * both sides can be started and have a chance to be
8931 * done with LCB set up before resuming.
8932 */
8933 dd_dev_err(dd,
17fb4f29 8934 "Pausing for peer to be finished with LCB set up\n");
77241056 8935 msleep(5000);
17fb4f29 8936 dd_dev_err(dd, "Continuing with quick linkup\n");
8937 }
8938
8939 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8940 set_8051_lcb_access(dd);
8941
8942 /*
8943 * State "quick" LinkUp request sets the physical link state to
8944 * LinkUp without a verify capability sequence.
8945 * This state is in simulator v37 and later.
8946 */
8947 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8948 if (ret != HCMD_SUCCESS) {
8949 dd_dev_err(dd,
8950 "%s: set physical link state to quick LinkUp failed with return %d\n",
8951 __func__, ret);
8952
8953 set_host_lcb_access(dd);
8954 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8955
8956 if (ret >= 0)
8957 ret = -EINVAL;
8958 return ret;
8959 }
8960
8961 return 0; /* success */
8962}
8963
8964/*
8965 * Set the SerDes to internal loopback mode.
8966 * Returns 0 on success, -errno on error.
8967 */
8968static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8969{
8970 int ret;
8971
8972 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8973 if (ret == HCMD_SUCCESS)
8974 return 0;
8975 dd_dev_err(dd,
8976 "Set physical link state to SerDes Loopback failed with return %d\n",
8977 ret);
8978 if (ret >= 0)
8979 ret = -EINVAL;
8980 return ret;
8981}
8982
8983/*
8984 * Do all special steps to set up loopback.
8985 */
8986static int init_loopback(struct hfi1_devdata *dd)
8987{
8988 dd_dev_info(dd, "Entering loopback mode\n");
8989
8990 /* all loopbacks should disable self GUID check */
8991 write_csr(dd, DC_DC8051_CFG_MODE,
17fb4f29 8992 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8993
8994 /*
8995 * The simulator has only one loopback option - LCB. Switch
8996 * to that option, which includes quick link up.
8997 *
8998 * Accept all valid loopback values.
8999 */
9000 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9001 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9002 loopback == LOOPBACK_CABLE)) {
9003 loopback = LOOPBACK_LCB;
9004 quick_linkup = 1;
9005 return 0;
9006 }
9007
9008 /* handle serdes loopback */
9009 if (loopback == LOOPBACK_SERDES) {
 9010		/* internal serdes loopback needs quick linkup on RTL */
9011 if (dd->icode == ICODE_RTL_SILICON)
9012 quick_linkup = 1;
9013 return set_serdes_loopback_mode(dd);
9014 }
9015
9016 /* LCB loopback - handled at poll time */
9017 if (loopback == LOOPBACK_LCB) {
9018 quick_linkup = 1; /* LCB is always quick linkup */
9019
9020 /* not supported in emulation due to emulation RTL changes */
9021 if (dd->icode == ICODE_FPGA_EMULATION) {
9022 dd_dev_err(dd,
17fb4f29 9023 "LCB loopback not supported in emulation\n");
9024 return -EINVAL;
9025 }
9026 return 0;
9027 }
9028
9029 /* external cable loopback requires no extra steps */
9030 if (loopback == LOOPBACK_CABLE)
9031 return 0;
9032
9033 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9034 return -EINVAL;
9035}
9036
9037/*
9038 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9039 * used in the Verify Capability link width attribute.
9040 */
9041static u16 opa_to_vc_link_widths(u16 opa_widths)
9042{
9043 int i;
9044 u16 result = 0;
9045
9046 static const struct link_bits {
9047 u16 from;
9048 u16 to;
9049 } opa_link_xlate[] = {
9050 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9051 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9052 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9053 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9054 };
9055
9056 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9057 if (opa_widths & opa_link_xlate[i].from)
9058 result |= opa_link_xlate[i].to;
9059 }
9060 return result;
9061}
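/*
 * Example (illustrative): opa_widths = OPA_LINK_WIDTH_1X |
 * OPA_LINK_WIDTH_4X translates to (1 << 0) | (1 << 3) = 0x9, since
 * the Verify Capability encoding uses bit N-1 for an NX width.
 */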
9062
9063/*
9064 * Set link attributes before moving to polling.
9065 */
9066static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9067{
9068 struct hfi1_devdata *dd = ppd->dd;
9069 u8 enable_lane_tx;
9070 u8 tx_polarity_inversion;
9071 u8 rx_polarity_inversion;
9072 int ret;
9073
9074 /* reset our fabric serdes to clear any lingering problems */
9075 fabric_serdes_reset(dd);
9076
9077 /* set the local tx rate - need to read-modify-write */
9078 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
17fb4f29 9079 &rx_polarity_inversion, &ppd->local_tx_rate);
9080 if (ret)
9081 goto set_local_link_attributes_fail;
9082
9083 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9084 /* set the tx rate to the fastest enabled */
9085 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9086 ppd->local_tx_rate = 1;
9087 else
9088 ppd->local_tx_rate = 0;
9089 } else {
9090 /* set the tx rate to all enabled */
9091 ppd->local_tx_rate = 0;
9092 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9093 ppd->local_tx_rate |= 2;
9094 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9095 ppd->local_tx_rate |= 1;
9096 }
9097
9098 enable_lane_tx = 0xF; /* enable all four lanes */
77241056 9099 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
17fb4f29 9100 rx_polarity_inversion, ppd->local_tx_rate);
9101 if (ret != HCMD_SUCCESS)
9102 goto set_local_link_attributes_fail;
9103
9104 /*
9105 * DC supports continuous updates.
9106 */
9107 ret = write_vc_local_phy(dd,
9108 0 /* no power management */,
9109 1 /* continuous updates */);
9110 if (ret != HCMD_SUCCESS)
9111 goto set_local_link_attributes_fail;
9112
9113 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9114 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9115 ppd->port_crc_mode_enabled);
9116 if (ret != HCMD_SUCCESS)
9117 goto set_local_link_attributes_fail;
9118
9119 ret = write_vc_local_link_width(dd, 0, 0,
9120 opa_to_vc_link_widths(
9121 ppd->link_width_enabled));
9122 if (ret != HCMD_SUCCESS)
9123 goto set_local_link_attributes_fail;
9124
9125 /* let peer know who we are */
9126 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9127 if (ret == HCMD_SUCCESS)
9128 return 0;
9129
9130set_local_link_attributes_fail:
9131 dd_dev_err(dd,
9132 "Failed to set local link attributes, return 0x%x\n",
9133 ret);
9134 return ret;
9135}
9136
9137/*
9138 * Call this to start the link.
9139 * Do not do anything if the link is disabled.
9140 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9141 */
9142int start_link(struct hfi1_pportdata *ppd)
9143{
9144 /*
9145 * Tune the SerDes to a ballpark setting for optimal signal and bit
9146 * error rate. Needs to be done before starting the link.
9147 */
9148 tune_serdes(ppd);
9149
9150 if (!ppd->link_enabled) {
9151 dd_dev_info(ppd->dd,
9152 "%s: stopping link start because link is disabled\n",
9153 __func__);
9154 return 0;
9155 }
9156 if (!ppd->driver_link_ready) {
9157 dd_dev_info(ppd->dd,
9158 "%s: stopping link start because driver is not ready\n",
9159 __func__);
9160 return 0;
9161 }
9162
9163 /*
9164 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9165 * pkey table can be configured properly if the HFI unit is connected
9166 * to switch port with MgmtAllowed=NO
9167 */
9168 clear_full_mgmt_pkey(ppd);
9169
623bba2d 9170 return set_link_state(ppd, HLS_DN_POLL);
9171}
9172
9173static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9174{
9175 struct hfi1_devdata *dd = ppd->dd;
9176 u64 mask;
9177 unsigned long timeout;
9178
9179 /*
9180 * Some QSFP cables have a quirk that asserts the IntN line as a side
9181 * effect of power up on plug-in. We ignore this false positive
9182 * interrupt until the module has finished powering up by waiting for
9183 * a minimum timeout of the module inrush initialization time of
9184 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9185 * module have stabilized.
9186 */
9187 msleep(500);
9188
9189 /*
9190 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9191 */
9192 timeout = jiffies + msecs_to_jiffies(2000);
9193 while (1) {
9194 mask = read_csr(dd, dd->hfi1_id ?
9195 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
5fbd98dd 9196 if (!(mask & QSFP_HFI0_INT_N))
8ebd4cf1 9197 break;
9198 if (time_after(jiffies, timeout)) {
9199 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9200 __func__);
9201 break;
9202 }
9203 udelay(2);
9204 }
9205}
9206
9207static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9208{
9209 struct hfi1_devdata *dd = ppd->dd;
9210 u64 mask;
9211
9212 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9213 if (enable) {
9214 /*
9215 * Clear the status register to avoid an immediate interrupt
9216 * when we re-enable the IntN pin
9217 */
9218 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9219 QSFP_HFI0_INT_N);
8ebd4cf1 9220 mask |= (u64)QSFP_HFI0_INT_N;
5fbd98dd 9221 } else {
8ebd4cf1 9222 mask &= ~(u64)QSFP_HFI0_INT_N;
5fbd98dd 9223 }
9224 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9225}
9226
9227void reset_qsfp(struct hfi1_pportdata *ppd)
9228{
9229 struct hfi1_devdata *dd = ppd->dd;
9230 u64 mask, qsfp_mask;
9231
9232 /* Disable INT_N from triggering QSFP interrupts */
9233 set_qsfp_int_n(ppd, 0);
9234
9235 /* Reset the QSFP */
77241056 9236 mask = (u64)QSFP_HFI0_RESET_N;
9237
9238 qsfp_mask = read_csr(dd,
17fb4f29 9239 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9240 qsfp_mask &= ~mask;
9241 write_csr(dd,
17fb4f29 9242 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9243
9244 udelay(10);
9245
9246 qsfp_mask |= mask;
9247 write_csr(dd,
17fb4f29 9248 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9249
9250 wait_for_qsfp_init(ppd);
9251
9252 /*
9253 * Allow INT_N to trigger the QSFP interrupt to watch
9254 * for alarms and warnings
9255 */
9256 set_qsfp_int_n(ppd, 1);
9257}
9258
9259static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9260 u8 *qsfp_interrupt_status)
9261{
9262 struct hfi1_devdata *dd = ppd->dd;
9263
9264 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9265 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
 9266		dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
9267 __func__);
9268
9269 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9270 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9271 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9272 __func__);
77241056 9273
9274 /*
9275 * The remaining alarms/warnings don't matter if the link is down.
9276 */
9277 if (ppd->host_link_state & HLS_DOWN)
9278 return 0;
9279
77241056 9280 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9281 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9282 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9283 __func__);
9284
9285 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9286 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9287 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9288 __func__);
9289
9290 /* Byte 2 is vendor specific */
9291
9292 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9293 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9294 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9295 __func__);
9296
9297 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9298 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9299 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9300 __func__);
9301
9302 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9303 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9304 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9305 __func__);
9306
9307 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9308 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9309 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9310 __func__);
9311
9312 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9313 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9314 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9315 __func__);
9316
9317 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9318 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9319 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9320 __func__);
9321
9322 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9323 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9324 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9325 __func__);
9326
9327 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9328 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9329 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9330 __func__);
9331
9332 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9333 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9334 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9335 __func__);
9336
9337 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9338 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9339 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9340 __func__);
9341
9342 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9343 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9344 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9345 __func__);
9346
9347 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9348 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9349 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9350 __func__);
9351
9352 /* Bytes 9-10 and 11-12 are reserved */
9353 /* Bytes 13-15 are vendor specific */
9354
9355 return 0;
9356}
9357
623bba2d 9358/* This routine is only scheduled if the QSFP module-present signal is asserted */
8ebd4cf1 9359void qsfp_event(struct work_struct *work)
9360{
9361 struct qsfp_data *qd;
9362 struct hfi1_pportdata *ppd;
9363 struct hfi1_devdata *dd;
9364
9365 qd = container_of(work, struct qsfp_data, qsfp_work);
9366 ppd = qd->ppd;
9367 dd = ppd->dd;
9368
9369 /* Sanity check */
9370 if (!qsfp_mod_present(ppd))
9371 return;
9372
9373 /*
9374 * Turn DC back on after cable has been re-inserted. Up until
9375 * now, the DC has been in reset to save power.
9376 */
9377 dc_start(dd);
9378
9379 if (qd->cache_refresh_required) {
8ebd4cf1 9380 set_qsfp_int_n(ppd, 0);
77241056 9381
9382 wait_for_qsfp_init(ppd);
9383
9384 /*
9385 * Allow INT_N to trigger the QSFP interrupt to watch
9386 * for alarms and warnings
77241056 9387 */
9388 set_qsfp_int_n(ppd, 1);
9389
8ebd4cf1 9390 start_link(ppd);
9391 }
9392
9393 if (qd->check_interrupt_flags) {
9394 u8 qsfp_interrupt_status[16] = {0,};
9395
9396 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9397 &qsfp_interrupt_status[0], 16) != 16) {
77241056 9398 dd_dev_info(dd,
9399 "%s: Failed to read status of QSFP module\n",
9400 __func__);
9401 } else {
9402 unsigned long flags;
77241056 9403
9404 handle_qsfp_error_conditions(
9405 ppd, qsfp_interrupt_status);
9406 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9407 ppd->qsfp_info.check_interrupt_flags = 0;
9408 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
17fb4f29 9409 flags);
9410 }
9411 }
9412}
9413
8ebd4cf1 9414static void init_qsfp_int(struct hfi1_devdata *dd)
77241056 9415{
9416 struct hfi1_pportdata *ppd = dd->pport;
9417 u64 qsfp_mask, cce_int_mask;
9418 const int qsfp1_int_smask = QSFP1_INT % 64;
9419 const int qsfp2_int_smask = QSFP2_INT % 64;
77241056 9420
9421 /*
 9422	 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0.
9423 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9424 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9425 * the index of the appropriate CSR in the CCEIntMask CSR array
9426 */
9427 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9428 (8 * (QSFP1_INT / 64)));
9429 if (dd->hfi1_id) {
9430 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9431 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9432 cce_int_mask);
9433 } else {
9434 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9435 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9436 cce_int_mask);
9437 }
9438
9439 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9440 /* Clear current status to avoid spurious interrupts */
9441 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9442 qsfp_mask);
9443 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9444 qsfp_mask);
9445
9446 set_qsfp_int_n(ppd, 0);
9447
9448 /* Handle active low nature of INT_N and MODPRST_N pins */
9449 if (qsfp_mod_present(ppd))
9450 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9451 write_csr(dd,
9452 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9453 qsfp_mask);
9454}
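/*
 * Index note for the masking above (illustrative): QSFP1_INT and
 * QSFP2_INT are adjacent bits of the same 64-bit CSR, so
 * QSFP1_INT / 64 == QSFP2_INT / 64 and either constant selects the
 * correct CCEIntMask CSR; only the % 64 bit position differs.
 */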
9455
9456/*
9457 * Do a one-time initialize of the LCB block.
9458 */
9459static void init_lcb(struct hfi1_devdata *dd)
9460{
9461 /* simulator does not correctly handle LCB cclk loopback, skip */
9462 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9463 return;
9464
9465 /* the DC has been reset earlier in the driver load */
9466
9467 /* set LCB for cclk loopback on the port */
9468 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9469 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9470 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9471 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9472 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9473 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9474 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9475}
9476
9477/*
9478 * Perform a test read on the QSFP. Return 0 on success, -ERRNO
9479 * on error.
9480 */
9481static int test_qsfp_read(struct hfi1_pportdata *ppd)
9482{
9483 int ret;
9484 u8 status;
9485
9486 /* report success if not a QSFP */
9487 if (ppd->port_type != PORT_TYPE_QSFP)
9488 return 0;
9489
9490 /* read byte 2, the status byte */
9491 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9492 if (ret < 0)
9493 return ret;
9494 if (ret != 1)
9495 return -EIO;
9496
9497 return 0; /* success */
9498}
9499
9500/*
9501 * Values for QSFP retry.
9502 *
9503 * Give up after 10s (20 x 500ms). The overall timeout was empirically
9504 * arrived at from experience on a large cluster.
9505 */
9506#define MAX_QSFP_RETRIES 20
9507#define QSFP_RETRY_WAIT 500 /* msec */
9508
9509/*
9510 * Try a QSFP read. If it fails, schedule a retry for later.
9511 * Called on first link activation after driver load.
9512 */
9513static void try_start_link(struct hfi1_pportdata *ppd)
9514{
9515 if (test_qsfp_read(ppd)) {
9516 /* read failed */
9517 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9518 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9519 return;
9520 }
9521 dd_dev_info(ppd->dd,
9522 "QSFP not responding, waiting and retrying %d\n",
9523 (int)ppd->qsfp_retry_count);
9524 ppd->qsfp_retry_count++;
9525 queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
9526 msecs_to_jiffies(QSFP_RETRY_WAIT));
9527 return;
9528 }
9529 ppd->qsfp_retry_count = 0;
9530
9531 start_link(ppd);
9532}
9533
9534/*
9535 * Workqueue function to start the link after a delay.
9536 */
9537void handle_start_link(struct work_struct *work)
9538{
9539 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9540 start_link_work.work);
9541 try_start_link(ppd);
9542}
9543
9544int bringup_serdes(struct hfi1_pportdata *ppd)
9545{
9546 struct hfi1_devdata *dd = ppd->dd;
9547 u64 guid;
9548 int ret;
9549
9550 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9551 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9552
a6cd5f08 9553 guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9554 if (!guid) {
9555 if (dd->base_guid)
9556 guid = dd->base_guid + ppd->port - 1;
a6cd5f08 9557 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9558 }
9559
9560 /* Set linkinit_reason on power up per OPA spec */
9561 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9562
9563 /* one-time init of the LCB */
9564 init_lcb(dd);
9565
9566 if (loopback) {
9567 ret = init_loopback(dd);
9568 if (ret < 0)
9569 return ret;
9570 }
9571
9572 get_port_type(ppd);
9573 if (ppd->port_type == PORT_TYPE_QSFP) {
9574 set_qsfp_int_n(ppd, 0);
9575 wait_for_qsfp_init(ppd);
9576 set_qsfp_int_n(ppd, 1);
9577 }
9578
9579 try_start_link(ppd);
9580 return 0;
9581}
9582
9583void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9584{
9585 struct hfi1_devdata *dd = ppd->dd;
9586
9587 /*
 9588	 * Shut down the link and keep it down. First clear the flag that
 9589	 * says the driver wants to allow the link to be up (driver_link_ready).
9590 * Then make sure the link is not automatically restarted
9591 * (link_enabled). Cancel any pending restart. And finally
9592 * go offline.
9593 */
9594 ppd->driver_link_ready = 0;
9595 ppd->link_enabled = 0;
9596
9597 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9598 flush_delayed_work(&ppd->start_link_work);
9599 cancel_delayed_work_sync(&ppd->start_link_work);
9600
9601 ppd->offline_disabled_reason =
9602 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
77241056 9603 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
17fb4f29 9604 OPA_LINKDOWN_REASON_SMA_DISABLED);
9605 set_link_state(ppd, HLS_DN_OFFLINE);
9606
9607 /* disable the port */
9608 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9609}
9610
9611static inline int init_cpu_counters(struct hfi1_devdata *dd)
9612{
9613 struct hfi1_pportdata *ppd;
9614 int i;
9615
9616 ppd = (struct hfi1_pportdata *)(dd + 1);
9617 for (i = 0; i < dd->num_pports; i++, ppd++) {
9618 ppd->ibport_data.rvp.rc_acks = NULL;
9619 ppd->ibport_data.rvp.rc_qacks = NULL;
9620 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9621 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9622 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9623 if (!ppd->ibport_data.rvp.rc_acks ||
9624 !ppd->ibport_data.rvp.rc_delayed_comp ||
9625 !ppd->ibport_data.rvp.rc_qacks)
9626 return -ENOMEM;
9627 }
9628
9629 return 0;
9630}
9631
9632static const char * const pt_names[] = {
9633 "expected",
9634 "eager",
9635 "invalid"
9636};
9637
9638static const char *pt_name(u32 type)
9639{
9640 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9641}
9642
9643/*
9644 * index is the index into the receive array
9645 */
9646void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9647 u32 type, unsigned long pa, u16 order)
9648{
9649 u64 reg;
9650 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9651 (dd->kregbase + RCV_ARRAY));
9652
9653 if (!(dd->flags & HFI1_PRESENT))
9654 goto done;
9655
9656 if (type == PT_INVALID) {
9657 pa = 0;
9658 } else if (type > PT_INVALID) {
9659 dd_dev_err(dd,
9660 "unexpected receive array type %u for index %u, not handled\n",
9661 type, index);
9662 goto done;
9663 }
9664
9665 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9666 pt_name(type), index, pa, (unsigned long)order);
9667
9668#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9669 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9670 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9671 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9672 << RCV_ARRAY_RT_ADDR_SHIFT;
9673 writeq(reg, base + (index * 8));
9674
9675 if (type == PT_EAGER)
9676 /*
9677 * Eager entries are written one-by-one so we have to push them
9678 * after we write the entry.
9679 */
9680 flush_wc();
9681done:
9682 return;
9683}
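/*
 * Encoding example (illustrative): pa = 0x12345000 with order = 1
 * produces an address field of pa >> 12 = 0x12345, so the RcvArray
 * entry packs the write-enable bit, the buffer-size order, and the
 * 4 KB-aligned physical page number into a single 64-bit register.
 */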
9684
9685void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9686{
9687 struct hfi1_devdata *dd = rcd->dd;
9688 u32 i;
9689
9690 /* this could be optimized */
9691 for (i = rcd->eager_base; i < rcd->eager_base +
9692 rcd->egrbufs.alloced; i++)
9693 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9694
9695 for (i = rcd->expected_base;
9696 i < rcd->expected_base + rcd->expected_count; i++)
9697 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9698}
9699
9700struct ib_header *hfi1_get_msgheader(
9701 struct hfi1_devdata *dd, __le32 *rhf_addr)
9702{
9703 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9704
261a4351 9705 return (struct ib_header *)
9706 (rhf_addr - dd->rhf_offset + offset);
9707}
9708
9709static const char * const ib_cfg_name_strings[] = {
9710 "HFI1_IB_CFG_LIDLMC",
9711 "HFI1_IB_CFG_LWID_DG_ENB",
9712 "HFI1_IB_CFG_LWID_ENB",
9713 "HFI1_IB_CFG_LWID",
9714 "HFI1_IB_CFG_SPD_ENB",
9715 "HFI1_IB_CFG_SPD",
9716 "HFI1_IB_CFG_RXPOL_ENB",
9717 "HFI1_IB_CFG_LREV_ENB",
9718 "HFI1_IB_CFG_LINKLATENCY",
9719 "HFI1_IB_CFG_HRTBT",
9720 "HFI1_IB_CFG_OP_VLS",
9721 "HFI1_IB_CFG_VL_HIGH_CAP",
9722 "HFI1_IB_CFG_VL_LOW_CAP",
9723 "HFI1_IB_CFG_OVERRUN_THRESH",
9724 "HFI1_IB_CFG_PHYERR_THRESH",
9725 "HFI1_IB_CFG_LINKDEFAULT",
9726 "HFI1_IB_CFG_PKEYS",
9727 "HFI1_IB_CFG_MTU",
9728 "HFI1_IB_CFG_LSTATE",
9729 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9730 "HFI1_IB_CFG_PMA_TICKS",
9731 "HFI1_IB_CFG_PORT"
9732};
9733
9734static const char *ib_cfg_name(int which)
9735{
9736 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9737 return "invalid";
9738 return ib_cfg_name_strings[which];
9739}
9740
9741int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9742{
9743 struct hfi1_devdata *dd = ppd->dd;
9744 int val = 0;
9745
9746 switch (which) {
9747 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9748 val = ppd->link_width_enabled;
9749 break;
9750 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9751 val = ppd->link_width_active;
9752 break;
9753 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9754 val = ppd->link_speed_enabled;
9755 break;
9756 case HFI1_IB_CFG_SPD: /* current Link speed */
9757 val = ppd->link_speed_active;
9758 break;
9759
9760 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9761 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9762 case HFI1_IB_CFG_LINKLATENCY:
9763 goto unimplemented;
9764
9765 case HFI1_IB_CFG_OP_VLS:
9766 val = ppd->vls_operational;
9767 break;
9768 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9769 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9770 break;
9771 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9772 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9773 break;
9774 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9775 val = ppd->overrun_threshold;
9776 break;
9777 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9778 val = ppd->phy_error_threshold;
9779 break;
9780 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9781 val = dd->link_default;
9782 break;
9783
9784 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9785 case HFI1_IB_CFG_PMA_TICKS:
9786 default:
9787unimplemented:
9788 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9789 dd_dev_info(
9790 dd,
9791 "%s: which %s: not implemented\n",
9792 __func__,
9793 ib_cfg_name(which));
9794 break;
9795 }
9796
9797 return val;
9798}
9799
9800/*
9801 * The largest MAD packet size.
9802 */
9803#define MAX_MAD_PACKET 2048
9804
9805/*
9806 * Return the maximum header bytes that can go on the _wire_
9807 * for this device. This count includes the ICRC, which is
9808 * not part of the packet held in memory but is appended
9809 * by the HW.
9810 * This is dependent on the device's receive header entry size.
9811 * HFI allows this to be set per-receive context, but the
9812 * driver presently enforces a global value.
9813 */
9814u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9815{
9816 /*
9817 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9818 * the Receive Header Entry Size minus the PBC (or RHF) size
9819 * plus one DW for the ICRC appended by HW.
9820 *
9821 * dd->rcd[0].rcvhdrqentsize is in DW.
9822 * We use rcd[0] as all contexts will have the same value. Also,
9823 * the first kernel context would have been allocated by now so
9824 * we are guaranteed a valid value.
9825 */
9826 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9827}
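/*
 * Illustrative sketch, not part of the driver: the DW-to-byte arithmetic
 * above in isolation. The 32 DW entry size in the example is hypothetical,
 * not a value read from hardware.
 */
static inline unsigned int example_lrh_max_header_bytes(unsigned int entsize_dw)
{
	/* drop 2 DW for the PBC/RHF, add 1 DW for the HW-appended ICRC */
	return (entsize_dw - 2 + 1) << 2;	/* DW -> bytes */
}
/* e.g. a 32 DW entry size allows (32 - 2 + 1) * 4 = 124 header bytes */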
9828
9829/*
9830 * Set Send Length
9831 * @ppd - per port data
9832 *
9833 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9834 * registers compare against LRH.PktLen, so use the max bytes included
9835 * in the LRH.
9836 *
9837 * This routine changes all VL values except VL15, which it maintains at
9838 * the same value.
9839 */
9840static void set_send_length(struct hfi1_pportdata *ppd)
9841{
9842 struct hfi1_devdata *dd = ppd->dd;
9843 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9844 u32 maxvlmtu = dd->vld[15].mtu;
9845 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9846 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9847 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9848 int i, j;
9849 u32 thres;
9850
9851 for (i = 0; i < ppd->vls_supported; i++) {
9852 if (dd->vld[i].mtu > maxvlmtu)
9853 maxvlmtu = dd->vld[i].mtu;
9854 if (i <= 3)
9855 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9856 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9857 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9858 else
9859 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9860 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9861 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9862 }
9863 write_csr(dd, SEND_LEN_CHECK0, len1);
9864 write_csr(dd, SEND_LEN_CHECK1, len2);
9865 /* adjust kernel credit return thresholds based on new MTUs */
9866 /* all kernel receive contexts have the same hdrqentsize */
9867 for (i = 0; i < ppd->vls_supported; i++) {
9868 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9869 sc_mtu_to_threshold(dd->vld[i].sc,
9870 dd->vld[i].mtu,
9871 dd->rcd[0]->rcvhdrqentsize));
9872 for (j = 0; j < INIT_SC_PER_VL; j++)
9873 sc_set_cr_threshold(
9874 pio_select_send_context_vl(dd, j, i),
9875 thres);
9876 }
9877 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9878 sc_mtu_to_threshold(dd->vld[15].sc,
9879 dd->vld[15].mtu,
9880 dd->rcd[0]->rcvhdrqentsize));
9881 sc_set_cr_threshold(dd->vld[15].sc, thres);
9882
9883 /* Adjust maximum MTU for the port in DC */
9884 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9885 (ilog2(maxvlmtu >> 8) + 1);
9886 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9887 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9888 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9889 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9890 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9891}
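/*
 * Illustrative sketch, not part of the driver: the packing pattern used
 * for SEND_LEN_CHECK0/1 above - four per-VL length fields laid out in one
 * 64-bit CSR at a fixed stride. EX_STRIDE and EX_MASK are invented for the
 * example; the real values come from the chip register definitions
 * (SEND_LEN_CHECK0_LEN_VL1_SHIFT acts as the stride in the code above).
 */
#define EX_STRIDE 12
#define EX_MASK 0xfffULL

static unsigned long long example_pack_vl_lengths(const unsigned int len[4])
{
	unsigned long long reg = 0;
	int i;

	for (i = 0; i < 4; i++)
		reg |= ((unsigned long long)len[i] & EX_MASK) << (i * EX_STRIDE);
	return reg;
}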
9892
9893static void set_lidlmc(struct hfi1_pportdata *ppd)
9894{
9895 int i;
9896 u64 sreg = 0;
9897 struct hfi1_devdata *dd = ppd->dd;
9898 u32 mask = ~((1U << ppd->lmc) - 1);
9899 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9900
9901 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9902 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9903 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9904 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9905 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9906 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9907 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9908
9909 /*
9910 * Iterate over all the send contexts and set their SLID check
9911 */
9912 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9913 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9914 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9915 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9916
9917 for (i = 0; i < dd->chip_send_contexts; i++) {
9918 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9919 i, (u32)sreg);
9920 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9921 }
9922
9923 /* Now we have to do the same thing for the sdma engines */
9924 sdma_update_lmc(dd, mask, ppd->lid);
9925}
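/*
 * Worked example, not part of the driver: with LMC, the low 'lmc' bits of
 * the LID are wildcarded by mask = ~((1U << lmc) - 1). For lmc = 2 the
 * mask is 0xfffffffc, so LIDs base..base+3 all pass the SLID/DLID checks
 * programmed above.
 */
static inline int example_lid_matches(unsigned int lid, unsigned int base,
				      unsigned int lmc)
{
	unsigned int mask = ~((1U << lmc) - 1);

	return (lid & mask) == (base & mask);
}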
9926
9927static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9928{
9929 unsigned long timeout;
9930 u32 curr_state;
9931
9932 timeout = jiffies + msecs_to_jiffies(msecs);
9933 while (1) {
9934 curr_state = read_physical_state(dd);
9935 if (curr_state == state)
9936 break;
9937 if (time_after(jiffies, timeout)) {
9938 dd_dev_err(dd,
9939 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9940 state, curr_state);
9941 return -ETIMEDOUT;
9942 }
9943 usleep_range(1950, 2050); /* sleep 2ms-ish */
9944 }
9945
9946 return 0;
9947}
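/*
 * Illustrative sketch, not part of the driver: the bounded-poll shape
 * shared by wait_phy_linkstate() above and wait_link_transfer_active()
 * further below, reduced to pseudocode. read_state(), now_ms() and
 * sleep_a_little() are hypothetical stand-ins for the CSR read and the
 * jiffies arithmetic.
 *
 *	deadline = now_ms() + timeout_ms;
 *	for (;;) {
 *		if (read_state() == wanted)
 *			return 0;
 *		if (now_ms() > deadline)
 *			return -ETIMEDOUT;
 *		sleep_a_little();
 *	}
 */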
9948
9949static const char *state_completed_string(u32 completed)
9950{
9951 static const char * const state_completed[] = {
9952 "EstablishComm",
9953 "OptimizeEQ",
9954 "VerifyCap"
9955 };
9956
9957 if (completed < ARRAY_SIZE(state_completed))
9958 return state_completed[completed];
9959
9960 return "unknown";
9961}
9962
9963static const char all_lanes_dead_timeout_expired[] =
9964 "All lanes were inactive - was the interconnect media removed?";
9965static const char tx_out_of_policy[] =
9966 "Passing lanes on local port do not meet the local link width policy";
9967static const char no_state_complete[] =
9968 "State timeout occurred before link partner completed the state";
9969static const char * const state_complete_reasons[] = {
9970 [0x00] = "Reason unknown",
9971 [0x01] = "Link was halted by driver, refer to LinkDownReason",
9972 [0x02] = "Link partner reported failure",
9973 [0x10] = "Unable to achieve frame sync on any lane",
9974 [0x11] =
9975 "Unable to find a common bit rate with the link partner",
9976 [0x12] =
9977 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
9978 [0x13] =
9979 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
9980 [0x14] = no_state_complete,
9981 [0x15] =
9982 "State timeout occurred before link partner identified equalization presets",
9983 [0x16] =
9984 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
9985 [0x17] = tx_out_of_policy,
9986 [0x20] = all_lanes_dead_timeout_expired,
9987 [0x21] =
9988 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
9989 [0x22] = no_state_complete,
9990 [0x23] =
9991 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
9992 [0x24] = tx_out_of_policy,
9993 [0x30] = all_lanes_dead_timeout_expired,
9994 [0x31] =
9995 "State timeout occurred waiting for host to process received frames",
9996 [0x32] = no_state_complete,
9997 [0x33] =
9998 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
9999 [0x34] = tx_out_of_policy,
10000};
10001
10002static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10003 u32 code)
10004{
10005 const char *str = NULL;
10006
10007 if (code < ARRAY_SIZE(state_complete_reasons))
10008 str = state_complete_reasons[code];
10009
10010 if (str)
10011 return str;
10012 return "Reserved";
10013}
10014
10015/* describe the given last state complete frame */
10016static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10017 const char *prefix)
10018{
10019 struct hfi1_devdata *dd = ppd->dd;
10020 u32 success;
10021 u32 state;
10022 u32 reason;
10023 u32 lanes;
10024
10025 /*
10026 * Decode frame:
10027 * [ 0: 0] - success
10028 * [ 3: 1] - state
10029 * [ 7: 4] - next state timeout
10030 * [15: 8] - reason code
10031 * [31:16] - lanes
10032 */
10033 success = frame & 0x1;
10034 state = (frame >> 1) & 0x7;
10035 reason = (frame >> 8) & 0xff;
10036 lanes = (frame >> 16) & 0xffff;
10037
10038 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10039 prefix, frame);
10040 dd_dev_err(dd, " last reported state: %s (0x%x)\n",
10041 state_completed_string(state), state);
10042 dd_dev_err(dd, " state successfully completed: %s\n",
10043 success ? "yes" : "no");
10044 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10045 reason, state_complete_reason_code_string(ppd, reason));
10046 dd_dev_err(dd, " passing lane mask: 0x%x\n", lanes);
10047}
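/*
 * Worked example, not part of the driver: applying the field layout above
 * to the invented frame value 0x00031512 -
 *   success = 0x00031512 & 0x1         = 0 (did not complete)
 *   state   = (0x00031512 >> 1) & 0x7  = 1 -> "OptimizeEQ"
 *   reason  = (0x00031512 >> 8) & 0xff = 0x15
 *   lanes   = (0x00031512 >> 16)       = 0x0003 (lanes 0 and 1 passing)
 */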
10048
10049/*
10050 * Read the last state complete frames and explain them. This routine
10051 * expects to be called if the link went down during link negotiation
10052 * and initialization (LNI). That is, anywhere between polling and link up.
10053 */
10054static void check_lni_states(struct hfi1_pportdata *ppd)
10055{
10056 u32 last_local_state;
10057 u32 last_remote_state;
10058
10059 read_last_local_state(ppd->dd, &last_local_state);
10060 read_last_remote_state(ppd->dd, &last_remote_state);
10061
10062 /*
10063 * Don't report anything if there is nothing to report. A value of
10064 * 0 means the link was taken down while polling and there was no
10065 * training in-process.
10066 */
10067 if (last_local_state == 0 && last_remote_state == 0)
10068 return;
10069
10070 decode_state_complete(ppd, last_local_state, "transmitted");
10071 decode_state_complete(ppd, last_remote_state, "received");
10072}
10073
10074/* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10075static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10076{
10077 u64 reg;
10078 unsigned long timeout;
10079
10080 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10081 timeout = jiffies + msecs_to_jiffies(wait_ms);
10082 while (1) {
10083 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10084 if (reg)
10085 break;
10086 if (time_after(jiffies, timeout)) {
10087 dd_dev_err(dd,
10088 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10089 return -ETIMEDOUT;
10090 }
10091 udelay(2);
10092 }
10093 return 0;
10094}
10095
10096/* called when the logical link state is not down as it should be */
10097static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10098{
10099 struct hfi1_devdata *dd = ppd->dd;
10100
10101 /*
10102 * Bring link up in LCB loopback
10103 */
10104 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10105 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10106 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10107
10108 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10109 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10110 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10111 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10112
10113 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10114 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10115 udelay(3);
10116 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10117 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10118
10119 wait_link_transfer_active(dd, 100);
10120
10121 /*
10122 * Bring the link down again.
10123 */
10124 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10125 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10126 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10127
10128 /* call again to adjust ppd->statusp, if needed */
10129 get_logical_state(ppd);
10130}
10131
10132/*
10133 * Helper for set_link_state(). Do not call except from that routine.
10134 * Expects ppd->hls_mutex to be held.
10135 *
10136 * @rem_reason value to be sent to the neighbor
10137 *
10138 * LinkDownReasons only set if transition succeeds.
10139 */
10140static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10141{
10142 struct hfi1_devdata *dd = ppd->dd;
10143 u32 pstate, previous_state;
10144 int ret;
10145 int do_transition;
10146 int do_wait;
10147
10148 previous_state = ppd->host_link_state;
10149 ppd->host_link_state = HLS_GOING_OFFLINE;
10150 pstate = read_physical_state(dd);
10151 if (pstate == PLS_OFFLINE) {
10152 do_transition = 0; /* in right state */
10153 do_wait = 0; /* ...no need to wait */
10154 } else if ((pstate & 0xff) == PLS_OFFLINE) {
10155 do_transition = 0; /* in an offline transient state */
10156 do_wait = 1; /* ...wait for it to settle */
10157 } else {
10158 do_transition = 1; /* need to move to offline */
10159 do_wait = 1; /* ...will need to wait */
10160 }
10161
10162 if (do_transition) {
10163 ret = set_physical_link_state(dd,
10164 (rem_reason << 8) | PLS_OFFLINE);
10165
10166 if (ret != HCMD_SUCCESS) {
10167 dd_dev_err(dd,
10168 "Failed to transition to Offline link state, return %d\n",
10169 ret);
10170 return -EINVAL;
10171 }
10172 if (ppd->offline_disabled_reason ==
10173 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10174 ppd->offline_disabled_reason =
10175 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10176 }
10177
10178 if (do_wait) {
10179 /* it can take a while for the link to go down */
10180 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
10181 if (ret < 0)
10182 return ret;
10183 }
10184
10185 /*
10186 * Now in charge of LCB - must be after the physical state is
10187 * offline.quiet and before host_link_state is changed.
10188 */
10189 set_host_lcb_access(dd);
10190 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10191
10192 /* make sure the logical state is also down */
10193 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10194 if (ret)
10195 force_logical_link_state_down(ppd);
10196
10197 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10198
10199 if (ppd->port_type == PORT_TYPE_QSFP &&
10200 ppd->qsfp_info.limiting_active &&
10201 qsfp_mod_present(ppd)) {
10202 int ret;
10203
10204 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10205 if (ret == 0) {
10206 set_qsfp_tx(ppd, 0);
10207 release_chip_resource(dd, qsfp_resource(dd));
10208 } else {
10209 /* not fatal, but should warn */
10210 dd_dev_err(dd,
10211 "Unable to acquire lock to turn off QSFP TX\n");
10212 }
10213 }
10214
10215 /*
10216 * The LNI has a mandatory wait time after the physical state
10217 * moves to Offline.Quiet. The wait time may be different
10218 * depending on how the link went down. The 8051 firmware
10219 * will observe the needed wait time and only move to ready
10220 * when that is completed. The largest of the quiet timeouts
10221 * is 6s, so wait that long and then at least 0.5s more for
10222 * other transitions, and another 0.5s for a buffer.
10223 */
10224 ret = wait_fm_ready(dd, 7000);
10225 if (ret) {
10226 dd_dev_err(dd,
10227 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10228 /* state is really offline, so make it so */
10229 ppd->host_link_state = HLS_DN_OFFLINE;
10230 return ret;
10231 }
10232
10233 /*
10234 * The state is now offline and the 8051 is ready to accept host
10235 * requests.
10236 * - change our state
10237 * - notify others if we were previously in a linkup state
10238 */
10239 ppd->host_link_state = HLS_DN_OFFLINE;
10240 if (previous_state & HLS_UP) {
10241 /* went down while link was up */
10242 handle_linkup_change(dd, 0);
10243 } else if (previous_state
10244 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10245 /* went down while attempting link up */
10246 check_lni_states(ppd);
10247 }
10248
10249 /* the active link width (downgrade) is 0 on link down */
10250 ppd->link_width_active = 0;
10251 ppd->link_width_downgrade_tx_active = 0;
10252 ppd->link_width_downgrade_rx_active = 0;
10253 ppd->current_egress_rate = 0;
10254 return 0;
10255}
10256
10257/* return the link state name */
10258static const char *link_state_name(u32 state)
10259{
10260 const char *name;
10261 int n = ilog2(state);
10262 static const char * const names[] = {
10263 [__HLS_UP_INIT_BP] = "INIT",
10264 [__HLS_UP_ARMED_BP] = "ARMED",
10265 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10266 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10267 [__HLS_DN_POLL_BP] = "POLL",
10268 [__HLS_DN_DISABLE_BP] = "DISABLE",
10269 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10270 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10271 [__HLS_GOING_UP_BP] = "GOING_UP",
10272 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10273 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10274 };
10275
10276 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10277 return name ? name : "unknown";
10278}
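/*
 * Illustrative note, not part of the driver: HLS_* values are one-hot
 * masks, so ilog2(state) recovers the __HLS_*_BP bit position used to
 * index names[] above. A portable equivalent of that step:
 */
static inline int example_bit_position(unsigned int one_hot)
{
	int n = 0;

	while (one_hot >>= 1)
		n++;
	return n;	/* e.g. 0x10 -> 4 */
}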
10279
10280/* return the link state reason name */
10281static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10282{
10283 if (state == HLS_UP_INIT) {
10284 switch (ppd->linkinit_reason) {
10285 case OPA_LINKINIT_REASON_LINKUP:
10286 return "(LINKUP)";
10287 case OPA_LINKINIT_REASON_FLAPPING:
10288 return "(FLAPPING)";
10289 case OPA_LINKINIT_OUTSIDE_POLICY:
10290 return "(OUTSIDE_POLICY)";
10291 case OPA_LINKINIT_QUARANTINED:
10292 return "(QUARANTINED)";
10293 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10294 return "(INSUFIC_CAPABILITY)";
10295 default:
10296 break;
10297 }
10298 }
10299 return "";
10300}
10301
10302/*
10303 * driver_physical_state - convert the driver's notion of a port's
10304 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10305 * Return -1 (converted to a u32) to indicate error.
10306 */
10307u32 driver_physical_state(struct hfi1_pportdata *ppd)
10308{
10309 switch (ppd->host_link_state) {
10310 case HLS_UP_INIT:
10311 case HLS_UP_ARMED:
10312 case HLS_UP_ACTIVE:
10313 return IB_PORTPHYSSTATE_LINKUP;
10314 case HLS_DN_POLL:
10315 return IB_PORTPHYSSTATE_POLLING;
10316 case HLS_DN_DISABLE:
10317 return IB_PORTPHYSSTATE_DISABLED;
10318 case HLS_DN_OFFLINE:
10319 return OPA_PORTPHYSSTATE_OFFLINE;
10320 case HLS_VERIFY_CAP:
10321 return IB_PORTPHYSSTATE_POLLING;
10322 case HLS_GOING_UP:
10323 return IB_PORTPHYSSTATE_POLLING;
10324 case HLS_GOING_OFFLINE:
10325 return OPA_PORTPHYSSTATE_OFFLINE;
10326 case HLS_LINK_COOLDOWN:
10327 return OPA_PORTPHYSSTATE_OFFLINE;
10328 case HLS_DN_DOWNDEF:
10329 default:
10330 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10331 ppd->host_link_state);
10332 return -1;
10333 }
10334}
10335
10336/*
10337 * driver_logical_state - convert the driver's notion of a port's
10338 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10339 * (converted to a u32) to indicate error.
10340 */
10341u32 driver_logical_state(struct hfi1_pportdata *ppd)
10342{
10343 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10344 return IB_PORT_DOWN;
10345
10346 switch (ppd->host_link_state & HLS_UP) {
10347 case HLS_UP_INIT:
10348 return IB_PORT_INIT;
10349 case HLS_UP_ARMED:
10350 return IB_PORT_ARMED;
10351 case HLS_UP_ACTIVE:
10352 return IB_PORT_ACTIVE;
10353 default:
10354 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10355 ppd->host_link_state);
10356 return -1;
10357 }
10358}
10359
10360void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10361 u8 neigh_reason, u8 rem_reason)
10362{
10363 if (ppd->local_link_down_reason.latest == 0 &&
10364 ppd->neigh_link_down_reason.latest == 0) {
10365 ppd->local_link_down_reason.latest = lcl_reason;
10366 ppd->neigh_link_down_reason.latest = neigh_reason;
10367 ppd->remote_link_down_reason = rem_reason;
10368 }
10369}
10370
10371/*
10372 * Change the physical and/or logical link state.
10373 *
10374 * Do not call this routine while inside an interrupt. It contains
10375 * calls to routines that can take multiple seconds to finish.
10376 *
10377 * Returns 0 on success, -errno on failure.
10378 */
10379int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10380{
10381 struct hfi1_devdata *dd = ppd->dd;
10382 struct ib_event event = {.device = NULL};
10383 int ret1, ret = 0;
10384 int orig_new_state, poll_bounce;
10385
10386 mutex_lock(&ppd->hls_lock);
10387
10388 orig_new_state = state;
10389 if (state == HLS_DN_DOWNDEF)
10390 state = dd->link_default;
10391
10392 /* interpret poll -> poll as a link bounce */
10393 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10394 state == HLS_DN_POLL;
10395
10396 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10397 link_state_name(ppd->host_link_state),
10398 link_state_name(orig_new_state),
10399 poll_bounce ? "(bounce) " : "",
10400 link_state_reason_name(ppd, state));
10401
10402 /*
10403 * If we're going to a (HLS_*) link state that implies the logical
10404 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10405 * reset is_sm_config_started to 0.
10406 */
10407 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10408 ppd->is_sm_config_started = 0;
10409
10410 /*
10411 * Do nothing if the states match. Let a poll to poll link bounce
10412 * go through.
10413 */
10414 if (ppd->host_link_state == state && !poll_bounce)
10415 goto done;
10416
10417 switch (state) {
10418 case HLS_UP_INIT:
10419 if (ppd->host_link_state == HLS_DN_POLL &&
10420 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10421 /*
10422 * Quick link up jumps from polling to here.
10423 *
10424 * Whether in normal or loopback mode, the
10425 * simulator jumps from polling to link up.
10426 * Accept that here.
10427 */
10428 /* OK */
10429 } else if (ppd->host_link_state != HLS_GOING_UP) {
10430 goto unexpected;
10431 }
10432
10433 ppd->host_link_state = HLS_UP_INIT;
10434 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10435 if (ret) {
10436 /* logical state didn't change, stay at going_up */
10437 ppd->host_link_state = HLS_GOING_UP;
10438 dd_dev_err(dd,
10439 "%s: logical state did not change to INIT\n",
10440 __func__);
10441 } else {
10442 /* clear old transient LINKINIT_REASON code */
10443 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10444 ppd->linkinit_reason =
10445 OPA_LINKINIT_REASON_LINKUP;
10446
10447 /* enable the port */
10448 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10449
10450 handle_linkup_change(dd, 1);
10451 }
10452 break;
10453 case HLS_UP_ARMED:
10454 if (ppd->host_link_state != HLS_UP_INIT)
10455 goto unexpected;
10456
10457 ppd->host_link_state = HLS_UP_ARMED;
10458 set_logical_state(dd, LSTATE_ARMED);
10459 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10460 if (ret) {
10461 /* logical state didn't change, stay at init */
10462 ppd->host_link_state = HLS_UP_INIT;
10463 dd_dev_err(dd,
10464 "%s: logical state did not change to ARMED\n",
10465 __func__);
10466 }
10467 /*
10468 * The simulator does not currently implement SMA messages,
10469 * so neighbor_normal is not set. Set it here when we first
10470 * move to Armed.
10471 */
10472 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10473 ppd->neighbor_normal = 1;
10474 break;
10475 case HLS_UP_ACTIVE:
10476 if (ppd->host_link_state != HLS_UP_ARMED)
10477 goto unexpected;
10478
10479 ppd->host_link_state = HLS_UP_ACTIVE;
10480 set_logical_state(dd, LSTATE_ACTIVE);
10481 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10482 if (ret) {
10483 /* logical state didn't change, stay at armed */
10484 ppd->host_link_state = HLS_UP_ARMED;
10485 dd_dev_err(dd,
10486 "%s: logical state did not change to ACTIVE\n",
10487 __func__);
10488 } else {
10489 /* tell all engines to go running */
10490 sdma_all_running(dd);
10491
10492 /* Signal the IB layer that the port has gone active */
10493 event.device = &dd->verbs_dev.rdi.ibdev;
10494 event.element.port_num = ppd->port;
10495 event.event = IB_EVENT_PORT_ACTIVE;
10496 }
10497 break;
10498 case HLS_DN_POLL:
10499 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10500 ppd->host_link_state == HLS_DN_OFFLINE) &&
10501 dd->dc_shutdown)
10502 dc_start(dd);
10503 /* Hand LED control to the DC */
10504 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10505
10506 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10507 u8 tmp = ppd->link_enabled;
10508
10509 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10510 if (ret) {
10511 ppd->link_enabled = tmp;
10512 break;
10513 }
10514 ppd->remote_link_down_reason = 0;
10515
10516 if (ppd->driver_link_ready)
10517 ppd->link_enabled = 1;
10518 }
10519
10520 set_all_slowpath(ppd->dd);
10521 ret = set_local_link_attributes(ppd);
10522 if (ret)
10523 break;
10524
10525 ppd->port_error_action = 0;
10526 ppd->host_link_state = HLS_DN_POLL;
10527
10528 if (quick_linkup) {
10529 /* quick linkup does not go into polling */
10530 ret = do_quick_linkup(dd);
10531 } else {
10532 ret1 = set_physical_link_state(dd, PLS_POLLING);
10533 if (ret1 != HCMD_SUCCESS) {
10534 dd_dev_err(dd,
10535 "Failed to transition to Polling link state, return 0x%x\n",
10536 ret1);
10537 ret = -EINVAL;
10538 }
10539 }
10540 ppd->offline_disabled_reason =
10541 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10542 /*
10543 * If an error occurred above, go back to offline. The
10544 * caller may reschedule another attempt.
10545 */
10546 if (ret)
10547 goto_offline(ppd, 0);
10548 break;
10549 case HLS_DN_DISABLE:
10550 /* link is disabled */
10551 ppd->link_enabled = 0;
10552
10553 /* allow any state to transition to disabled */
10554
10555 /* must transition to offline first */
10556 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10557 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10558 if (ret)
10559 break;
10560 ppd->remote_link_down_reason = 0;
10561 }
10562
10563 if (!dd->dc_shutdown) {
10564 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10565 if (ret1 != HCMD_SUCCESS) {
10566 dd_dev_err(dd,
10567 "Failed to transition to Disabled link state, return 0x%x\n",
10568 ret1);
10569 ret = -EINVAL;
10570 break;
10571 }
10572 dc_shutdown(dd);
10573 }
10574 ppd->host_link_state = HLS_DN_DISABLE;
10575 break;
10576 case HLS_DN_OFFLINE:
10577 if (ppd->host_link_state == HLS_DN_DISABLE)
10578 dc_start(dd);
10579
10580 /* allow any state to transition to offline */
10581 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10582 if (!ret)
10583 ppd->remote_link_down_reason = 0;
10584 break;
10585 case HLS_VERIFY_CAP:
10586 if (ppd->host_link_state != HLS_DN_POLL)
10587 goto unexpected;
10588 ppd->host_link_state = HLS_VERIFY_CAP;
10589 break;
10590 case HLS_GOING_UP:
10591 if (ppd->host_link_state != HLS_VERIFY_CAP)
10592 goto unexpected;
10593
10594 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10595 if (ret1 != HCMD_SUCCESS) {
10596 dd_dev_err(dd,
10597 "Failed to transition to link up state, return 0x%x\n",
10598 ret1);
10599 ret = -EINVAL;
10600 break;
10601 }
10602 ppd->host_link_state = HLS_GOING_UP;
10603 break;
10604
10605 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10606 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10607 default:
10608 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10609 __func__, state);
10610 ret = -EINVAL;
10611 break;
10612 }
10613
10614 goto done;
10615
10616unexpected:
10617 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10618 __func__, link_state_name(ppd->host_link_state),
10619 link_state_name(state));
10620 ret = -EINVAL;
10621
10622done:
10623 mutex_unlock(&ppd->hls_lock);
10624
10625 if (event.device)
10626 ib_dispatch_event(&event);
10627
10628 return ret;
10629}
10630
10631int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10632{
10633 u64 reg;
10634 int ret = 0;
10635
10636 switch (which) {
10637 case HFI1_IB_CFG_LIDLMC:
10638 set_lidlmc(ppd);
10639 break;
10640 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10641 /*
10642 * The VL Arbitrator high limit is sent in units of 4k
10643 * bytes, while HFI stores it in units of 64 bytes.
10644 */
10645 val *= 4096 / 64;
10646 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10647 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10648 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10649 break;
10650 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10651 /* HFI only supports POLL as the default link down state */
10652 if (val != HLS_DN_POLL)
10653 ret = -EINVAL;
10654 break;
10655 case HFI1_IB_CFG_OP_VLS:
10656 if (ppd->vls_operational != val) {
10657 ppd->vls_operational = val;
10658 if (!ppd->port)
10659 ret = -EINVAL;
10660 }
10661 break;
10662 /*
10663 * For link width, link width downgrade, and speed enable, always AND
10664 * the setting with what is actually supported. This has two benefits.
10665 * First, enabled can't have unsupported values, no matter what the
10666 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10667 * "fill in with your supported value" have all the bits in the
10668 * field set, so simply ANDing with supported has the desired result.
10669 */
10670 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10671 ppd->link_width_enabled = val & ppd->link_width_supported;
10672 break;
10673 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10674 ppd->link_width_downgrade_enabled =
10675 val & ppd->link_width_downgrade_supported;
10676 break;
10677 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10678 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10679 break;
10680 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10681 /*
10682 * HFI does not follow IB specs, save this value
10683 * so we can report it, if asked.
10684 */
10685 ppd->overrun_threshold = val;
10686 break;
10687 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10688 /*
10689 * HFI does not follow IB specs, save this value
10690 * so we can report it, if asked.
10691 */
10692 ppd->phy_error_threshold = val;
10693 break;
10694
10695 case HFI1_IB_CFG_MTU:
10696 set_send_length(ppd);
10697 break;
10698
10699 case HFI1_IB_CFG_PKEYS:
10700 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10701 set_partition_keys(ppd);
10702 break;
10703
10704 default:
10705 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10706 dd_dev_info(ppd->dd,
10707 "%s: which %s, val 0x%x: not implemented\n",
10708 __func__, ib_cfg_name(which), val);
10709 break;
10710 }
10711 return ret;
10712}
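/*
 * Worked example, not part of the driver: the HFI1_IB_CFG_VL_HIGH_LIMIT
 * unit conversion above. The value arrives in units of 4 KB while the CSR
 * expects units of 64 bytes, so val *= 4096 / 64 multiplies by 64; an
 * incoming limit of 3 is written to the CSR as 192.
 */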
10713
10714/* begin functions related to vl arbitration table caching */
10715static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10716{
10717 int i;
10718
10719 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10720 VL_ARB_LOW_PRIO_TABLE_SIZE);
10721 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10722 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10723
10724 /*
10725 * Note that we always return values directly from the
10726 * 'vl_arb_cache' (and do no CSR reads) in response to a
10727 * 'Get(VLArbTable)'. This is obviously correct after a
10728 * 'Set(VLArbTable)', since the cache will then be up to
10729 * date. But it's also correct prior to any 'Set(VLArbTable)'
10730 * since then both the cache and the relevant h/w registers
10731 * will be zeroed.
10732 */
10733
10734 for (i = 0; i < MAX_PRIO_TABLE; i++)
10735 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10736}
10737
10738/*
10739 * vl_arb_lock_cache
10740 *
10741 * All other vl_arb_* functions should be called only after locking
10742 * the cache.
10743 */
10744static inline struct vl_arb_cache *
10745vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10746{
10747 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10748 return NULL;
10749 spin_lock(&ppd->vl_arb_cache[idx].lock);
10750 return &ppd->vl_arb_cache[idx];
10751}
10752
10753static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10754{
10755 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10756}
10757
10758static void vl_arb_get_cache(struct vl_arb_cache *cache,
10759 struct ib_vl_weight_elem *vl)
10760{
10761 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10762}
10763
10764static void vl_arb_set_cache(struct vl_arb_cache *cache,
10765 struct ib_vl_weight_elem *vl)
10766{
10767 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10768}
10769
10770static int vl_arb_match_cache(struct vl_arb_cache *cache,
10771 struct ib_vl_weight_elem *vl)
10772{
10773 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10774}
10775
10776/* end functions related to vl arbitration table caching */
10777
10778static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10779 u32 size, struct ib_vl_weight_elem *vl)
10780{
10781 struct hfi1_devdata *dd = ppd->dd;
10782 u64 reg;
10783 unsigned int i, is_up = 0;
10784 int drain, ret = 0;
10785
10786 mutex_lock(&ppd->hls_lock);
10787
10788 if (ppd->host_link_state & HLS_UP)
10789 is_up = 1;
10790
10791 drain = !is_ax(dd) && is_up;
10792
10793 if (drain)
10794 /*
10795 * Before adjusting VL arbitration weights, empty per-VL
10796 * FIFOs, otherwise a packet whose VL weight is being
10797 * set to 0 could get stuck in a FIFO with no chance to
10798 * egress.
10799 */
10800 ret = stop_drain_data_vls(dd);
10801
10802 if (ret) {
10803 dd_dev_err(
10804 dd,
10805 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10806 __func__);
10807 goto err;
10808 }
10809
10810 for (i = 0; i < size; i++, vl++) {
10811 /*
10812 * NOTE: The low priority shift and mask are used here, but
10813 * they are the same for both the low and high registers.
10814 */
10815 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10816 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10817 | (((u64)vl->weight
10818 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10819 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10820 write_csr(dd, target + (i * 8), reg);
10821 }
10822 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10823
10824 if (drain)
10825 open_fill_data_vls(dd); /* reopen all VLs */
10826
10827err:
10828 mutex_unlock(&ppd->hls_lock);
10829
10830 return ret;
10831}
10832
10833/*
10834 * Read one credit merge VL register.
10835 */
10836static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10837 struct vl_limit *vll)
10838{
10839 u64 reg = read_csr(dd, csr);
10840
10841 vll->dedicated = cpu_to_be16(
10842 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10843 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10844 vll->shared = cpu_to_be16(
10845 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10846 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10847}
10848
10849/*
10850 * Read the current credit merge limits.
10851 */
10852static int get_buffer_control(struct hfi1_devdata *dd,
10853 struct buffer_control *bc, u16 *overall_limit)
10854{
10855 u64 reg;
10856 int i;
10857
10858 /* not all entries are filled in */
10859 memset(bc, 0, sizeof(*bc));
10860
10861 /* OPA and HFI have a 1-1 mapping */
10862 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10863 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
10864
10865 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10866 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10867
10868 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10869 bc->overall_shared_limit = cpu_to_be16(
10870 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10871 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10872 if (overall_limit)
10873 *overall_limit = (reg
10874 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10875 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10876 return sizeof(struct buffer_control);
10877}
10878
10879static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10880{
10881 u64 reg;
10882 int i;
10883
10884 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10885 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10886 for (i = 0; i < sizeof(u64); i++) {
10887 u8 byte = *(((u8 *)&reg) + i);
10888
10889 dp->vlnt[2 * i] = byte & 0xf;
10890 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10891 }
10892
10893 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10894 for (i = 0; i < sizeof(u64); i++) {
10895 u8 byte = *(((u8 *)&reg) + i);
10896
10897 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10898 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10899 }
10900 return sizeof(struct sc2vlnt);
10901}
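/*
 * Illustrative sketch, not part of the driver: the nibble unpacking used
 * by get_sc2vlnt() above - sixteen 4-bit SC->VLnt entries split out of one
 * 64-bit register, a byte at a time, low nibble first.
 */
static void example_unpack_nibbles(unsigned long long reg,
				   unsigned char out[16])
{
	int i;

	for (i = 0; i < 8; i++) {
		unsigned char byte = (reg >> (8 * i)) & 0xff;

		out[2 * i] = byte & 0xf;
		out[2 * i + 1] = byte >> 4;
	}
}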
10902
10903static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10904 struct ib_vl_weight_elem *vl)
10905{
10906 unsigned int i;
10907
10908 for (i = 0; i < nelems; i++, vl++) {
10909 vl->vl = 0xf;
10910 vl->weight = 0;
10911 }
10912}
10913
10914static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10915{
10916 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10917 DC_SC_VL_VAL(15_0,
10918 0, dp->vlnt[0] & 0xf,
10919 1, dp->vlnt[1] & 0xf,
10920 2, dp->vlnt[2] & 0xf,
10921 3, dp->vlnt[3] & 0xf,
10922 4, dp->vlnt[4] & 0xf,
10923 5, dp->vlnt[5] & 0xf,
10924 6, dp->vlnt[6] & 0xf,
10925 7, dp->vlnt[7] & 0xf,
10926 8, dp->vlnt[8] & 0xf,
10927 9, dp->vlnt[9] & 0xf,
10928 10, dp->vlnt[10] & 0xf,
10929 11, dp->vlnt[11] & 0xf,
10930 12, dp->vlnt[12] & 0xf,
10931 13, dp->vlnt[13] & 0xf,
10932 14, dp->vlnt[14] & 0xf,
10933 15, dp->vlnt[15] & 0xf));
10934 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10935 DC_SC_VL_VAL(31_16,
10936 16, dp->vlnt[16] & 0xf,
10937 17, dp->vlnt[17] & 0xf,
10938 18, dp->vlnt[18] & 0xf,
10939 19, dp->vlnt[19] & 0xf,
10940 20, dp->vlnt[20] & 0xf,
10941 21, dp->vlnt[21] & 0xf,
10942 22, dp->vlnt[22] & 0xf,
10943 23, dp->vlnt[23] & 0xf,
10944 24, dp->vlnt[24] & 0xf,
10945 25, dp->vlnt[25] & 0xf,
10946 26, dp->vlnt[26] & 0xf,
10947 27, dp->vlnt[27] & 0xf,
10948 28, dp->vlnt[28] & 0xf,
10949 29, dp->vlnt[29] & 0xf,
10950 30, dp->vlnt[30] & 0xf,
10951 31, dp->vlnt[31] & 0xf));
10952}
10953
10954static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10955 u16 limit)
10956{
10957 if (limit != 0)
10958 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10959 what, (int)limit, idx);
10960}
10961
10962/* change only the shared limit portion of SendCmGlobalCredit */
10963static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10964{
10965 u64 reg;
10966
10967 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10968 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10969 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10970 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10971}
10972
10973/* change only the total credit limit portion of SendCmGlobalCredit */
10974static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10975{
10976 u64 reg;
10977
10978 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10979 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10980 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10981 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10982}
10983
10984/* set the given per-VL shared limit */
10985static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10986{
10987 u64 reg;
10988 u32 addr;
10989
10990 if (vl < TXE_NUM_DATA_VL)
10991 addr = SEND_CM_CREDIT_VL + (8 * vl);
10992 else
10993 addr = SEND_CM_CREDIT_VL15;
10994
10995 reg = read_csr(dd, addr);
10996 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10997 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10998 write_csr(dd, addr, reg);
10999}
11000
11001/* set the given per-VL dedicated limit */
11002static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11003{
11004 u64 reg;
11005 u32 addr;
11006
11007 if (vl < TXE_NUM_DATA_VL)
11008 addr = SEND_CM_CREDIT_VL + (8 * vl);
11009 else
11010 addr = SEND_CM_CREDIT_VL15;
11011
11012 reg = read_csr(dd, addr);
11013 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11014 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11015 write_csr(dd, addr, reg);
11016}
11017
11018/* spin until the given per-VL status mask bits clear */
11019static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11020 const char *which)
11021{
11022 unsigned long timeout;
11023 u64 reg;
11024
11025 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11026 while (1) {
11027 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11028
11029 if (reg == 0)
11030 return; /* success */
11031 if (time_after(jiffies, timeout))
11032 break; /* timed out */
11033 udelay(1);
11034 }
11035
11036 dd_dev_err(dd,
11037 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11038 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11039 /*
11040 * If this occurs, it is likely there was a credit loss on the link.
11041 * The only recovery from that is a link bounce.
11042 */
11043 dd_dev_err(dd,
11044 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
11045}
11046
11047/*
11048 * The number of credits on the VLs may be changed while everything
11049 * is "live", but the following algorithm must be followed due to
11050 * how the hardware is actually implemented. In particular,
11051 * Return_Credit_Status[] is the only correct status check.
11052 *
11053 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11054 * set Global_Shared_Credit_Limit = 0
11055 * use_all_vl = 1
11056 * mask0 = all VLs that are changing either dedicated or shared limits
11057 * set Shared_Limit[mask0] = 0
11058 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11059 * if (changing any dedicated limit)
11060 * mask1 = all VLs that are lowering dedicated limits
11061 * lower Dedicated_Limit[mask1]
11062 * spin until Return_Credit_Status[mask1] == 0
11063 * raise Dedicated_Limits
11064 * raise Shared_Limits
11065 * raise Global_Shared_Credit_Limit
11066 *
11067 * lower = if the new limit is lower, set the limit to the new value
11068 * raise = if the new limit is higher than the current value (may be changed
11069 * earlier in the algorithm), set the new limit to the new value
11070 */
11071int set_buffer_control(struct hfi1_pportdata *ppd,
11072 struct buffer_control *new_bc)
11073{
11074 struct hfi1_devdata *dd = ppd->dd;
11075 u64 changing_mask, ld_mask, stat_mask;
11076 int change_count;
11077 int i, use_all_mask;
11078 int this_shared_changing;
11079 int vl_count = 0, ret;
11080 /*
11081 * A0: add the variable any_shared_limit_changing below and in the
11082 * algorithm above. If removing A0 support, it can be removed.
11083 */
11084 int any_shared_limit_changing;
11085 struct buffer_control cur_bc;
11086 u8 changing[OPA_MAX_VLS];
11087 u8 lowering_dedicated[OPA_MAX_VLS];
11088 u16 cur_total;
11089 u32 new_total = 0;
11090 const u64 all_mask =
11091 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11092 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11093 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11094 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11095 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11096 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11097 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11098 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11099 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11100
11101#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11102#define NUM_USABLE_VLS 16 /* look at VL15 and less */
11103
11104 /* find the new total credits, do sanity check on unused VLs */
11105 for (i = 0; i < OPA_MAX_VLS; i++) {
11106 if (valid_vl(i)) {
11107 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11108 continue;
11109 }
11110 nonzero_msg(dd, i, "dedicated",
11111 be16_to_cpu(new_bc->vl[i].dedicated));
11112 nonzero_msg(dd, i, "shared",
11113 be16_to_cpu(new_bc->vl[i].shared));
11114 new_bc->vl[i].dedicated = 0;
11115 new_bc->vl[i].shared = 0;
11116 }
11117 new_total += be16_to_cpu(new_bc->overall_shared_limit);
11118
11119 /* fetch the current values */
11120 get_buffer_control(dd, &cur_bc, &cur_total);
11121
11122 /*
11123 * Create the masks we will use.
11124 */
11125 memset(changing, 0, sizeof(changing));
11126 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11127 /*
11128 * NOTE: Assumes that the individual VL bits are adjacent and in
11129 * increasing order
11130 */
11131 stat_mask =
11132 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11133 changing_mask = 0;
11134 ld_mask = 0;
11135 change_count = 0;
11136 any_shared_limit_changing = 0;
11137 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11138 if (!valid_vl(i))
11139 continue;
11140 this_shared_changing = new_bc->vl[i].shared
11141 != cur_bc.vl[i].shared;
11142 if (this_shared_changing)
11143 any_shared_limit_changing = 1;
11144 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11145 this_shared_changing) {
11146 changing[i] = 1;
11147 changing_mask |= stat_mask;
11148 change_count++;
11149 }
11150 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11151 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11152 lowering_dedicated[i] = 1;
11153 ld_mask |= stat_mask;
11154 }
11155 }
11156
11157 /* bracket the credit change with a total adjustment */
11158 if (new_total > cur_total)
11159 set_global_limit(dd, new_total);
11160
11161 /*
11162 * Start the credit change algorithm.
11163 */
11164 use_all_mask = 0;
11165 if ((be16_to_cpu(new_bc->overall_shared_limit) <
11166 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11167 (is_ax(dd) && any_shared_limit_changing)) {
11168 set_global_shared(dd, 0);
11169 cur_bc.overall_shared_limit = 0;
11170 use_all_mask = 1;
11171 }
11172
11173 for (i = 0; i < NUM_USABLE_VLS; i++) {
11174 if (!valid_vl(i))
11175 continue;
11176
11177 if (changing[i]) {
11178 set_vl_shared(dd, i, 0);
11179 cur_bc.vl[i].shared = 0;
11180 }
11181 }
11182
11183 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11184 "shared");
11185
11186 if (change_count > 0) {
11187 for (i = 0; i < NUM_USABLE_VLS; i++) {
11188 if (!valid_vl(i))
11189 continue;
11190
11191 if (lowering_dedicated[i]) {
11192 set_vl_dedicated(dd, i,
11193 be16_to_cpu(new_bc->
11194 vl[i].dedicated));
11195 cur_bc.vl[i].dedicated =
11196 new_bc->vl[i].dedicated;
11197 }
11198 }
11199
11200 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11201
11202 /* now raise all dedicated that are going up */
11203 for (i = 0; i < NUM_USABLE_VLS; i++) {
11204 if (!valid_vl(i))
11205 continue;
11206
11207 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11208 be16_to_cpu(cur_bc.vl[i].dedicated))
11209 set_vl_dedicated(dd, i,
11210 be16_to_cpu(new_bc->
11211 vl[i].dedicated));
11212 }
11213 }
11214
11215 /* next raise all shared that are going up */
11216 for (i = 0; i < NUM_USABLE_VLS; i++) {
11217 if (!valid_vl(i))
11218 continue;
11219
11220 if (be16_to_cpu(new_bc->vl[i].shared) >
11221 be16_to_cpu(cur_bc.vl[i].shared))
11222 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11223 }
11224
11225 /* finally raise the global shared */
11226 if (be16_to_cpu(new_bc->overall_shared_limit) >
11227 be16_to_cpu(cur_bc.overall_shared_limit))
11228 set_global_shared(dd,
11229 be16_to_cpu(new_bc->overall_shared_limit));
11230
11231 /* bracket the credit change with a total adjustment */
11232 if (new_total < cur_total)
11233 set_global_limit(dd, new_total);
11234
11235 /*
11236 * Determine the actual number of operational VLs using the number of
11237 * dedicated and shared credits for each VL.
11238 */
11239 if (change_count > 0) {
11240 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11241 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11242 be16_to_cpu(new_bc->vl[i].shared) > 0)
11243 vl_count++;
11244 ppd->actual_vls_operational = vl_count;
11245 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11246 ppd->actual_vls_operational :
11247 ppd->vls_operational,
11248 NULL);
11249 if (ret == 0)
11250 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11251 ppd->actual_vls_operational :
11252 ppd->vls_operational, NULL);
11253 if (ret)
11254 return ret;
11255 }
11256 return 0;
11257}
11258
11259/*
11260 * Read the given fabric manager table. Return the size of the
11261 * table (in bytes) on success, and a negative error code on
11262 * failure.
11263 */
11264int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11265
11266{
11267 int size;
11268 struct vl_arb_cache *vlc;
11269
11270 switch (which) {
11271 case FM_TBL_VL_HIGH_ARB:
11272 size = 256;
11273 /*
11274 * OPA specifies 128 elements (of 2 bytes each), though
11275 * HFI supports only 16 elements in h/w.
11276 */
11277 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11278 vl_arb_get_cache(vlc, t);
11279 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11280 break;
11281 case FM_TBL_VL_LOW_ARB:
11282 size = 256;
11283 /*
11284 * OPA specifies 128 elements (of 2 bytes each), though
11285 * HFI supports only 16 elements in h/w.
11286 */
11287 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11288 vl_arb_get_cache(vlc, t);
11289 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11290 break;
11291 case FM_TBL_BUFFER_CONTROL:
11292 size = get_buffer_control(ppd->dd, t, NULL);
11293 break;
11294 case FM_TBL_SC2VLNT:
11295 size = get_sc2vlnt(ppd->dd, t);
11296 break;
11297 case FM_TBL_VL_PREEMPT_ELEMS:
11298 size = 256;
11299 /* OPA specifies 128 elements, of 2 bytes each */
11300 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11301 break;
11302 case FM_TBL_VL_PREEMPT_MATRIX:
11303 size = 256;
11304 /*
11305 * OPA specifies that this is the same size as the VL
11306 * arbitration tables (i.e., 256 bytes).
11307 */
11308 break;
11309 default:
11310 return -EINVAL;
11311 }
11312 return size;
11313}
11314
11315/*
11316 * Write the given fabric manager table.
11317 */
11318int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11319{
11320 int ret = 0;
11321 struct vl_arb_cache *vlc;
11322
11323 switch (which) {
11324 case FM_TBL_VL_HIGH_ARB:
11325 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11326 if (vl_arb_match_cache(vlc, t)) {
11327 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11328 break;
11329 }
11330 vl_arb_set_cache(vlc, t);
11331 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11332 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11333 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11334 break;
11335 case FM_TBL_VL_LOW_ARB:
11336 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11337 if (vl_arb_match_cache(vlc, t)) {
11338 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11339 break;
11340 }
11341 vl_arb_set_cache(vlc, t);
11342 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11343 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11344 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11345 break;
11346 case FM_TBL_BUFFER_CONTROL:
11347 ret = set_buffer_control(ppd, t);
11348 break;
11349 case FM_TBL_SC2VLNT:
11350 set_sc2vlnt(ppd->dd, t);
11351 break;
11352 default:
11353 ret = -EINVAL;
11354 }
11355 return ret;
11356}
11357
11358/*
11359 * Disable all data VLs.
11360 *
11361 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11362 */
11363static int disable_data_vls(struct hfi1_devdata *dd)
11364{
11365 if (is_ax(dd))
11366 return 1;
11367
11368 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11369
11370 return 0;
11371}
11372
11373/*
11374 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11375 * Just re-enables all data VLs (the "fill" part happens
11376 * automatically - the name was chosen for symmetry with
11377 * stop_drain_data_vls()).
11378 *
11379 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11380 */
11381int open_fill_data_vls(struct hfi1_devdata *dd)
11382{
11383 if (is_ax(dd))
11384 return 1;
11385
11386 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11387
11388 return 0;
11389}
11390
11391/*
11392 * drain_data_vls() - assumes that disable_data_vls() has been called,
11393 * waits for the occupancy (of per-VL FIFOs) of all contexts and SDMA
11394 * engines to drop to 0.
11395 */
11396static void drain_data_vls(struct hfi1_devdata *dd)
11397{
11398 sc_wait(dd);
11399 sdma_wait(dd);
11400 pause_for_credit_return(dd);
11401}
11402
11403/*
11404 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11405 *
11406 * Use open_fill_data_vls() to resume using data VLs. This pair is
11407 * meant to be used like this:
11408 *
11409 * stop_drain_data_vls(dd);
11410 * // do things with per-VL resources
11411 * open_fill_data_vls(dd);
11412 */
11413int stop_drain_data_vls(struct hfi1_devdata *dd)
11414{
11415 int ret;
11416
11417 ret = disable_data_vls(dd);
11418 if (ret == 0)
11419 drain_data_vls(dd);
11420
11421 return ret;
11422}
11423
11424/*
11425 * Convert a nanosecond time to a cclock count. No matter how slow
11426 * the cclock, a non-zero ns will always have a non-zero result.
11427 */
11428u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11429{
11430 u32 cclocks;
11431
11432 if (dd->icode == ICODE_FPGA_EMULATION)
11433 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11434 else /* simulation pretends to be ASIC */
11435 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11436 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11437 cclocks = 1;
11438 return cclocks;
11439}
11440
11441/*
11442 * Convert a cclock count to nanoseconds. No matter how slow
11443 * the cclock, a non-zero cclocks will always have a non-zero result.
11444 */
11445u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11446{
11447 u32 ns;
11448
11449 if (dd->icode == ICODE_FPGA_EMULATION)
11450 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11451 else /* simulation pretends to be ASIC */
11452 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11453 if (cclocks && !ns)
11454 ns = 1;
11455 return ns;
11456}
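/*
 * Worked example, not part of the driver: assuming a hypothetical cclock
 * period of 1242 ps, ns_to_cclock(dd, 1) computes (1 * 1000) / 1242 = 0
 * and the guard bumps it to 1 cclock, while cclock_to_ns(dd, 1) computes
 * 1242 / 1000 = 1 ns - both directions honor the "non-zero in, non-zero
 * out" promise in the comments above.
 */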
11457
11458/*
11459 * Dynamically adjust the receive interrupt timeout for a context based on
11460 * incoming packet rate.
11461 *
11462 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11463 */
11464static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11465{
11466 struct hfi1_devdata *dd = rcd->dd;
11467 u32 timeout = rcd->rcvavail_timeout;
11468
11469 /*
11470 * This algorithm doubles or halves the timeout depending on whether
11471 * the number of packets received in this interrupt was less than or
11472 * greater than or equal to the interrupt count.
11473 *
11474 * The calculations below do not allow a steady state to be achieved.
11475 * Only at the endpoints is it possible to have an unchanging
11476 * timeout.
11477 */
11478 if (npkts < rcv_intr_count) {
11479 /*
11480 * Not enough packets arrived before the timeout, adjust
11481 * timeout downward.
11482 */
11483 if (timeout < 2) /* already at minimum? */
11484 return;
11485 timeout >>= 1;
11486 } else {
11487 /*
11488 * More than enough packets arrived before the timeout, adjust
11489 * timeout upward.
11490 */
11491 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11492 return;
11493 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11494 }
11495
11496 rcd->rcvavail_timeout = timeout;
11497 /*
11498 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11499 * been verified to be in range
11500 */
11501 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11502 (u64)timeout <<
11503 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11504}
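/*
 * Illustrative sketch, not part of the driver: the doubling/halving
 * adaptation above in isolation. 'thresh' stands in for rcv_intr_count
 * and 'max' for dd->rcv_intr_timeout_csr.
 */
static unsigned int example_adapt_timeout(unsigned int timeout,
					  unsigned int npkts,
					  unsigned int thresh,
					  unsigned int max)
{
	if (npkts < thresh)		/* too slow: halve, but never below 1 */
		return timeout < 2 ? timeout : timeout >> 1;
	if (timeout >= max)		/* already at the ceiling */
		return timeout;
	return (timeout << 1) < max ? timeout << 1 : max;
}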
11505
11506void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11507 u32 intr_adjust, u32 npkts)
11508{
11509 struct hfi1_devdata *dd = rcd->dd;
11510 u64 reg;
11511 u32 ctxt = rcd->ctxt;
11512
11513 /*
11514 * Need to write timeout register before updating RcvHdrHead to ensure
11515 * that a new value is used when the HW decides to restart counting.
11516 */
11517 if (intr_adjust)
11518 adjust_rcv_timeout(rcd, npkts);
11519 if (updegr) {
11520 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11521 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11522 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11523 }
11524 mmiowb();
11525 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11526 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11527 << RCV_HDR_HEAD_HEAD_SHIFT);
11528 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11529 mmiowb();
11530}
11531
11532u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11533{
11534 u32 head, tail;
11535
11536 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11537 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11538
11539 if (rcd->rcvhdrtail_kvaddr)
11540 tail = get_rcvhdrtail(rcd);
11541 else
11542 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11543
11544 return head == tail;
11545}
11546
11547/*
11548 * Context Control and Receive Array encoding for buffer size:
11549 * 0x0 invalid
11550 * 0x1 4 KB
11551 * 0x2 8 KB
11552 * 0x3 16 KB
11553 * 0x4 32 KB
11554 * 0x5 64 KB
11555 * 0x6 128 KB
11556 * 0x7 256 KB
11557 * 0x8 512 KB (Receive Array only)
11558 * 0x9 1 MB (Receive Array only)
11559 * 0xa 2 MB (Receive Array only)
11560 *
11561 * 0xB-0xF - reserved (Receive Array only)
11562 *
11563 *
11564 * This routine assumes that the value has already been sanity checked.
11565 */
11566static u32 encoded_size(u32 size)
11567{
11568 switch (size) {
11569 case 4 * 1024: return 0x1;
11570 case 8 * 1024: return 0x2;
11571 case 16 * 1024: return 0x3;
11572 case 32 * 1024: return 0x4;
11573 case 64 * 1024: return 0x5;
11574 case 128 * 1024: return 0x6;
11575 case 256 * 1024: return 0x7;
11576 case 512 * 1024: return 0x8;
11577 case 1 * 1024 * 1024: return 0x9;
11578 case 2 * 1024 * 1024: return 0xa;
11579 }
11580 return 0x1; /* if invalid, go with the minimum size */
11581}
11582
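/*
 * Illustrative aside, not part of the driver: the inverse of
 * encoded_size() under the table documented above, i.e. encoding ->
 * byte count. "decoded_size" is a hypothetical helper for this example;
 * encodings 0x8-0xa are valid for the Receive Array only.
 */
static u32 decoded_size(u32 encoding)
{
	if (encoding >= 0x1 && encoding <= 0xa)
		return 4096u << (encoding - 0x1);	/* 4 KB .. 2 MB */
	return 0;	/* 0x0 and 0xb-0xf are invalid/reserved */
}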
11583void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11584{
11585 struct hfi1_ctxtdata *rcd;
11586 u64 rcvctrl, reg;
11587 int did_enable = 0;
11588
11589 rcd = dd->rcd[ctxt];
11590 if (!rcd)
11591 return;
11592
11593 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11594
11595 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11596 /* if the context is already enabled, don't do the extra steps */
11597 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11598 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11599 /* reset the tail and hdr addresses, and sequence count */
11600 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
60368186 11601 rcd->rcvhdrq_dma);
11602 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11603 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
60368186 11604 rcd->rcvhdrqtailaddr_dma);
11605 rcd->seq_cnt = 1;
11606
11607 /* reset the cached receive header queue head value */
11608 rcd->head = 0;
11609
11610 /*
11611 * Zero the receive header queue so we don't get false
11612 * positives when checking the sequence number. The
11613 * sequence numbers could land exactly on the same spot.
11614 * E.g. an rcd restart before the receive header queue wrapped.
11615 */
11616 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11617
11618 /* starting timeout */
11619 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11620
11621 /* enable the context */
11622 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11623
11624 /* clean the egr buffer size first */
11625 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11626 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11627 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11628 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11629
11630 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11631 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11632 did_enable = 1;
11633
11634 /* zero RcvEgrIndexHead */
11635 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11636
11637 /* set eager count and base index */
11638 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11639 & RCV_EGR_CTRL_EGR_CNT_MASK)
11640 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11641 (((rcd->eager_base >> RCV_SHIFT)
11642 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11643 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11644 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11645
11646 /*
11647 * Set TID (expected) count and base index.
11648 * rcd->expected_count is set to individual RcvArray entries,
11649 * not pairs, and the CSR takes a pair-count in groups of
11650 * four, so divide by 8.
11651 */
11652 reg = (((rcd->expected_count >> RCV_SHIFT)
11653 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11654 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11655 (((rcd->expected_base >> RCV_SHIFT)
11656 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11657 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11658 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11659 if (ctxt == HFI1_CTRL_CTXT)
11660 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11661 }
11662 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11663 write_csr(dd, RCV_VL15, 0);
11664 /*
11665 * When receive context is being disabled turn on tail
11666 * update with a dummy tail address and then disable
11667 * receive context.
11668 */
60368186 11669 if (dd->rcvhdrtail_dummy_dma) {
46b010d3 11670 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
60368186 11671 dd->rcvhdrtail_dummy_dma);
566c157c 11672 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11673 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11674 }
11675
11676 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11677 }
11678 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11679 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11680 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11681 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
60368186 11682 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
77241056 11683 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11684 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11685 /* See comment on RcvCtxtCtrl.TailUpd above */
11686 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11687 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11688 }
11689 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11690 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11691 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11692 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11693 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11694 /*
11695 * In one-packet-per-eager mode, the size comes from
11696 * the RcvArray entry.
11697 */
11698 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11699 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11700 }
11701 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11702 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11703 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11704 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11705 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11706 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11707 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11708 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11709 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11710 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11711 rcd->rcvctrl = rcvctrl;
11712 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11713 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11714
11715 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11716 if (did_enable &&
11717 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11718 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11719 if (reg != 0) {
11720 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
17fb4f29 11721 ctxt, reg);
11722 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11723 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11724 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11725 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11726 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11727 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
17fb4f29 11728 ctxt, reg, reg == 0 ? "not" : "still");
11729 }
11730 }
11731
11732 if (did_enable) {
11733 /*
11734 * The interrupt timeout and count must be set after
11735 * the context is enabled to take effect.
11736 */
11737 /* set interrupt timeout */
11738 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
17fb4f29 11739 (u64)rcd->rcvavail_timeout <<
11740 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11741
11742 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11743 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11744 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11745 }
11746
11747 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11748 /*
11749 * If the context has been disabled and the Tail Update has
11750 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11751 * so it doesn't contain an address that is invalid.
77241056 11752 */
46b010d3 11753 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
60368186 11754 dd->rcvhdrtail_dummy_dma);
11755}
11756
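/*
 * Illustrative aside, not part of the driver: hfi1_rcvctrl() takes a
 * bitwise OR of HFI1_RCVCTRL_* operations, so one call can enable a
 * context and its "receive available" interrupt together. A hedged
 * usage sketch ("example_enable_rcv_ctxt" exists only for this example):
 */
static void example_enable_rcv_ctxt(struct hfi1_devdata *dd, int ctxt)
{
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
		     ctxt);
}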
582e05c3 11757u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11758{
11759 int ret;
11760 u64 val = 0;
11761
11762 if (namep) {
11763 ret = dd->cntrnameslen;
11764 *namep = dd->cntrnames;
11765 } else {
11766 const struct cntr_entry *entry;
11767 int i, j;
11768
11769 ret = (dd->ndevcntrs) * sizeof(u64);
11770
11771 /* Get the start of the block of counters */
11772 *cntrp = dd->cntrs;
11773
11774 /*
11775 * Now go and fill in each counter in the block.
11776 */
11777 for (i = 0; i < DEV_CNTR_LAST; i++) {
11778 entry = &dev_cntrs[i];
11779 hfi1_cdbg(CNTR, "reading %s", entry->name);
11780 if (entry->flags & CNTR_DISABLED) {
11781 /* Nothing */
11782 hfi1_cdbg(CNTR, "\tDisabled\n");
11783 } else {
11784 if (entry->flags & CNTR_VL) {
11785 hfi1_cdbg(CNTR, "\tPer VL\n");
11786 for (j = 0; j < C_VL_COUNT; j++) {
11787 val = entry->rw_cntr(entry,
11788 dd, j,
11789 CNTR_MODE_R,
11790 0);
11791 hfi1_cdbg(
11792 CNTR,
11793 "\t\tRead 0x%llx for %d\n",
11794 val, j);
11795 dd->cntrs[entry->offset + j] =
11796 val;
11797 }
11798 } else if (entry->flags & CNTR_SDMA) {
11799 hfi1_cdbg(CNTR,
11800 "\t Per SDMA Engine\n");
11801 for (j = 0; j < dd->chip_sdma_engines;
11802 j++) {
11803 val =
11804 entry->rw_cntr(entry, dd, j,
11805 CNTR_MODE_R, 0);
11806 hfi1_cdbg(CNTR,
11807 "\t\tRead 0x%llx for %d\n",
11808 val, j);
11809 dd->cntrs[entry->offset + j] =
11810 val;
11811 }
11812 } else {
11813 val = entry->rw_cntr(entry, dd,
11814 CNTR_INVALID_VL,
11815 CNTR_MODE_R, 0);
11816 dd->cntrs[entry->offset] = val;
11817 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11818 }
11819 }
11820 }
11821 }
11822 return ret;
11823}
11824
11825/*
11826 * Used by sysfs to create files for hfi stats to read
11827 */
582e05c3 11828u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
11829{
11830 int ret;
11831 u64 val = 0;
11832
11833 if (namep) {
11834 ret = ppd->dd->portcntrnameslen;
11835 *namep = ppd->dd->portcntrnames;
11836 } else {
11837 const struct cntr_entry *entry;
11838 int i, j;
11839
582e05c3 11840 ret = ppd->dd->nportcntrs * sizeof(u64);
11841 *cntrp = ppd->cntrs;
11842
11843 for (i = 0; i < PORT_CNTR_LAST; i++) {
11844 entry = &port_cntrs[i];
11845 hfi1_cdbg(CNTR, "reading %s", entry->name);
11846 if (entry->flags & CNTR_DISABLED) {
11847 /* Nothing */
11848 hfi1_cdbg(CNTR, "\tDisabled\n");
11849 continue;
11850 }
11851
11852 if (entry->flags & CNTR_VL) {
11853 hfi1_cdbg(CNTR, "\tPer VL");
11854 for (j = 0; j < C_VL_COUNT; j++) {
11855 val = entry->rw_cntr(entry, ppd, j,
11856 CNTR_MODE_R,
11857 0);
11858 hfi1_cdbg(
11859 CNTR,
11860 "\t\tRead 0x%llx for %d",
11861 val, j);
11862 ppd->cntrs[entry->offset + j] = val;
11863 }
11864 } else {
11865 val = entry->rw_cntr(entry, ppd,
11866 CNTR_INVALID_VL,
11867 CNTR_MODE_R,
11868 0);
11869 ppd->cntrs[entry->offset] = val;
11870 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11871 }
11872 }
11873 }
11874 return ret;
11875}
11876
11877static void free_cntrs(struct hfi1_devdata *dd)
11878{
11879 struct hfi1_pportdata *ppd;
11880 int i;
11881
11882 if (dd->synth_stats_timer.data)
11883 del_timer_sync(&dd->synth_stats_timer);
11884 dd->synth_stats_timer.data = 0;
11885 ppd = (struct hfi1_pportdata *)(dd + 1);
11886 for (i = 0; i < dd->num_pports; i++, ppd++) {
11887 kfree(ppd->cntrs);
11888 kfree(ppd->scntrs);
11889 free_percpu(ppd->ibport_data.rvp.rc_acks);
11890 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11891 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
11892 ppd->cntrs = NULL;
11893 ppd->scntrs = NULL;
11894 ppd->ibport_data.rvp.rc_acks = NULL;
11895 ppd->ibport_data.rvp.rc_qacks = NULL;
11896 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
11897 }
11898 kfree(dd->portcntrnames);
11899 dd->portcntrnames = NULL;
11900 kfree(dd->cntrs);
11901 dd->cntrs = NULL;
11902 kfree(dd->scntrs);
11903 dd->scntrs = NULL;
11904 kfree(dd->cntrnames);
11905 dd->cntrnames = NULL;
11906}
11907
11908static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11909 u64 *psval, void *context, int vl)
11910{
11911 u64 val;
11912 u64 sval = *psval;
11913
11914 if (entry->flags & CNTR_DISABLED) {
11915 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11916 return 0;
11917 }
11918
11919 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11920
11921 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11922
11923 /* If it's a synthetic counter, there is more work we need to do */
11924 if (entry->flags & CNTR_SYNTH) {
11925 if (sval == CNTR_MAX) {
11926 /* No need to read already saturated */
11927 return CNTR_MAX;
11928 }
11929
11930 if (entry->flags & CNTR_32BIT) {
11931 /* 32bit counters can wrap multiple times */
11932 u64 upper = sval >> 32;
11933 u64 lower = (sval << 32) >> 32;
11934
11935 if (lower > val) { /* hw wrapped */
11936 if (upper == CNTR_32BIT_MAX)
11937 val = CNTR_MAX;
11938 else
11939 upper++;
11940 }
11941
11942 if (val != CNTR_MAX)
11943 val = (upper << 32) | val;
11944
11945 } else {
11946 /* If we rolled we are saturated */
11947 if ((val < sval) || (val > CNTR_MAX))
11948 val = CNTR_MAX;
11949 }
11950 }
11951
11952 *psval = val;
11953
11954 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11955
11956 return val;
11957}
11958
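/*
 * Illustrative aside, not part of the driver: the CNTR_32BIT branch
 * above widens a wrapping 32-bit hardware counter into a monotonic
 * 64-bit value by carrying a wrap count in the upper half of the saved
 * value. A condensed sketch of that idea ("widen_32bit_cntr" is a
 * hypothetical helper; saturation at CNTR_MAX is omitted for brevity):
 */
static u64 widen_32bit_cntr(u64 saved, u32 hw_val)
{
	u64 upper = saved >> 32;

	/* hardware value went backwards => the 32-bit counter wrapped */
	if ((u32)saved > hw_val)
		upper++;

	return (upper << 32) | hw_val;
}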
11959static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11960 struct cntr_entry *entry,
11961 u64 *psval, void *context, int vl, u64 data)
11962{
11963 u64 val;
11964
11965 if (entry->flags & CNTR_DISABLED) {
11966 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11967 return 0;
11968 }
11969
11970 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11971
11972 if (entry->flags & CNTR_SYNTH) {
11973 *psval = data;
11974 if (entry->flags & CNTR_32BIT) {
11975 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11976 (data << 32) >> 32);
11977 val = data; /* return the full 64bit value */
11978 } else {
11979 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11980 data);
11981 }
11982 } else {
11983 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11984 }
11985
11986 *psval = val;
11987
11988 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11989
11990 return val;
11991}
11992
11993u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11994{
11995 struct cntr_entry *entry;
11996 u64 *sval;
11997
11998 entry = &dev_cntrs[index];
11999 sval = dd->scntrs + entry->offset;
12000
12001 if (vl != CNTR_INVALID_VL)
12002 sval += vl;
12003
12004 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12005}
12006
12007u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12008{
12009 struct cntr_entry *entry;
12010 u64 *sval;
12011
12012 entry = &dev_cntrs[index];
12013 sval = dd->scntrs + entry->offset;
12014
12015 if (vl != CNTR_INVALID_VL)
12016 sval += vl;
12017
12018 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12019}
12020
12021u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12022{
12023 struct cntr_entry *entry;
12024 u64 *sval;
12025
12026 entry = &port_cntrs[index];
12027 sval = ppd->scntrs + entry->offset;
12028
12029 if (vl != CNTR_INVALID_VL)
12030 sval += vl;
12031
12032 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12033 (index <= C_RCV_HDR_OVF_LAST)) {
12034 /* We do not want to bother for disabled contexts */
12035 return 0;
12036 }
12037
12038 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12039}
12040
12041u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12042{
12043 struct cntr_entry *entry;
12044 u64 *sval;
12045
12046 entry = &port_cntrs[index];
12047 sval = ppd->scntrs + entry->offset;
12048
12049 if (vl != CNTR_INVALID_VL)
12050 sval += vl;
12051
12052 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12053 (index <= C_RCV_HDR_OVF_LAST)) {
12054 /* We do not want to bother for disabled contexts */
12055 return 0;
12056 }
12057
12058 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12059}
12060
12061static void update_synth_timer(unsigned long opaque)
12062{
12063 u64 cur_tx;
12064 u64 cur_rx;
12065 u64 total_flits;
12066 u8 update = 0;
12067 int i, j, vl;
12068 struct hfi1_pportdata *ppd;
12069 struct cntr_entry *entry;
12070
12071 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12072
12073 /*
12074 * Rather than keep beating on the CSRs pick a minimal set that we can
12075 * check to watch for potential roll over. We can do this by looking at
12076 * the number of flits sent/recv. If the total flits exceeds 32bits then
12077 * we have to iterate all the counters and update.
12078 */
12079 entry = &dev_cntrs[C_DC_RCV_FLITS];
12080 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12081
12082 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12083 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12084
12085 hfi1_cdbg(
12086 CNTR,
12087 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12088 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12089
12090 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12091 /*
12092 * May not be strictly necessary to update but it won't hurt and
12093 * simplifies the logic here.
12094 */
12095 update = 1;
12096 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12097 dd->unit);
12098 } else {
12099 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12100 hfi1_cdbg(CNTR,
12101 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12102 total_flits, (u64)CNTR_32BIT_MAX);
12103 if (total_flits >= CNTR_32BIT_MAX) {
12104 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12105 dd->unit);
12106 update = 1;
12107 }
12108 }
12109
12110 if (update) {
12111 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12112 for (i = 0; i < DEV_CNTR_LAST; i++) {
12113 entry = &dev_cntrs[i];
12114 if (entry->flags & CNTR_VL) {
12115 for (vl = 0; vl < C_VL_COUNT; vl++)
12116 read_dev_cntr(dd, i, vl);
12117 } else {
12118 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12119 }
12120 }
12121 ppd = (struct hfi1_pportdata *)(dd + 1);
12122 for (i = 0; i < dd->num_pports; i++, ppd++) {
12123 for (j = 0; j < PORT_CNTR_LAST; j++) {
12124 entry = &port_cntrs[j];
12125 if (entry->flags & CNTR_VL) {
12126 for (vl = 0; vl < C_VL_COUNT; vl++)
12127 read_port_cntr(ppd, j, vl);
12128 } else {
12129 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12130 }
12131 }
12132 }
12133
12134 /*
12135 * We want the value in the register. The goal is to keep track
12136 * of the number of "ticks" not the counter value. In other
12137 * words if the register rolls we want to notice it and go ahead
12138 * and force an update.
12139 */
12140 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12141 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12142 CNTR_MODE_R, 0);
12143
12144 entry = &dev_cntrs[C_DC_RCV_FLITS];
12145 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12146 CNTR_MODE_R, 0);
12147
12148 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12149 dd->unit, dd->last_tx, dd->last_rx);
12150
12151 } else {
12152 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12153 }
12154
48a0cc13 12155 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12156}
12157
09a7908b 12158#define C_MAX_NAME 16 /* 15 chars + one for /0 */
12159static int init_cntrs(struct hfi1_devdata *dd)
12160{
c024c554 12161 int i, rcv_ctxts, j;
12162 size_t sz;
12163 char *p;
12164 char name[C_MAX_NAME];
12165 struct hfi1_pportdata *ppd;
12166 const char *bit_type_32 = ",32";
12167 const int bit_type_32_sz = strlen(bit_type_32);
12168
12169 /* set up the stats timer; the add_timer is done at the end */
12170 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12171 (unsigned long)dd);
12172
12173 /***********************/
12174 /* per device counters */
12175 /***********************/
12176
12177 /* size names and determine how many we have */
12178 dd->ndevcntrs = 0;
12179 sz = 0;
12180
12181 for (i = 0; i < DEV_CNTR_LAST; i++) {
12182 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12183 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12184 continue;
12185 }
12186
12187 if (dev_cntrs[i].flags & CNTR_VL) {
c024c554 12188 dev_cntrs[i].offset = dd->ndevcntrs;
77241056 12189 for (j = 0; j < C_VL_COUNT; j++) {
77241056 12190 snprintf(name, C_MAX_NAME, "%s%d",
17fb4f29 12191 dev_cntrs[i].name, vl_from_idx(j));
77241056 12192 sz += strlen(name);
12193 /* Add ",32" for 32-bit counters */
12194 if (dev_cntrs[i].flags & CNTR_32BIT)
12195 sz += bit_type_32_sz;
77241056 12196 sz++;
77241056 12197 dd->ndevcntrs++;
77241056 12198 }
a699c6c2 12199 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
c024c554 12200 dev_cntrs[i].offset = dd->ndevcntrs;
a699c6c2 12201 for (j = 0; j < dd->chip_sdma_engines; j++) {
12202 snprintf(name, C_MAX_NAME, "%s%d",
12203 dev_cntrs[i].name, j);
77241056 12204 sz += strlen(name);
12205 /* Add ",32" for 32-bit counters */
12206 if (dev_cntrs[i].flags & CNTR_32BIT)
12207 sz += bit_type_32_sz;
77241056 12208 sz++;
77241056 12209 dd->ndevcntrs++;
12210 }
12211 } else {
11d2b114 12212 /* +1 for newline. */
77241056 12213 sz += strlen(dev_cntrs[i].name) + 1;
12214 /* Add ",32" for 32-bit counters */
12215 if (dev_cntrs[i].flags & CNTR_32BIT)
12216 sz += bit_type_32_sz;
c024c554 12217 dev_cntrs[i].offset = dd->ndevcntrs;
77241056 12218 dd->ndevcntrs++;
12219 }
12220 }
12221
12222 /* allocate space for the counter values */
c024c554 12223 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12224 if (!dd->cntrs)
12225 goto bail;
12226
c024c554 12227 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12228 if (!dd->scntrs)
12229 goto bail;
12230
12231 /* allocate space for the counter names */
12232 dd->cntrnameslen = sz;
12233 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12234 if (!dd->cntrnames)
12235 goto bail;
12236
12237 /* fill in the names */
c024c554 12238 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12239 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12240 /* Nothing */
12241 } else if (dev_cntrs[i].flags & CNTR_VL) {
12242 for (j = 0; j < C_VL_COUNT; j++) {
12243 snprintf(name, C_MAX_NAME, "%s%d",
12244 dev_cntrs[i].name,
12245 vl_from_idx(j));
12246 memcpy(p, name, strlen(name));
12247 p += strlen(name);
12248
12249 /* Counter is 32 bits */
12250 if (dev_cntrs[i].flags & CNTR_32BIT) {
12251 memcpy(p, bit_type_32, bit_type_32_sz);
12252 p += bit_type_32_sz;
77241056 12253 }
12254
12255 *p++ = '\n';
12256 }
12257 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12258 for (j = 0; j < dd->chip_sdma_engines; j++) {
12259 snprintf(name, C_MAX_NAME, "%s%d",
12260 dev_cntrs[i].name, j);
12261 memcpy(p, name, strlen(name));
12262 p += strlen(name);
12263
12264 /* Counter is 32 bits */
12265 if (dev_cntrs[i].flags & CNTR_32BIT) {
12266 memcpy(p, bit_type_32, bit_type_32_sz);
12267 p += bit_type_32_sz;
a699c6c2 12268 }
11d2b114 12269
12270 *p++ = '\n';
12271 }
12272 } else {
12273 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12274 p += strlen(dev_cntrs[i].name);
12275
12276 /* Counter is 32 bits */
12277 if (dev_cntrs[i].flags & CNTR_32BIT) {
12278 memcpy(p, bit_type_32, bit_type_32_sz);
12279 p += bit_type_32_sz;
12280 }
12281
12282 *p++ = '\n';
12283 }
12284 }
12285
12286 /*********************/
12287 /* per port counters */
12288 /*********************/
12289
12290 /*
12291 * Go through the counters for the overflows and disable the ones we
12292 * don't need. This varies based on platform so we need to do it
12293 * dynamically here.
12294 */
12295 rcv_ctxts = dd->num_rcv_contexts;
12296 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12297 i <= C_RCV_HDR_OVF_LAST; i++) {
12298 port_cntrs[i].flags |= CNTR_DISABLED;
12299 }
12300
12301 /* size port counter names and determine how many we have */
12302 sz = 0;
12303 dd->nportcntrs = 0;
12304 for (i = 0; i < PORT_CNTR_LAST; i++) {
12305 if (port_cntrs[i].flags & CNTR_DISABLED) {
12306 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12307 continue;
12308 }
12309
12310 if (port_cntrs[i].flags & CNTR_VL) {
12311 port_cntrs[i].offset = dd->nportcntrs;
12312 for (j = 0; j < C_VL_COUNT; j++) {
77241056 12313 snprintf(name, C_MAX_NAME, "%s%d",
17fb4f29 12314 port_cntrs[i].name, vl_from_idx(j));
77241056 12315 sz += strlen(name);
12316 /* Add ",32" for 32-bit counters */
12317 if (port_cntrs[i].flags & CNTR_32BIT)
12318 sz += bit_type_32_sz;
77241056 12319 sz++;
12320 dd->nportcntrs++;
12321 }
12322 } else {
11d2b114 12323 /* +1 for newline */
77241056 12324 sz += strlen(port_cntrs[i].name) + 1;
12325 /* Add ",32" for 32-bit counters */
12326 if (port_cntrs[i].flags & CNTR_32BIT)
12327 sz += bit_type_32_sz;
12328 port_cntrs[i].offset = dd->nportcntrs;
12329 dd->nportcntrs++;
12330 }
12331 }
12332
12333 /* allocate space for the counter names */
12334 dd->portcntrnameslen = sz;
12335 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12336 if (!dd->portcntrnames)
12337 goto bail;
12338
12339 /* fill in port cntr names */
12340 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12341 if (port_cntrs[i].flags & CNTR_DISABLED)
12342 continue;
12343
12344 if (port_cntrs[i].flags & CNTR_VL) {
12345 for (j = 0; j < C_VL_COUNT; j++) {
77241056 12346 snprintf(name, C_MAX_NAME, "%s%d",
17fb4f29 12347 port_cntrs[i].name, vl_from_idx(j));
12348 memcpy(p, name, strlen(name));
12349 p += strlen(name);
12350
12351 /* Counter is 32 bits */
12352 if (port_cntrs[i].flags & CNTR_32BIT) {
12353 memcpy(p, bit_type_32, bit_type_32_sz);
12354 p += bit_type_32_sz;
12355 }
12356
12357 *p++ = '\n';
12358 }
12359 } else {
12360 memcpy(p, port_cntrs[i].name,
12361 strlen(port_cntrs[i].name));
12362 p += strlen(port_cntrs[i].name);
12363
12364 /* Counter is 32 bits */
12365 if (port_cntrs[i].flags & CNTR_32BIT) {
12366 memcpy(p, bit_type_32, bit_type_32_sz);
12367 p += bit_type_32_sz;
12368 }
12369
12370 *p++ = '\n';
12371 }
12372 }
12373
12374 /* allocate per port storage for counter values */
12375 ppd = (struct hfi1_pportdata *)(dd + 1);
12376 for (i = 0; i < dd->num_pports; i++, ppd++) {
12377 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12378 if (!ppd->cntrs)
12379 goto bail;
12380
12381 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12382 if (!ppd->scntrs)
12383 goto bail;
12384 }
12385
12386 /* CPU counters need to be allocated and zeroed */
12387 if (init_cpu_counters(dd))
12388 goto bail;
12389
12390 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12391 return 0;
12392bail:
12393 free_cntrs(dd);
12394 return -ENOMEM;
12395}
12396
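/*
 * Illustrative aside, not part of the driver: the name buffers built
 * above are one flat blob of newline-terminated entries, with a ",32"
 * suffix marking 32-bit counters. A consumer walking the blob might
 * look like this ("for_each_cntr_name" is a hypothetical sketch):
 */
static void for_each_cntr_name(const char *names, size_t len)
{
	const char *p = names;
	const char *end = names + len;

	while (p < end) {
		const char *nl = memchr(p, '\n', end - p);

		if (!nl)
			break;
		/* [p, nl) is one counter name, possibly ending in ",32" */
		p = nl + 1;
	}
}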
12397static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12398{
12399 switch (chip_lstate) {
12400 default:
12401 dd_dev_err(dd,
12402 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12403 chip_lstate);
12404 /* fall through */
12405 case LSTATE_DOWN:
12406 return IB_PORT_DOWN;
12407 case LSTATE_INIT:
12408 return IB_PORT_INIT;
12409 case LSTATE_ARMED:
12410 return IB_PORT_ARMED;
12411 case LSTATE_ACTIVE:
12412 return IB_PORT_ACTIVE;
12413 }
12414}
12415
12416u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12417{
12418 /* look at the HFI meta-states only */
12419 switch (chip_pstate & 0xf0) {
12420 default:
12421 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
17fb4f29 12422 chip_pstate);
12423 /* fall through */
12424 case PLS_DISABLED:
12425 return IB_PORTPHYSSTATE_DISABLED;
12426 case PLS_OFFLINE:
12427 return OPA_PORTPHYSSTATE_OFFLINE;
12428 case PLS_POLLING:
12429 return IB_PORTPHYSSTATE_POLLING;
12430 case PLS_CONFIGPHY:
12431 return IB_PORTPHYSSTATE_TRAINING;
12432 case PLS_LINKUP:
12433 return IB_PORTPHYSSTATE_LINKUP;
12434 case PLS_PHYTEST:
12435 return IB_PORTPHYSSTATE_PHY_TEST;
12436 }
12437}
12438
12439/* return the OPA port logical state name */
12440const char *opa_lstate_name(u32 lstate)
12441{
12442 static const char * const port_logical_names[] = {
12443 "PORT_NOP",
12444 "PORT_DOWN",
12445 "PORT_INIT",
12446 "PORT_ARMED",
12447 "PORT_ACTIVE",
12448 "PORT_ACTIVE_DEFER",
12449 };
12450 if (lstate < ARRAY_SIZE(port_logical_names))
12451 return port_logical_names[lstate];
12452 return "unknown";
12453}
12454
12455/* return the OPA port physical state name */
12456const char *opa_pstate_name(u32 pstate)
12457{
12458 static const char * const port_physical_names[] = {
12459 "PHYS_NOP",
12460 "reserved1",
12461 "PHYS_POLL",
12462 "PHYS_DISABLED",
12463 "PHYS_TRAINING",
12464 "PHYS_LINKUP",
12465 "PHYS_LINK_ERR_RECOVER",
12466 "PHYS_PHY_TEST",
12467 "reserved8",
12468 "PHYS_OFFLINE",
12469 "PHYS_GANGED",
12470 "PHYS_TEST",
12471 };
12472 if (pstate < ARRAY_SIZE(port_physical_names))
12473 return port_physical_names[pstate];
12474 return "unknown";
12475}
12476
12477/*
12478 * Read the hardware link state and set the driver's cached value of it.
12479 * Return the (new) current value.
12480 */
12481u32 get_logical_state(struct hfi1_pportdata *ppd)
12482{
12483 u32 new_state;
12484
12485 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12486 if (new_state != ppd->lstate) {
12487 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
17fb4f29 12488 opa_lstate_name(new_state), new_state);
12489 ppd->lstate = new_state;
12490 }
12491 /*
12492 * Set port status flags in the page mapped into userspace
12493 * memory. Do it here to ensure a reliable state - this is
12494 * the only function called by all state handling code.
12495 * Always set the flags because the cached value
12496 * might have been changed explicitly outside of this
12497 * function.
12498 */
12499 if (ppd->statusp) {
12500 switch (ppd->lstate) {
12501 case IB_PORT_DOWN:
12502 case IB_PORT_INIT:
12503 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12504 HFI1_STATUS_IB_READY);
12505 break;
12506 case IB_PORT_ARMED:
12507 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12508 break;
12509 case IB_PORT_ACTIVE:
12510 *ppd->statusp |= HFI1_STATUS_IB_READY;
12511 break;
12512 }
12513 }
12514 return ppd->lstate;
12515}
12516
12517/**
12518 * wait_logical_linkstate - wait for an IB link state change to occur
12519 * @ppd: port device
12520 * @state: the state to wait for
12521 * @msecs: the number of milliseconds to wait
12522 *
12523 * Wait up to msecs milliseconds for IB link state change to occur.
12524 * For now, take the easy polling route.
12525 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12526 */
12527static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12528 int msecs)
12529{
12530 unsigned long timeout;
12531
12532 timeout = jiffies + msecs_to_jiffies(msecs);
12533 while (1) {
12534 if (get_logical_state(ppd) == state)
12535 return 0;
12536 if (time_after(jiffies, timeout))
12537 break;
12538 msleep(20);
12539 }
12540 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12541
12542 return -ETIMEDOUT;
12543}
12544
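/*
 * Illustrative aside, not part of the driver: a hedged example of how a
 * caller might use the poll-and-timeout helper above, waiting up to one
 * second for the link to reach INIT before continuing bring-up.
 * "example_wait_for_init" exists only for this sketch.
 */
static int example_wait_for_init(struct hfi1_pportdata *ppd)
{
	/* returns 0 on success, -ETIMEDOUT if INIT was never seen */
	return wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
}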
12545u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12546{
12547 u32 pstate;
12548 u32 ib_pstate;
12549
12550 pstate = read_physical_state(ppd->dd);
12551 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
f45c8dc8 12552 if (ppd->last_pstate != ib_pstate) {
77241056 12553 dd_dev_info(ppd->dd,
12554 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12555 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12556 pstate);
f45c8dc8 12557 ppd->last_pstate = ib_pstate;
12558 }
12559 return ib_pstate;
12560}
12561
12562#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12563(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12564
12565#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12566(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12567
12568int hfi1_init_ctxt(struct send_context *sc)
12569{
d125a6c6 12570 if (sc) {
12571 struct hfi1_devdata *dd = sc->dd;
12572 u64 reg;
12573 u8 set = (sc->type == SC_USER ?
12574 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12575 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12576 reg = read_kctxt_csr(dd, sc->hw_context,
12577 SEND_CTXT_CHECK_ENABLE);
12578 if (set)
12579 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12580 else
12581 SET_STATIC_RATE_CONTROL_SMASK(reg);
12582 write_kctxt_csr(dd, sc->hw_context,
12583 SEND_CTXT_CHECK_ENABLE, reg);
12584 }
12585 return 0;
12586}
12587
12588int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12589{
12590 int ret = 0;
12591 u64 reg;
12592
12593 if (dd->icode != ICODE_RTL_SILICON) {
12594 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12595 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12596 __func__);
12597 return -EINVAL;
12598 }
12599 reg = read_csr(dd, ASIC_STS_THERM);
12600 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12601 ASIC_STS_THERM_CURR_TEMP_MASK);
12602 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12603 ASIC_STS_THERM_LO_TEMP_MASK);
12604 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12605 ASIC_STS_THERM_HI_TEMP_MASK);
12606 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12607 ASIC_STS_THERM_CRIT_TEMP_MASK);
12608 /* triggers is a 3-bit value - 1 bit per trigger. */
12609 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12610
12611 return ret;
12612}
12613
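/*
 * Illustrative aside, not part of the driver: every field above is
 * pulled out of ASIC_STS_THERM with the same shift-then-mask pattern.
 * A hypothetical generic form of that extraction, for clarity:
 */
static u64 extract_field(u64 reg, int shift, u64 mask)
{
	/* shift the field down to bit 0, then mask off its neighbors */
	return (reg >> shift) & mask;
}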
12614/* ========================================================================= */
12615
12616/*
12617 * Enable/disable chip from delivering interrupts.
12618 */
12619void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12620{
12621 int i;
12622
12623 /*
12624 * In HFI, the mask needs to be 1 to allow interrupts.
12625 */
12626 if (enable) {
12627 /* enable all interrupts */
12628 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
8638b77f 12629 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
77241056 12630
8ebd4cf1 12631 init_qsfp_int(dd);
12632 } else {
12633 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
8638b77f 12634 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12635 }
12636}
12637
12638/*
12639 * Clear all interrupt sources on the chip.
12640 */
12641static void clear_all_interrupts(struct hfi1_devdata *dd)
12642{
12643 int i;
12644
12645 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
8638b77f 12646 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12647
12648 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12649 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12650 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12651 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12652 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12653 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12654 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12655 for (i = 0; i < dd->chip_send_contexts; i++)
12656 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12657 for (i = 0; i < dd->chip_sdma_engines; i++)
12658 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12659
12660 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12661 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12662 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12663}
12664
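/*
 * Illustrative aside, not part of the driver: set_intr_state() and
 * clear_all_interrupts() above are the on/off switch and the broom used
 * during bring-up and teardown. A hedged sketch of the
 * mask-clear-unmask pattern seen later in this file
 * ("example_quiesce_irqs" exists only for this example):
 */
static void example_quiesce_irqs(struct hfi1_devdata *dd)
{
	set_intr_state(dd, 0);		/* mask everything */
	clear_all_interrupts(dd);	/* drop anything pending */
	set_intr_state(dd, 1);		/* unmask again */
}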
12665/* Move to pcie.c? */
12666static void disable_intx(struct pci_dev *pdev)
12667{
12668 pci_intx(pdev, 0);
12669}
12670
12671static void clean_up_interrupts(struct hfi1_devdata *dd)
12672{
12673 int i;
12674
12675 /* remove irqs - must happen before disabling/turning off */
12676 if (dd->num_msix_entries) {
12677 /* MSI-X */
12678 struct hfi1_msix_entry *me = dd->msix_entries;
12679
12680 for (i = 0; i < dd->num_msix_entries; i++, me++) {
d125a6c6 12681 if (!me->arg) /* => no irq, no affinity */
12682 continue;
12683 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
12684 free_irq(me->msix.vector, me->arg);
12685 }
12686 } else {
12687 /* INTx */
12688 if (dd->requested_intx_irq) {
12689 free_irq(dd->pcidev->irq, dd);
12690 dd->requested_intx_irq = 0;
12691 }
12692 }
12693
12694 /* turn off interrupts */
12695 if (dd->num_msix_entries) {
12696 /* MSI-X */
6e5b6131 12697 pci_disable_msix(dd->pcidev);
12698 } else {
12699 /* INTx */
12700 disable_intx(dd->pcidev);
12701 }
12702
12703 /* clean structures */
12704 kfree(dd->msix_entries);
12705 dd->msix_entries = NULL;
12706 dd->num_msix_entries = 0;
12707}
12708
12709/*
12710 * Remap the interrupt source from the general handler to the given MSI-X
12711 * interrupt.
12712 */
12713static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12714{
12715 u64 reg;
12716 int m, n;
12717
12718 /* clear from the handled mask of the general interrupt */
12719 m = isrc / 64;
12720 n = isrc % 64;
12721 dd->gi_mask[m] &= ~((u64)1 << n);
12722
12723 /* direct the chip source to the given MSI-X interrupt */
12724 m = isrc / 8;
12725 n = isrc % 8;
12726 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12727 reg &= ~((u64)0xff << (8 * n));
12728 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12729 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
12730}
12731
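/*
 * Illustrative aside, not part of the driver: CCE_INT_MAP packs one
 * 8-bit MSI-X vector number per interrupt source, eight sources per
 * 64-bit CSR. The m/n arithmetic above is just byte addressing; a
 * hypothetical standalone restatement:
 */
static void int_map_location(int isrc, int *csr_index, int *byte_in_csr)
{
	*csr_index = isrc / 8;		/* which CCE_INT_MAP CSR */
	*byte_in_csr = isrc % 8;	/* which byte lane inside it */
}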
12732static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12733 int engine, int msix_intr)
12734{
12735 /*
12736 * SDMA engine interrupt sources are grouped by type, rather than
12737 * by engine. Per-engine interrupts are as follows:
12738 * SDMA
12739 * SDMAProgress
12740 * SDMAIdle
12741 */
8638b77f 12742 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
17fb4f29 12743 msix_intr);
8638b77f 12744 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
17fb4f29 12745 msix_intr);
8638b77f 12746 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
17fb4f29 12747 msix_intr);
12748}
12749
12750static int request_intx_irq(struct hfi1_devdata *dd)
12751{
12752 int ret;
12753
12754 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12755 dd->unit);
77241056 12756 ret = request_irq(dd->pcidev->irq, general_interrupt,
17fb4f29 12757 IRQF_SHARED, dd->intx_name, dd);
12758 if (ret)
12759 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
17fb4f29 12760 ret);
12761 else
12762 dd->requested_intx_irq = 1;
12763 return ret;
12764}
12765
12766static int request_msix_irqs(struct hfi1_devdata *dd)
12767{
12768 int first_general, last_general;
12769 int first_sdma, last_sdma;
12770 int first_rx, last_rx;
957558c9 12771 int i, ret = 0;
12772
12773 /* calculate the ranges we are going to use */
12774 first_general = 0;
12775 last_general = first_general + 1;
12776 first_sdma = last_general;
12777 last_sdma = first_sdma + dd->num_sdma;
12778 first_rx = last_sdma;
12779 last_rx = first_rx + dd->n_krcv_queues;
12780
12781 /*
12782 * Sanity check - the code expects all SDMA chip source
12783 * interrupts to be in the same CSR, starting at bit 0. Verify
12784 * that this is true by checking the bit location of the start.
12785 */
12786 BUILD_BUG_ON(IS_SDMA_START % 64);
12787
12788 for (i = 0; i < dd->num_msix_entries; i++) {
12789 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12790 const char *err_info;
12791 irq_handler_t handler;
f4f30031 12792 irq_handler_t thread = NULL;
12793 void *arg;
12794 int idx;
12795 struct hfi1_ctxtdata *rcd = NULL;
12796 struct sdma_engine *sde = NULL;
12797
12798 /* obtain the arguments to request_irq */
12799 if (first_general <= i && i < last_general) {
12800 idx = i - first_general;
12801 handler = general_interrupt;
12802 arg = dd;
12803 snprintf(me->name, sizeof(me->name),
9805071e 12804 DRIVER_NAME "_%d", dd->unit);
77241056 12805 err_info = "general";
957558c9 12806 me->type = IRQ_GENERAL;
12807 } else if (first_sdma <= i && i < last_sdma) {
12808 idx = i - first_sdma;
12809 sde = &dd->per_sdma[idx];
12810 handler = sdma_interrupt;
12811 arg = sde;
12812 snprintf(me->name, sizeof(me->name),
9805071e 12813 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12814 err_info = "sdma";
12815 remap_sdma_interrupts(dd, idx, i);
957558c9 12816 me->type = IRQ_SDMA;
12817 } else if (first_rx <= i && i < last_rx) {
12818 idx = i - first_rx;
12819 rcd = dd->rcd[idx];
12820 /* no interrupt if no rcd */
12821 if (!rcd)
12822 continue;
12823 /*
12824 * Set the interrupt register and mask for this
12825 * context's interrupt.
12826 */
8638b77f 12827 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
77241056 12828 rcd->imask = ((u64)1) <<
8638b77f 12829 ((IS_RCVAVAIL_START + idx) % 64);
77241056 12830 handler = receive_context_interrupt;
f4f30031 12831 thread = receive_context_thread;
12832 arg = rcd;
12833 snprintf(me->name, sizeof(me->name),
9805071e 12834 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
77241056 12835 err_info = "receive context";
66c0933b 12836 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
957558c9 12837 me->type = IRQ_RCVCTXT;
12838 } else {
12839 /* not in our expected range - complain, then
12840 * ignore it
12841 */
77241056 12842 dd_dev_err(dd,
17fb4f29 12843 "Unexpected extra MSI-X interrupt %d\n", i);
12844 continue;
12845 }
12846 /* no argument, no interrupt */
d125a6c6 12847 if (!arg)
12848 continue;
12849 /* make sure the name is terminated */
8638b77f 12850 me->name[sizeof(me->name) - 1] = 0;
77241056 12851
f4f30031 12852 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
17fb4f29 12853 me->name, arg);
12854 if (ret) {
12855 dd_dev_err(dd,
12856 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12857 err_info, me->msix.vector, idx, ret);
12858 return ret;
12859 }
12860 /*
12861 * assign arg after request_irq call, so it will be
12862 * cleaned up
12863 */
12864 me->arg = arg;
12865
12866 ret = hfi1_get_irq_affinity(dd, me);
12867 if (ret)
12868 dd_dev_err(dd,
12869 "unable to pin IRQ %d\n", ret);
12870 }
12871
77241056 12872 return ret;
12873}
12874
12875/*
12876 * Set the general handler to accept all interrupts, remap all
12877 * chip interrupts back to MSI-X 0.
12878 */
12879static void reset_interrupts(struct hfi1_devdata *dd)
12880{
12881 int i;
12882
12883 /* all interrupts handled by the general handler */
12884 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12885 dd->gi_mask[i] = ~(u64)0;
12886
12887 /* all chip interrupts map to MSI-X 0 */
12888 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
8638b77f 12889 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12890}
12891
12892static int set_up_interrupts(struct hfi1_devdata *dd)
12893{
12894 struct hfi1_msix_entry *entries;
12895 u32 total, request;
12896 int i, ret;
12897 int single_interrupt = 0; /* we expect to have all the interrupts */
12898
12899 /*
12900 * Interrupt count:
12901 * 1 general, "slow path" interrupt (includes the SDMA engines
12902 * slow source, SDMACleanupDone)
12903 * N interrupts - one per used SDMA engine
12904 * M interrupts - one per kernel receive context
12905 */
12906 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12907
12908 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12909 if (!entries) {
12910 ret = -ENOMEM;
12911 goto fail;
12912 }
12913 /* 1-1 MSI-X entry assignment */
12914 for (i = 0; i < total; i++)
12915 entries[i].msix.entry = i;
12916
12917 /* ask for MSI-X interrupts */
12918 request = total;
12919 request_msix(dd, &request, entries);
12920
12921 if (request == 0) {
12922 /* using INTx */
12923 /* dd->num_msix_entries already zero */
12924 kfree(entries);
12925 single_interrupt = 1;
12926 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12927 } else {
12928 /* using MSI-X */
12929 dd->num_msix_entries = request;
12930 dd->msix_entries = entries;
12931
12932 if (request != total) {
12933 /* using MSI-X, with reduced interrupts */
12934 dd_dev_err(
12935 dd,
12936 "cannot handle reduced interrupt case, want %u, got %u\n",
12937 total, request);
12938 ret = -EINVAL;
12939 goto fail;
12940 }
12941 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12942 }
12943
12944 /* mask all interrupts */
12945 set_intr_state(dd, 0);
12946 /* clear all pending interrupts */
12947 clear_all_interrupts(dd);
12948
12949 /* reset general handler mask, chip MSI-X mappings */
12950 reset_interrupts(dd);
12951
12952 if (single_interrupt)
12953 ret = request_intx_irq(dd);
12954 else
12955 ret = request_msix_irqs(dd);
12956 if (ret)
12957 goto fail;
12958
12959 return 0;
12960
12961fail:
12962 clean_up_interrupts(dd);
12963 return ret;
12964}
12965
12966/*
12967 * Set up context values in dd. Sets:
12968 *
12969 * num_rcv_contexts - number of contexts being used
12970 * n_krcv_queues - number of kernel contexts
12971 * first_user_ctxt - first non-kernel context in array of contexts
12972 * freectxts - number of free user contexts
12973 * num_send_contexts - number of PIO send contexts being used
12974 */
12975static int set_up_context_variables(struct hfi1_devdata *dd)
12976{
429b6a72 12977 unsigned long num_kernel_contexts;
12978 int total_contexts;
12979 int ret;
12980 unsigned ngroups;
12981 int qos_rmt_count;
12982 int user_rmt_reduced;
12983
12984 /*
33a9eb52 12985 * Kernel receive contexts:
82c2611d 12986 * - Context 0 - control context (VL15/multicast/error)
12987 * - Context 1 - first kernel context
12988 * - Context 2 - second kernel context
12989 * ...
12990 */
12991 if (n_krcvqs)
82c2611d 12992 /*
12993 * n_krcvqs is the sum of module parameter kernel receive
12994 * contexts, krcvqs[]. It does not include the control
12995 * context, so add that.
82c2611d 12996 */
33a9eb52 12997 num_kernel_contexts = n_krcvqs + 1;
77241056 12998 else
8784ac02 12999 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13000 /*
13001 * Every kernel receive context needs an ACK send context.
13002 * One send context is allocated for each VL{0-7} and VL15.
13003 */
13004 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
13005 dd_dev_err(dd,
429b6a72 13006 "Reducing # kernel rcv contexts to: %d, from %lu\n",
77241056 13007 (int)(dd->chip_send_contexts - num_vls - 1),
429b6a72 13008 num_kernel_contexts);
13009 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
13010 }
13011 /*
13012 * User contexts:
13013 * - default to 1 user context per real (non-HT) CPU core if
13014 * num_user_contexts is negative
77241056 13015 */
2ce6bf22 13016 if (num_user_contexts < 0)
0852d241 13017 num_user_contexts =
4197344b 13018 cpumask_weight(&node_affinity.real_cpu_mask);
13019
13020 total_contexts = num_kernel_contexts + num_user_contexts;
13021
13022 /*
13023 * Adjust the counts given a global max.
13024 */
13025 if (total_contexts > dd->chip_rcv_contexts) {
13026 dd_dev_err(dd,
13027 "Reducing # user receive contexts to: %d, from %d\n",
13028 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
13029 (int)num_user_contexts);
13030 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
13031 /* recalculate */
13032 total_contexts = num_kernel_contexts + num_user_contexts;
13033 }
13034
13035 /* each user context requires an entry in the RMT */
13036 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13037 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
13038 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13039 dd_dev_err(dd,
13040 "RMT size is reducing the number of user receive contexts from %d to %d\n",
13041 (int)num_user_contexts,
13042 user_rmt_reduced);
13043 /* recalculate */
13044 num_user_contexts = user_rmt_reduced;
13045 total_contexts = num_kernel_contexts + num_user_contexts;
13046 }
13047
13048 /* the first N are kernel contexts, the rest are user contexts */
13049 dd->num_rcv_contexts = total_contexts;
13050 dd->n_krcv_queues = num_kernel_contexts;
13051 dd->first_user_ctxt = num_kernel_contexts;
affa48de 13052 dd->num_user_contexts = num_user_contexts;
13053 dd->freectxts = num_user_contexts;
13054 dd_dev_info(dd,
13055 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
13056 (int)dd->chip_rcv_contexts,
13057 (int)dd->num_rcv_contexts,
13058 (int)dd->n_krcv_queues,
13059 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
13060
13061 /*
13062 * Receive array allocation:
13063 * All RcvArray entries are divided into groups of 8. This
13064 * is required by the hardware and will speed up writes to
13065 * consecutive entries by using write-combining of the entire
13066 * cacheline.
13067 *
13068 * The number of groups is evenly divided among all contexts;
13069 * any left-over groups are given to the first N user
13070 * contexts.
13071 */
13072 dd->rcv_entries.group_size = RCV_INCREMENT;
13073 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13074 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13075 dd->rcv_entries.nctxt_extra = ngroups -
13076 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13077 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13078 dd->rcv_entries.ngroups,
13079 dd->rcv_entries.nctxt_extra);
13080 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13081 MAX_EAGER_ENTRIES * 2) {
13082 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13083 dd->rcv_entries.group_size;
13084 dd_dev_info(dd,
13085 "RcvArray group count too high, change to %u\n",
13086 dd->rcv_entries.ngroups);
13087 dd->rcv_entries.nctxt_extra = 0;
13088 }
13089 /*
13090 * PIO send contexts
13091 */
13092 ret = init_sc_pools_and_sizes(dd);
13093 if (ret >= 0) { /* success */
13094 dd->num_send_contexts = ret;
13095 dd_dev_info(
13096 dd,
44306f15 13097 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13098 dd->chip_send_contexts,
13099 dd->num_send_contexts,
13100 dd->sc_sizes[SC_KERNEL].count,
13101 dd->sc_sizes[SC_ACK].count,
13102 dd->sc_sizes[SC_USER].count,
13103 dd->sc_sizes[SC_VL15].count);
13104 ret = 0; /* success */
13105 }
13106
13107 return ret;
13108}
13109
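/*
 * Illustrative aside, not part of the driver: the sizing above boils
 * down to "kernel contexts first, user contexts fill the rest, clamp to
 * what the chip has". A hypothetical condensed restatement
 * ("split_contexts" exists only for this sketch):
 */
static int split_contexts(int chip_ctxts, int kernel, int user)
{
	if (kernel + user > chip_ctxts)
		user = chip_ctxts - kernel;	/* user contexts absorb the clamp */
	return kernel + user;			/* total contexts in use */
}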
13110/*
13111 * Set the device/port partition key table. The MAD code
13112 * will ensure that, at least, the partial management
13113 * partition key is present in the table.
13114 */
13115static void set_partition_keys(struct hfi1_pportdata *ppd)
13116{
13117 struct hfi1_devdata *dd = ppd->dd;
13118 u64 reg = 0;
13119 int i;
13120
13121 dd_dev_info(dd, "Setting partition keys\n");
13122 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13123 reg |= (ppd->pkeys[i] &
13124 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13125 ((i % 4) *
13126 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13127 /* Each register holds 4 PKey values. */
13128 if ((i % 4) == 3) {
13129 write_csr(dd, RCV_PARTITION_KEY +
13130 ((i - 3) * 2), reg);
13131 reg = 0;
13132 }
13133 }
13134
13135 /* Always enable HW pkeys check when pkeys table is set */
13136 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13137}
13138
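/*
 * Illustrative aside, not part of the driver: the loop above packs four
 * 16-bit PKeys into each 64-bit RcvPartitionKey register. A hedged
 * restatement of the packing for one register, assuming the per-key
 * shift is 16 bits ("pack_four_pkeys" exists only for this sketch):
 */
static u64 pack_four_pkeys(const u16 *pkeys)
{
	u64 reg = 0;
	int i;

	for (i = 0; i < 4; i++)
		reg |= (u64)pkeys[i] << (i * 16);	/* 16 bits per key */
	return reg;
}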
13139/*
13140 * These CSRs and memories are uninitialized on reset and must be
13141 * written before reading to set the ECC/parity bits.
13142 *
13143 * NOTE: All user context CSRs that are not mmaped write-only
13144 * (e.g. the TID flows) must be initialized even if the driver never
13145 * reads them.
13146 */
13147static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13148{
13149 int i, j;
13150
13151 /* CceIntMap */
13152 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
8638b77f 13153 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13154
13155 /* SendCtxtCreditReturnAddr */
13156 for (i = 0; i < dd->chip_send_contexts; i++)
13157 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13158
13159 /* PIO Send buffers */
13160 /* SDMA Send buffers */
13161 /*
13162 * These are not normally read, and (presently) have no method
13163 * to be read, so are not pre-initialized
13164 */
13165
13166 /* RcvHdrAddr */
13167 /* RcvHdrTailAddr */
13168 /* RcvTidFlowTable */
13169 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13170 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13171 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13172 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
8638b77f 13173 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13174 }
13175
13176 /* RcvArray */
13177 for (i = 0; i < dd->chip_rcv_array_count; i++)
8638b77f 13178 write_csr(dd, RCV_ARRAY + (8 * i),
17fb4f29 13179 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
13180
13181 /* RcvQPMapTable */
13182 for (i = 0; i < 32; i++)
13183 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13184}
13185
13186/*
13187 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13188 */
13189static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13190 u64 ctrl_bits)
13191{
13192 unsigned long timeout;
13193 u64 reg;
13194
13195 /* is the condition present? */
13196 reg = read_csr(dd, CCE_STATUS);
13197 if ((reg & status_bits) == 0)
13198 return;
13199
13200 /* clear the condition */
13201 write_csr(dd, CCE_CTRL, ctrl_bits);
13202
13203 /* wait for the condition to clear */
13204 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13205 while (1) {
13206 reg = read_csr(dd, CCE_STATUS);
13207 if ((reg & status_bits) == 0)
13208 return;
13209 if (time_after(jiffies, timeout)) {
13210 dd_dev_err(dd,
13211 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13212 status_bits, reg & status_bits);
13213 return;
13214 }
13215 udelay(1);
13216 }
13217}
13218
13219/* set CCE CSRs to chip reset defaults */
13220static void reset_cce_csrs(struct hfi1_devdata *dd)
13221{
13222 int i;
13223
13224 /* CCE_REVISION read-only */
13225 /* CCE_REVISION2 read-only */
13226 /* CCE_CTRL - bits clear automatically */
13227 /* CCE_STATUS read-only, use CceCtrl to clear */
13228 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13229 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13230 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13231 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13232 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13233 /* CCE_ERR_STATUS read-only */
13234 write_csr(dd, CCE_ERR_MASK, 0);
13235 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13236 /* CCE_ERR_FORCE leave alone */
13237 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13238 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13239 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13240 /* CCE_PCIE_CTRL leave alone */
13241 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13242 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13243 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
17fb4f29 13244 CCE_MSIX_TABLE_UPPER_RESETCSR);
13245 }
13246 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13247 /* CCE_MSIX_PBA read-only */
13248 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13249 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13250 }
13251 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13252 write_csr(dd, CCE_INT_MAP, 0);
13253 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13254 /* CCE_INT_STATUS read-only */
13255 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13256 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13257 /* CCE_INT_FORCE leave alone */
13258 /* CCE_INT_BLOCKED read-only */
13259 }
13260 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13261 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13262}
13263
13264/* set MISC CSRs to chip reset defaults */
13265static void reset_misc_csrs(struct hfi1_devdata *dd)
13266{
13267 int i;
13268
13269 for (i = 0; i < 32; i++) {
13270 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13271 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13272 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13273 }
13274 /*
13275 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13276 * only be written 128-byte chunks
13277 */
13278 /* init RSA engine to clear lingering errors */
13279 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13280 write_csr(dd, MISC_CFG_RSA_MU, 0);
13281 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13282 /* MISC_STS_8051_DIGEST read-only */
13283 /* MISC_STS_SBM_DIGEST read-only */
13284 /* MISC_STS_PCIE_DIGEST read-only */
13285 /* MISC_STS_FAB_DIGEST read-only */
13286 /* MISC_ERR_STATUS read-only */
13287 write_csr(dd, MISC_ERR_MASK, 0);
13288 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13289 /* MISC_ERR_FORCE leave alone */
13290}
13291
13292/* set TXE CSRs to chip reset defaults */
13293static void reset_txe_csrs(struct hfi1_devdata *dd)
13294{
13295 int i;
13296
13297 /*
13298 * TXE Kernel CSRs
13299 */
13300 write_csr(dd, SEND_CTRL, 0);
13301 __cm_reset(dd, 0); /* reset CM internal state */
13302 /* SEND_CONTEXTS read-only */
13303 /* SEND_DMA_ENGINES read-only */
13304 /* SEND_PIO_MEM_SIZE read-only */
13305 /* SEND_DMA_MEM_SIZE read-only */
13306 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13307 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13308 /* SEND_PIO_ERR_STATUS read-only */
13309 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13310 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13311 /* SEND_PIO_ERR_FORCE leave alone */
13312 /* SEND_DMA_ERR_STATUS read-only */
13313 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13314 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13315 /* SEND_DMA_ERR_FORCE leave alone */
13316 /* SEND_EGRESS_ERR_STATUS read-only */
13317 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13318 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13319 /* SEND_EGRESS_ERR_FORCE leave alone */
13320 write_csr(dd, SEND_BTH_QP, 0);
13321 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13322 write_csr(dd, SEND_SC2VLT0, 0);
13323 write_csr(dd, SEND_SC2VLT1, 0);
13324 write_csr(dd, SEND_SC2VLT2, 0);
13325 write_csr(dd, SEND_SC2VLT3, 0);
13326 write_csr(dd, SEND_LEN_CHECK0, 0);
13327 write_csr(dd, SEND_LEN_CHECK1, 0);
13328 /* SEND_ERR_STATUS read-only */
13329 write_csr(dd, SEND_ERR_MASK, 0);
13330 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13331 /* SEND_ERR_FORCE read-only */
13332 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
8638b77f 13333 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
77241056 13334 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13335 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13336 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13337 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
77241056 13338 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
8638b77f 13339 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
77241056 13340 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
8638b77f 13341 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
77241056 13342 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
17fb4f29 13343 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13344 /* SEND_CM_CREDIT_USED_STATUS read-only */
13345 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13346 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13347 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13348 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13349 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13350 for (i = 0; i < TXE_NUM_DATA_VL; i++)
8638b77f 13351 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13352 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13353 /* SEND_CM_CREDIT_USED_VL read-only */
13354 /* SEND_CM_CREDIT_USED_VL15 read-only */
13355 /* SEND_EGRESS_CTXT_STATUS read-only */
13356 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13357 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13358 /* SEND_EGRESS_ERR_INFO read-only */
13359 /* SEND_EGRESS_ERR_SOURCE read-only */
13360
13361 /*
13362 * TXE Per-Context CSRs
13363 */
13364 for (i = 0; i < dd->chip_send_contexts; i++) {
13365 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13366 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13367 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13368 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13369 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13370 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13371 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13372 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13373 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13374 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13375 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13376 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13377 }
13378
13379 /*
13380 * TXE Per-SDMA CSRs
13381 */
13382 for (i = 0; i < dd->chip_sdma_engines; i++) {
13383 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13384 /* SEND_DMA_STATUS read-only */
13385 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13386 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13387 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13388 /* SEND_DMA_HEAD read-only */
13389 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13390 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13391 /* SEND_DMA_IDLE_CNT read-only */
13392 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13393 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13394 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13395 /* SEND_DMA_ENG_ERR_STATUS read-only */
13396 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13397 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13398 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13399 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13400 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13401 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13402 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13403 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13404 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13405 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13406 }
13407}
13408
13409/*
13410 * Expect on entry:
13411 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13412 */
13413static void init_rbufs(struct hfi1_devdata *dd)
13414{
13415 u64 reg;
13416 int count;
13417
13418 /*
13419 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13420 * clear.
13421 */
13422 count = 0;
13423 while (1) {
13424 reg = read_csr(dd, RCV_STATUS);
13425 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13426 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13427 break;
13428 /*
13429 * Give up after 1ms - maximum wait time.
13430 *
e8a70af2 13431 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
77241056 13432 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
e8a70af2 13433 * 136 KB / (66% * 250MB/s) = 844us
13434 */
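 /*
 * Illustrative check on the bound below (not in the original source):
 * the loop polls every 2us, so 500 iterations give a ~1ms ceiling,
 * comfortably above the 844us worst case computed above:
 *
 *	136 * 1024 B / (0.66 * 250e6 B/s) ~= 845us
 *	500 iterations * 2us/iteration    = 1000us
 */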
13435 if (count++ > 500) {
13436 dd_dev_err(dd,
13437 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13438 __func__, reg);
13439 break;
13440 }
13441 udelay(2); /* do not busy-wait the CSR */
13442 }
13443
13444 /* start the init - expect RcvCtrl to be 0 */
13445 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13446
13447 /*
13448 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13449 * period after the write before RcvStatus.RxRbufInitDone is valid.
13450 * The delay in the first run through the loop below is sufficient and
13451 * required before the first read of RcvStatus.RxRbufInitDone.
13452 */
13453 read_csr(dd, RCV_CTRL);
13454
13455 /* wait for the init to finish */
13456 count = 0;
13457 while (1) {
13458 /* delay is required first time through - see above */
13459 udelay(2); /* do not busy-wait the CSR */
13460 reg = read_csr(dd, RCV_STATUS);
13461 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13462 break;
13463
13464 /* give up after 100us - slowest possible at 33MHz is 73us */
13465 if (count++ > 50) {
13466 dd_dev_err(dd,
13467 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13468 __func__);
13469 break;
13470 }
13471 }
13472}
13473
13474/* set RXE CSRs to chip reset defaults */
13475static void reset_rxe_csrs(struct hfi1_devdata *dd)
13476{
13477 int i, j;
13478
13479 /*
13480 * RXE Kernel CSRs
13481 */
13482 write_csr(dd, RCV_CTRL, 0);
13483 init_rbufs(dd);
13484 /* RCV_STATUS read-only */
13485 /* RCV_CONTEXTS read-only */
13486 /* RCV_ARRAY_CNT read-only */
13487 /* RCV_BUF_SIZE read-only */
13488 write_csr(dd, RCV_BTH_QP, 0);
13489 write_csr(dd, RCV_MULTICAST, 0);
13490 write_csr(dd, RCV_BYPASS, 0);
13491 write_csr(dd, RCV_VL15, 0);
13492 /* this is a clear-down */
13493 write_csr(dd, RCV_ERR_INFO,
17fb4f29 13494 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13495 /* RCV_ERR_STATUS read-only */
13496 write_csr(dd, RCV_ERR_MASK, 0);
13497 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13498 /* RCV_ERR_FORCE leave alone */
13499 for (i = 0; i < 32; i++)
13500 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13501 for (i = 0; i < 4; i++)
13502 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13503 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13504 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13505 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13506 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13507 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13508 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13509 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13510 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13511 }
13512 for (i = 0; i < 32; i++)
13513 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13514
13515 /*
13516 * RXE Kernel and User Per-Context CSRs
13517 */
13518 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13519 /* kernel */
13520 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13521 /* RCV_CTXT_STATUS read-only */
13522 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13523 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13524 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13525 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13526 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13527 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13528 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13529 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13530 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13531 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13532
13533 /* user */
13534 /* RCV_HDR_TAIL read-only */
13535 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13536 /* RCV_EGR_INDEX_TAIL read-only */
13537 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13538 /* RCV_EGR_OFFSET_TAIL read-only */
13539 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13540 write_uctxt_csr(dd, i,
13541 RCV_TID_FLOW_TABLE + (8 * j), 0);
13542 }
13543 }
13544}
13545
13546/*
13547 * Set sc2vl tables.
13548 *
13549 * They power on to zeros, so to avoid send context errors
13550 * they need to be set:
13551 *
13552 * SC 0-7 -> VL 0-7 (respectively)
13553 * SC 15 -> VL 15
13554 * otherwise
13555 * -> VL 0
13556 */
13557static void init_sc2vl_tables(struct hfi1_devdata *dd)
13558{
13559 int i;
13560 /* init per architecture spec, constrained by hardware capability */
13561
13562 /* HFI maps sent packets */
13563 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13564 0,
13565 0, 0, 1, 1,
13566 2, 2, 3, 3,
13567 4, 4, 5, 5,
13568 6, 6, 7, 7));
13569 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13570 1,
13571 8, 0, 9, 0,
13572 10, 0, 11, 0,
13573 12, 0, 13, 0,
13574 14, 0, 15, 15));
13575 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13576 2,
13577 16, 0, 17, 0,
13578 18, 0, 19, 0,
13579 20, 0, 21, 0,
13580 22, 0, 23, 0));
13581 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13582 3,
13583 24, 0, 25, 0,
13584 26, 0, 27, 0,
13585 28, 0, 29, 0,
13586 30, 0, 31, 0));
13587
13588 /* DC maps received packets */
13589 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13590 15_0,
13591 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13592 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13593 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13594 31_16,
13595 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13596 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13597
13598 /* initialize the cached sc2vl values consistently with h/w */
13599 for (i = 0; i < 32; i++) {
13600 if (i < 8 || i == 15)
13601 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13602 else
13603 *((u8 *)(dd->sc2vl) + i) = 0;
13604 }
13605}
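
/*
 * Illustrative sketch, not part of the driver: the net sc->vl mapping
 * that both the CSR writes and the cached table above encode. The
 * helper name is hypothetical.
 *
 *	static u8 sc_to_vl_model(u8 sc)
 *	{
 *		if (sc < 8)
 *			return sc;	(SC 0-7 -> VL 0-7)
 *		if (sc == 15)
 *			return 15;	(SC 15 -> VL 15)
 *		return 0;		(all other SCs -> VL 0)
 *	}
 */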
13606
13607/*
13608 * Read chip sizes and then reset parts to sane, disabled values. We cannot
13609 * depend on the chip going through a power-on reset - a driver may be loaded
13610 * and unloaded many times.
13611 *
13612 * Do not write any CSR values to the chip in this routine - there may be
13613 * a reset following the (possible) FLR in this routine.
13614 *
13615 */
13616static void init_chip(struct hfi1_devdata *dd)
13617{
13618 int i;
13619
13620 /*
13621 * Put the HFI CSRs in a known state.
13622 * Combine this with a DC reset.
13623 *
13624 * Stop the device from doing anything while we do a
13625 * reset. We know there are no other active users of
13626 * the device since we are now in charge. Turn off
13627 * all outbound and inbound traffic and make sure
13628 * the device does not generate any interrupts.
13629 */
13630
13631 /* disable send contexts and SDMA engines */
13632 write_csr(dd, SEND_CTRL, 0);
13633 for (i = 0; i < dd->chip_send_contexts; i++)
13634 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13635 for (i = 0; i < dd->chip_sdma_engines; i++)
13636 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13637 /* disable port (turn off RXE inbound traffic) and contexts */
13638 write_csr(dd, RCV_CTRL, 0);
13639 for (i = 0; i < dd->chip_rcv_contexts; i++)
13640 write_csr(dd, RCV_CTXT_CTRL, 0);
13641 /* mask all interrupt sources */
13642 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
8638b77f 13643 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13644
13645 /*
13646 * DC Reset: do a full DC reset before the register clear.
13647 * A recommended length of time to hold is one CSR read,
13648 * so reread the CceDcCtrl. Then, hold the DC in reset
13649 * across the clear.
13650 */
13651 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
50e5dcbe 13652 (void)read_csr(dd, CCE_DC_CTRL);
13653
13654 if (use_flr) {
13655 /*
13656 * A FLR will reset the SPC core and part of the PCIe.
13657 * The parts that need to be restored have already been
13658 * saved.
13659 */
13660 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13661
13662 /* do the FLR, the DC reset will remain */
13663 hfi1_pcie_flr(dd);
13664
13665 /* restore command and BARs */
13666 restore_pci_variables(dd);
13667
995deafa 13668 if (is_ax(dd)) {
13669 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13670 hfi1_pcie_flr(dd);
13671 restore_pci_variables(dd);
13672 }
13673 } else {
13674 dd_dev_info(dd, "Resetting CSRs with writes\n");
13675 reset_cce_csrs(dd);
13676 reset_txe_csrs(dd);
13677 reset_rxe_csrs(dd);
13678 reset_misc_csrs(dd);
13679 }
13680 /* clear the DC reset */
13681 write_csr(dd, CCE_DC_CTRL, 0);
7c03ed85 13682
77241056 13683 /* Set the LED off */
13684 setextled(dd, 0);
13685
13686 /*
13687 * Clear the QSFP reset.
72a67ba2 13688 * An FLR enforces a 0 on all out pins. The driver does not touch
77241056 13689 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
72a67ba2 13690 * keeps anything plugged in constantly in reset, if it pays attention
77241056 13691 * to RESET_N.
72a67ba2 13692 * Prime examples of this are optical cables. Set all pins high.
13693 * I2CCLK and I2CDAT will change per direction, and INT_N and
13694 * MODPRS_N are input only and their value is ignored.
13695 */
13696 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13697 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
a2ee27a4 13698 init_chip_resources(dd);
13699}
13700
13701static void init_early_variables(struct hfi1_devdata *dd)
13702{
13703 int i;
13704
13705 /* assign link credit variables */
13706 dd->vau = CM_VAU;
13707 dd->link_credits = CM_GLOBAL_CREDITS;
995deafa 13708 if (is_ax(dd))
13709 dd->link_credits--;
13710 dd->vcu = cu_to_vcu(hfi1_cu);
13711 /* enough room for 8 MAD packets plus header - 17K */
13712 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13713 if (dd->vl15_init > dd->link_credits)
13714 dd->vl15_init = dd->link_credits;
13715
13716 write_uninitialized_csrs_and_memories(dd);
13717
13718 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13719 for (i = 0; i < dd->num_pports; i++) {
13720 struct hfi1_pportdata *ppd = &dd->pport[i];
13721
13722 set_partition_keys(ppd);
13723 }
13724 init_sc2vl_tables(dd);
13725}
13726
13727static void init_kdeth_qp(struct hfi1_devdata *dd)
13728{
13729 /* user changed the KDETH_QP */
13730 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13731 /* out of range or illegal value */
13732 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13733 kdeth_qp = 0;
13734 }
13735 if (kdeth_qp == 0) /* not set, or failed range check */
13736 kdeth_qp = DEFAULT_KDETH_QP;
13737
13738 write_csr(dd, SEND_BTH_QP,
13739 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13740 SEND_BTH_QP_KDETH_QP_SHIFT);
13741
13742 write_csr(dd, RCV_BTH_QP,
13743 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13744 RCV_BTH_QP_KDETH_QP_SHIFT);
13745}
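
/*
 * Hedged note (assumption, not from the original source): the BTH_QP
 * CSRs hold an 8-bit prefix that appears to be compared against the
 * top byte of the 24-bit BTH QPN, so a packet would be treated as
 * KDETH when (qpn >> 16) == kdeth_qp; e.g. a prefix of 0x80 would
 * cover QPNs 0x800000 through 0x80ffff.
 */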
13746
13747/**
13748 * init_qpmap_table
13749 * @dd - device data
13750 * @first_ctxt - first context
13751 * @last_ctxt - last context
13752 *
13753 * This routine sets the qpn mapping table that
13754 * is indexed by qpn[8:1].
13755 *
13756 * The routine will round robin the 256 settings
13757 * from first_ctxt to last_ctxt.
13758 *
13759 * The first/last looks ahead to having specialized
13760 * receive contexts for mgmt and bypass. Normal
13761 * verbs traffic is assumed to be on a range
13762 * of receive contexts.
13763 */
13764static void init_qpmap_table(struct hfi1_devdata *dd,
13765 u32 first_ctxt,
13766 u32 last_ctxt)
13767{
13768 u64 reg = 0;
13769 u64 regno = RCV_QP_MAP_TABLE;
13770 int i;
13771 u64 ctxt = first_ctxt;
13772
60d585ad 13773 for (i = 0; i < 256; i++) {
77241056 13774 reg |= ctxt << (8 * (i % 8));
13775 ctxt++;
13776 if (ctxt > last_ctxt)
13777 ctxt = first_ctxt;
60d585ad 13778 if (i % 8 == 7) {
13779 write_csr(dd, regno, reg);
13780 reg = 0;
13781 regno += 8;
13782 }
13783 }
13784
13785 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13786 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13787}
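
/*
 * Illustrative sketch, not part of the driver: a standalone model of
 * how the loop above packs eight 8-bit context numbers into each
 * 64-bit RCV_QP_MAP_TABLE register, assuming contexts 4..6:
 *
 *	static u64 qpmap_pack_model(void)
 *	{
 *		u64 reg = 0;
 *		u64 ctxt = 4;
 *		int i;
 *
 *		for (i = 0; i < 8; i++) {
 *			reg |= ctxt << (8 * i);
 *			if (++ctxt > 6)
 *				ctxt = 4;
 *		}
 *		return reg;	(yields 0x0504060504060504)
 *	}
 */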
13788
13789struct rsm_map_table {
13790 u64 map[NUM_MAP_REGS];
13791 unsigned int used;
13792};
13793
13794struct rsm_rule_data {
13795 u8 offset;
13796 u8 pkt_type;
13797 u32 field1_off;
13798 u32 field2_off;
13799 u32 index1_off;
13800 u32 index1_width;
13801 u32 index2_off;
13802 u32 index2_width;
13803 u32 mask1;
13804 u32 value1;
13805 u32 mask2;
13806 u32 value2;
13807};
13808
13809/*
13810 * Return an initialized RMT map table for users to fill in. OK if it
13811 * returns NULL, indicating no table.
13812 */
13813static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13814{
13815 struct rsm_map_table *rmt;
13816 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13817
13818 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13819 if (rmt) {
13820 memset(rmt->map, rxcontext, sizeof(rmt->map));
13821 rmt->used = 0;
13822 }
13823
13824 return rmt;
13825}
13826
13827/*
13828 * Write the final RMT map table to the chip and free the table. OK if
13829 * table is NULL.
13830 */
13831static void complete_rsm_map_table(struct hfi1_devdata *dd,
13832 struct rsm_map_table *rmt)
13833{
13834 int i;
13835
13836 if (rmt) {
13837 /* write table to chip */
13838 for (i = 0; i < NUM_MAP_REGS; i++)
13839 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13840
13841 /* enable RSM */
13842 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13843 }
13844}
13845
13846/*
13847 * Add a receive side mapping rule.
13848 */
13849static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13850 struct rsm_rule_data *rrd)
13851{
13852 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13853 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13854 1ull << rule_index | /* enable bit */
13855 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13856 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13857 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13858 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13859 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13860 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13861 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13862 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13863 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13864 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13865 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13866 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13867 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13868}
13869
13870/* return the number of RSM map table entries that will be used for QOS */
13871static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13872 unsigned int *np)
13873{
13874 int i;
13875 unsigned int m, n;
13876 u8 max_by_vl = 0;
13877
13878 /* is QOS active at all? */
13879 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13880 num_vls == 1 ||
13881 krcvqsset <= 1)
13882 goto no_qos;
13883
13884 /* determine bits for qpn */
13885 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13886 if (krcvqs[i] > max_by_vl)
13887 max_by_vl = krcvqs[i];
13888 if (max_by_vl > 32)
13889 goto no_qos;
13890 m = ilog2(__roundup_pow_of_two(max_by_vl));
13891
13892 /* determine bits for vl */
13893 n = ilog2(__roundup_pow_of_two(num_vls));
13894
13895 /* reject if too much is used */
13896 if ((m + n) > 7)
13897 goto no_qos;
13898
13899 if (mp)
13900 *mp = m;
13901 if (np)
13902 *np = n;
13903
13904 return 1 << (m + n);
13905
13906no_qos:
13907 if (mp)
13908 *mp = 0;
13909 if (np)
13910 *np = 0;
13911 return 0;
13912}
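
/*
 * Worked example (illustrative, assuming the early qos checks pass):
 * with num_vls = 2, krcvqsset = 2 and krcvqs[] = { 2, 2 }:
 *
 *	max_by_vl = 2
 *	m = ilog2(__roundup_pow_of_two(2)) = 1	(qpn bits)
 *	n = ilog2(__roundup_pow_of_two(2)) = 1	(vl bits)
 *	return value = 1 << (1 + 1) = 4 RMT entries
 */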
13913
13914/**
13915 * init_qos - init RX qos
13916 * @dd - device data
372cc85a 13917 * @rmt - RSM map table
77241056 13918 *
13919 * This routine initializes Rule 0 and the RSM map table to implement
13920 * quality of service (qos).
77241056 13921 *
13922 * If all of the limit tests succeed, qos is applied based on the array
13923 * interpretation of krcvqs where entry 0 is VL0.
77241056 13924 *
13925 * The number of vl bits (n) and the number of qpn bits (m) are computed to
13926 * feed both the RSM map table and the single rule.
77241056 13927 */
372cc85a 13928static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
77241056 13929{
b12349ae 13930 struct rsm_rule_data rrd;
77241056 13931 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
372cc85a 13932 unsigned int rmt_entries;
77241056 13933 u64 reg;
77241056 13934
4a818bed 13935 if (!rmt)
77241056 13936 goto bail;
13937 rmt_entries = qos_rmt_entries(dd, &m, &n);
13938 if (rmt_entries == 0)
77241056 13939 goto bail;
13940 qpns_per_vl = 1 << m;
13941
13942 /* enough room in the map table? */
13943 rmt_entries = 1 << (m + n);
13944 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
859bcad9 13945 goto bail;
4a818bed 13946
372cc85a 13947 /* add qos entries to the RSM map table */
33a9eb52 13948 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
13949 unsigned tctxt;
13950
13951 for (qpn = 0, tctxt = ctxt;
13952 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13953 unsigned idx, regoff, regidx;
13954
13955 /* generate the index the hardware will produce */
13956 idx = rmt->used + ((qpn << n) ^ i);
13957 regoff = (idx % 8) * 8;
13958 regidx = idx / 8;
13959 /* replace default with context number */
13960 reg = rmt->map[regidx];
13961 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13962 << regoff);
13963 reg |= (u64)(tctxt++) << regoff;
372cc85a 13964 rmt->map[regidx] = reg;
13965 if (tctxt == ctxt + krcvqs[i])
13966 tctxt = ctxt;
13967 }
13968 ctxt += krcvqs[i];
13969 }
13970
13971 rrd.offset = rmt->used;
13972 rrd.pkt_type = 2;
13973 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13974 rrd.field2_off = LRH_SC_MATCH_OFFSET;
13975 rrd.index1_off = LRH_SC_SELECT_OFFSET;
13976 rrd.index1_width = n;
13977 rrd.index2_off = QPN_SELECT_OFFSET;
13978 rrd.index2_width = m + n;
13979 rrd.mask1 = LRH_BTH_MASK;
13980 rrd.value1 = LRH_BTH_VALUE;
13981 rrd.mask2 = LRH_SC_MASK;
13982 rrd.value2 = LRH_SC_VALUE;
13983
13984 /* add rule 0 */
13985 add_rsm_rule(dd, 0, &rrd);
13986
13987 /* mark RSM map entries as used */
13988 rmt->used += rmt_entries;
13989 /* map everything else to the mcast/err/vl15 context */
13990 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
13991 dd->qos_shift = n + 1;
13992 return;
13993bail:
13994 dd->qos_shift = 1;
82c2611d 13995 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13996}
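
/*
 * Illustrative: continuing the example of m = 1 qpn bit and n = 1 vl
 * bit, the index computed above, idx = rmt->used + ((qpn << n) ^ i),
 * interleaves the VLs in the map table (offsets relative to rmt->used):
 *
 *	vl 0: qpn 0 -> 0, qpn 1 -> 2
 *	vl 1: qpn 0 -> 1, qpn 1 -> 3
 *
 * i.e. the qpn bits select within a VL's contexts and the xor with the
 * VL index interleaves the VLs.
 */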
13997
13998static void init_user_fecn_handling(struct hfi1_devdata *dd,
13999 struct rsm_map_table *rmt)
14000{
14001 struct rsm_rule_data rrd;
14002 u64 reg;
14003 int i, idx, regoff, regidx;
14004 u8 offset;
14005
14006 /* there needs to be enough room in the map table */
14007 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14008 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14009 return;
14010 }
14011
14012 /*
14013 * RSM will extract the destination context as an index into the
14014 * map table. The destination contexts are a sequential block
14015 * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
14016 * Map entries are accessed as offset + extracted value. Adjust
14017 * the added offset so this sequence can be placed anywhere in
14018 * the table - as long as the entries themselves do not wrap.
14019 * There are only enough bits in offset for the table size, so
14020 * start with that to allow for a "negative" offset.
14021 */
14022 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14023 (int)dd->first_user_ctxt);
14024
14025 for (i = dd->first_user_ctxt, idx = rmt->used;
14026 i < dd->num_rcv_contexts; i++, idx++) {
14027 /* replace with identity mapping */
14028 regoff = (idx % 8) * 8;
14029 regidx = idx / 8;
14030 reg = rmt->map[regidx];
14031 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14032 reg |= (u64)i << regoff;
14033 rmt->map[regidx] = reg;
14034 }
14035
14036 /*
14037 * For RSM intercept of Expected FECN packets:
14038 * o packet type 0 - expected
14039 * o match on F (bit 95), using select/match 1, and
14040 * o match on SH (bit 133), using select/match 2.
14041 *
14042 * Use index 1 to extract the 8-bit receive context from DestQP
14043 * (start at bit 64). Use that as the RSM map table index.
14044 */
14045 rrd.offset = offset;
14046 rrd.pkt_type = 0;
14047 rrd.field1_off = 95;
14048 rrd.field2_off = 133;
14049 rrd.index1_off = 64;
14050 rrd.index1_width = 8;
14051 rrd.index2_off = 0;
14052 rrd.index2_width = 0;
14053 rrd.mask1 = 1;
14054 rrd.value1 = 1;
14055 rrd.mask2 = 1;
14056 rrd.value2 = 1;
14057
14058 /* add rule 1 */
14059 add_rsm_rule(dd, 1, &rrd);
14060
14061 rmt->used += dd->num_user_contexts;
14062}
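
/*
 * Worked example of the "negative" offset above (illustrative,
 * assuming NUM_MAP_ENTRIES is 256): with rmt->used = 20 and
 * first_user_ctxt = 24,
 *
 *	offset = (u8)(256 + 20 - 24) = 252
 *
 * and the offset + extracted-context sum wraps at 8 bits, so user
 * context 24 selects map entry (252 + 24) mod 256 = 20, i.e. the
 * first entry this routine filled, with the remaining user contexts
 * following sequentially.
 */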
14063
14064static void init_rxe(struct hfi1_devdata *dd)
14065{
14066 struct rsm_map_table *rmt;
14067
14068 /* enable all receive errors */
14069 write_csr(dd, RCV_ERR_MASK, ~0ull);
14070
14071 rmt = alloc_rsm_map_table(dd);
14072 /* set up QOS, including the QPN map table */
14073 init_qos(dd, rmt);
8f000f7f 14074 init_user_fecn_handling(dd, rmt);
14075 complete_rsm_map_table(dd, rmt);
14076 kfree(rmt);
14077
14078 /*
14079 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14080 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14081 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14082 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14083 * Max_PayLoad_Size set to its minimum of 128.
14084 *
14085 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14086 * (64 bytes). Max_Payload_Size is possibly modified upward in
14087 * tune_pcie_caps() which is called after this routine.
14088 */
14089}
14090
14091static void init_other(struct hfi1_devdata *dd)
14092{
14093 /* enable all CCE errors */
14094 write_csr(dd, CCE_ERR_MASK, ~0ull);
14095 /* enable *some* Misc errors */
14096 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14097 /* enable all DC errors, except LCB */
14098 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14099 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14100}
14101
14102/*
14103 * Fill out the given AU table using the given CU. A CU is defined in terms
14104 * of AUs. The table is an encoding: given the index, how many AUs does that
14105 * represent?
14106 *
14107 * NOTE: Assumes that the register layout is the same for the
14108 * local and remote tables.
14109 */
14110static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14111 u32 csr0to3, u32 csr4to7)
14112{
14113 write_csr(dd, csr0to3,
14114 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14115 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14116 2ull * cu <<
14117 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14118 4ull * cu <<
14119 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
77241056 14120 write_csr(dd, csr4to7,
14121 8ull * cu <<
14122 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14123 16ull * cu <<
14124 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14125 32ull * cu <<
14126 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14127 64ull * cu <<
14128 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14129}
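
/*
 * Illustrative: by index, the AU table written above encodes
 * { 0, 1, 2*cu, 4*cu, 8*cu, 16*cu, 32*cu, 64*cu }, so cu = 1 gives
 * { 0, 1, 2, 4, 8, 16, 32, 64 } AUs and cu = 2 gives
 * { 0, 1, 4, 8, 16, 32, 64, 128 }.
 */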
14130
14131static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14132{
14133 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
17fb4f29 14134 SEND_CM_LOCAL_AU_TABLE4_TO7);
14135}
14136
14137void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14138{
14139 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
17fb4f29 14140 SEND_CM_REMOTE_AU_TABLE4_TO7);
14141}
14142
14143static void init_txe(struct hfi1_devdata *dd)
14144{
14145 int i;
14146
14147 /* enable all PIO, SDMA, general, and Egress errors */
14148 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14149 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14150 write_csr(dd, SEND_ERR_MASK, ~0ull);
14151 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14152
14153 /* enable all per-context and per-SDMA engine errors */
14154 for (i = 0; i < dd->chip_send_contexts; i++)
14155 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14156 for (i = 0; i < dd->chip_sdma_engines; i++)
14157 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14158
14159 /* set the local CU to AU mapping */
14160 assign_local_cm_au_table(dd, dd->vcu);
14161
14162 /*
14163 * Set reasonable default for Credit Return Timer
14164 * Don't set on Simulator - causes it to choke.
14165 */
14166 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14167 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14168}
14169
14170int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
14171{
14172 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14173 unsigned sctxt;
14174 int ret = 0;
14175 u64 reg;
14176
14177 if (!rcd || !rcd->sc) {
14178 ret = -EINVAL;
14179 goto done;
14180 }
14181 sctxt = rcd->sc->hw_context;
14182 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14183 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14184 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14185 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14186 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14187 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14188 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14189 /*
14190 * Enable send-side J_KEY integrity check, unless this is A0 h/w
77241056 14191 */
995deafa 14192 if (!is_ax(dd)) {
14193 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14194 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14195 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14196 }
14197
14198 /* Enable J_KEY check on receive context. */
14199 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14200 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14201 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14202 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
14203done:
14204 return ret;
14205}
14206
14207int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
14208{
14209 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14210 unsigned sctxt;
14211 int ret = 0;
14212 u64 reg;
14213
14214 if (!rcd || !rcd->sc) {
14215 ret = -EINVAL;
14216 goto done;
14217 }
14218 sctxt = rcd->sc->hw_context;
14219 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14220 /*
14221 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14222 * This check would not have been enabled for A0 h/w, see
14223 * set_ctxt_jkey().
14224 */
995deafa 14225 if (!is_ax(dd)) {
14226 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14227 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14228 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14229 }
14230 /* Turn off the J_KEY on the receive side */
14231 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14232done:
14233 return ret;
14234}
14235
14236int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14237{
14238 struct hfi1_ctxtdata *rcd;
14239 unsigned sctxt;
14240 int ret = 0;
14241 u64 reg;
14242
e490974e 14243 if (ctxt < dd->num_rcv_contexts) {
77241056 14244 rcd = dd->rcd[ctxt];
e490974e 14245 } else {
14246 ret = -EINVAL;
14247 goto done;
14248 }
14249 if (!rcd || !rcd->sc) {
14250 ret = -EINVAL;
14251 goto done;
14252 }
14253 sctxt = rcd->sc->hw_context;
14254 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14255 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14256 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14257 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14258 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
e38d1e4f 14259 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14260 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14261done:
14262 return ret;
14263}
14264
14265int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14266{
14267 struct hfi1_ctxtdata *rcd;
14268 unsigned sctxt;
14269 int ret = 0;
14270 u64 reg;
14271
e490974e 14272 if (ctxt < dd->num_rcv_contexts) {
77241056 14273 rcd = dd->rcd[ctxt];
e490974e 14274 } else {
14275 ret = -EINVAL;
14276 goto done;
14277 }
14278 if (!rcd || !rcd->sc) {
14279 ret = -EINVAL;
14280 goto done;
14281 }
14282 sctxt = rcd->sc->hw_context;
14283 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14284 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14285 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14286 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14287done:
14288 return ret;
14289}
14290
14291/*
14292 * Start the clean up of the chip. Our clean up happens in multiple
14293 * stages and this is just the first.
14294 */
14295void hfi1_start_cleanup(struct hfi1_devdata *dd)
14296{
affa48de 14297 aspm_exit(dd);
14298 free_cntrs(dd);
14299 free_rcverr(dd);
14300 clean_up_interrupts(dd);
a2ee27a4 14301 finish_chip_resources(dd);
14302}
14303
14304#define HFI_BASE_GUID(dev) \
14305 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14306
14307/*
14308 * Information can be shared between the two HFIs on the same ASIC
14309 * in the same OS. This function finds the peer device and sets
14310 * up a shared structure.
77241056 14311 */
78eb129d 14312static int init_asic_data(struct hfi1_devdata *dd)
14313{
14314 unsigned long flags;
14315 struct hfi1_devdata *tmp, *peer = NULL;
98f179a5 14316 struct hfi1_asic_data *asic_data;
78eb129d 14317 int ret = 0;
77241056 14318
14319 /* pre-allocate the asic structure in case we are the first device */
14320 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14321 if (!asic_data)
14322 return -ENOMEM;
14323
14324 spin_lock_irqsave(&hfi1_devs_lock, flags);
14325 /* Find our peer device */
14326 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14327 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14328 dd->unit != tmp->unit) {
14329 peer = tmp;
14330 break;
14331 }
14332 }
14333
78eb129d 14334 if (peer) {
98f179a5 14335 /* use already allocated structure */
78eb129d 14336 dd->asic_data = peer->asic_data;
98f179a5 14337 kfree(asic_data);
78eb129d 14338 } else {
98f179a5 14339 dd->asic_data = asic_data;
14340 mutex_init(&dd->asic_data->asic_resource_mutex);
14341 }
14342 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
77241056 14343 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14344
14345 /* first one through - set up i2c devices */
14346 if (!peer)
14347 ret = set_up_i2c(dd, dd->asic_data);
14348
78eb129d 14349 return ret;
14350}
14351
14352/*
14353 * Set dd->boardname. Use a generic name if a name is not returned from
14354 * EFI variable space.
14355 *
14356 * Return 0 on success, -ENOMEM if space could not be allocated.
14357 */
14358static int obtain_boardname(struct hfi1_devdata *dd)
14359{
14360 /* generic board description */
14361 const char generic[] =
14362 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14363 unsigned long size;
14364 int ret;
14365
14366 ret = read_hfi1_efi_var(dd, "description", &size,
14367 (void **)&dd->boardname);
14368 if (ret) {
845f876d 14369 dd_dev_info(dd, "Board description not found\n");
14370 /* use generic description */
14371 dd->boardname = kstrdup(generic, GFP_KERNEL);
14372 if (!dd->boardname)
14373 return -ENOMEM;
14374 }
14375 return 0;
14376}
14377
14378/*
14379 * Check the interrupt registers to make sure that they are mapped correctly.
14380 * It is intended to help the user identify any mismapping by the VMM when
14381 * the driver is running in a VM. This function should only be called before
14382 * interrupts are set up properly.
14383 *
14384 * Return 0 on success, -EINVAL on failure.
14385 */
14386static int check_int_registers(struct hfi1_devdata *dd)
14387{
14388 u64 reg;
14389 u64 all_bits = ~(u64)0;
14390 u64 mask;
14391
14392 /* Clear CceIntMask[0] to avoid raising any interrupts */
14393 mask = read_csr(dd, CCE_INT_MASK);
14394 write_csr(dd, CCE_INT_MASK, 0ull);
14395 reg = read_csr(dd, CCE_INT_MASK);
14396 if (reg)
14397 goto err_exit;
14398
14399 /* Clear all interrupt status bits */
14400 write_csr(dd, CCE_INT_CLEAR, all_bits);
14401 reg = read_csr(dd, CCE_INT_STATUS);
14402 if (reg)
14403 goto err_exit;
14404
14405 /* Set all interrupt status bits */
14406 write_csr(dd, CCE_INT_FORCE, all_bits);
14407 reg = read_csr(dd, CCE_INT_STATUS);
14408 if (reg != all_bits)
14409 goto err_exit;
14410
14411 /* Restore the interrupt mask */
14412 write_csr(dd, CCE_INT_CLEAR, all_bits);
14413 write_csr(dd, CCE_INT_MASK, mask);
14414
14415 return 0;
14416err_exit:
14417 write_csr(dd, CCE_INT_MASK, mask);
14418 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14419 return -EINVAL;
14420}
14421
77241056 14422/**
7c03ed85 14423 * Allocate and initialize the device structure for the hfi.
14424 * @dev: the pci_dev for hfi1_ib device
14425 * @ent: pci_device_id struct for this dev
14426 *
14427 * Also allocates, initializes, and returns the devdata struct for this
14428 * device instance
14429 *
14430 * This is global, and is called directly at init to set up the
14431 * chip-specific function pointers for later use.
14432 */
14433struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14434 const struct pci_device_id *ent)
14435{
14436 struct hfi1_devdata *dd;
14437 struct hfi1_pportdata *ppd;
14438 u64 reg;
14439 int i, ret;
14440 static const char * const inames[] = { /* implementation names */
14441 "RTL silicon",
14442 "RTL VCS simulation",
14443 "RTL FPGA emulation",
14444 "Functional simulator"
14445 };
24487dd3 14446 struct pci_dev *parent = pdev->bus->self;
77241056 14447
14448 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14449 sizeof(struct hfi1_pportdata));
14450 if (IS_ERR(dd))
14451 goto bail;
14452 ppd = dd->pport;
14453 for (i = 0; i < dd->num_pports; i++, ppd++) {
14454 int vl;
14455 /* init common fields */
14456 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14457 /* DC supports 4 link widths */
14458 ppd->link_width_supported =
14459 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14460 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14461 ppd->link_width_downgrade_supported =
14462 ppd->link_width_supported;
14463 /* start out enabling only 4X */
14464 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14465 ppd->link_width_downgrade_enabled =
14466 ppd->link_width_downgrade_supported;
14467 /* link width active is 0 when link is down */
14468 /* link width downgrade active is 0 when link is down */
14469
14470 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14471 num_vls > HFI1_MAX_VLS_SUPPORTED) {
14472 hfi1_early_err(&pdev->dev,
14473 "Invalid num_vls %u, using %u VLs\n",
14474 num_vls, HFI1_MAX_VLS_SUPPORTED);
14475 num_vls = HFI1_MAX_VLS_SUPPORTED;
14476 }
14477 ppd->vls_supported = num_vls;
14478 ppd->vls_operational = ppd->vls_supported;
8a4d3444 14479 ppd->actual_vls_operational = ppd->vls_supported;
14480 /* Set the default MTU. */
14481 for (vl = 0; vl < num_vls; vl++)
14482 dd->vld[vl].mtu = hfi1_max_mtu;
14483 dd->vld[15].mtu = MAX_MAD_PACKET;
14484 /*
14485 * Set the initial values to reasonable default, will be set
14486 * for real when link is up.
14487 */
14488 ppd->lstate = IB_PORT_DOWN;
14489 ppd->overrun_threshold = 0x4;
14490 ppd->phy_error_threshold = 0xf;
14491 ppd->port_crc_mode_enabled = link_crc_mask;
14492 /* initialize supported LTP CRC mode */
14493 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14494 /* initialize enabled LTP CRC mode */
14495 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14496 /* start in offline */
14497 ppd->host_link_state = HLS_DN_OFFLINE;
14498 init_vl_arb_caches(ppd);
f45c8dc8 14499 ppd->last_pstate = 0xff; /* invalid value */
14500 }
14501
14502 dd->link_default = HLS_DN_POLL;
14503
14504 /*
14505 * Do remaining PCIe setup and save PCIe values in dd.
14506 * Any error printing is already done by the init code.
14507 * On return, we have the chip mapped.
14508 */
26ea2544 14509 ret = hfi1_pcie_ddinit(dd, pdev);
14510 if (ret < 0)
14511 goto bail_free;
14512
14513 /* verify that reads actually work, save revision for reset check */
14514 dd->revision = read_csr(dd, CCE_REVISION);
14515 if (dd->revision == ~(u64)0) {
14516 dd_dev_err(dd, "cannot read chip CSRs\n");
14517 ret = -EINVAL;
14518 goto bail_cleanup;
14519 }
14520 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14521 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14522 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14523 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14524
14525 /*
14526 * Check interrupt registers mapping if the driver has no access to
14527 * the upstream component. In this case, it is likely that the driver
14528 * is running in a VM.
14529 */
14530 if (!parent) {
14531 ret = check_int_registers(dd);
14532 if (ret)
14533 goto bail_cleanup;
14534 }
14535
14536 /*
14537 * obtain the hardware ID - NOT related to unit, which is a
14538 * software enumeration
14539 */
14540 reg = read_csr(dd, CCE_REVISION2);
14541 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14542 & CCE_REVISION2_HFI_ID_MASK;
14543 /* the variable size will remove unwanted bits */
14544 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14545 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14546 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14547 dd->icode < ARRAY_SIZE(inames) ?
14548 inames[dd->icode] : "unknown", (int)dd->irev);
14549
14550 /* speeds the hardware can support */
14551 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14552 /* speeds allowed to run at */
14553 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14554 /* give a reasonable active value, will be set on link up */
14555 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14556
14557 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14558 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14559 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14560 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14561 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14562 /* fix up link widths for emulation _p */
14563 ppd = dd->pport;
14564 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14565 ppd->link_width_supported =
14566 ppd->link_width_enabled =
14567 ppd->link_width_downgrade_supported =
14568 ppd->link_width_downgrade_enabled =
14569 OPA_LINK_WIDTH_1X;
14570 }
14571 /* ensure num_vls isn't larger than the number of sdma engines */
14572 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14573 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14574 num_vls, dd->chip_sdma_engines);
14575 num_vls = dd->chip_sdma_engines;
14576 ppd->vls_supported = dd->chip_sdma_engines;
8a4d3444 14577 ppd->vls_operational = ppd->vls_supported;
14578 }
14579
14580 /*
14581 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14582 * Limit the max if larger than the field holds. If timeout is
14583 * non-zero, then the calculated field will be at least 1.
14584 *
14585 * Must be after icode is set up - the cclock rate depends
14586 * on knowing the hardware being used.
14587 */
14588 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14589 if (dd->rcv_intr_timeout_csr >
14590 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14591 dd->rcv_intr_timeout_csr =
14592 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14593 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14594 dd->rcv_intr_timeout_csr = 1;
14595
14596 /* needs to be done before we look for the peer device */
14597 read_guid(dd);
14598
14599 /* set up shared ASIC data with peer device */
14600 ret = init_asic_data(dd);
14601 if (ret)
14602 goto bail_cleanup;
7c03ed85 14603
77241056
MM
14604 /* obtain chip sizes, reset chip CSRs */
14605 init_chip(dd);
14606
14607 /* read in the PCIe link speed information */
14608 ret = pcie_speeds(dd);
14609 if (ret)
14610 goto bail_cleanup;
14611
14612 /* call before get_platform_config(), after init_chip_resources() */
14613 ret = eprom_init(dd);
14614 if (ret)
14615 goto bail_free_rcverr;
14616
14617 /* Needs to be called before hfi1_firmware_init */
14618 get_platform_config(dd);
14619
14620 /* read in firmware */
14621 ret = hfi1_firmware_init(dd);
14622 if (ret)
14623 goto bail_cleanup;
14624
14625 /*
14626 * In general, the PCIe Gen3 transition must occur after the
14627 * chip has been idled (so it won't initiate any PCIe transactions
14628 * e.g. an interrupt) and before the driver changes any registers
14629 * (the transition will reset the registers).
14630 *
14631 * In particular, place this call after:
14632 * - init_chip() - the chip will not initiate any PCIe transactions
14633 * - pcie_speeds() - reads the current link speed
14634 * - hfi1_firmware_init() - the needed firmware is ready to be
14635 * downloaded
14636 */
14637 ret = do_pcie_gen3_transition(dd);
14638 if (ret)
14639 goto bail_cleanup;
14640
14641 /* start setting dd values and adjusting CSRs */
14642 init_early_variables(dd);
14643
14644 parse_platform_config(dd);
14645
14646 ret = obtain_boardname(dd);
14647 if (ret)
77241056 14648 goto bail_cleanup;
14649
14650 snprintf(dd->boardversion, BOARD_VERS_MAX,
5d9157aa 14651 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
77241056 14652 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14653 (u32)dd->majrev,
14654 (u32)dd->minrev,
14655 (dd->revision >> CCE_REVISION_SW_SHIFT)
14656 & CCE_REVISION_SW_MASK);
14657
14658 ret = set_up_context_variables(dd);
14659 if (ret)
14660 goto bail_cleanup;
14661
14662 /* set initial RXE CSRs */
14663 init_rxe(dd);
14664 /* set initial TXE CSRs */
14665 init_txe(dd);
14666 /* set initial non-RXE, non-TXE CSRs */
14667 init_other(dd);
14668 /* set up KDETH QP prefix in both RX and TX CSRs */
14669 init_kdeth_qp(dd);
14670
14671 ret = hfi1_dev_affinity_init(dd);
14672 if (ret)
14673 goto bail_cleanup;
957558c9 14674
14675 /* send contexts must be set up before receive contexts */
14676 ret = init_send_contexts(dd);
14677 if (ret)
14678 goto bail_cleanup;
14679
14680 ret = hfi1_create_ctxts(dd);
14681 if (ret)
14682 goto bail_cleanup;
14683
14684 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14685 /*
14686 * rcd[0] is guaranteed to be valid by this point. Also, all
14687 * contexts are using the same value, as per the module parameter.
14688 */
14689 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14690
14691 ret = init_pervl_scs(dd);
14692 if (ret)
14693 goto bail_cleanup;
14694
14695 /* sdma init */
14696 for (i = 0; i < dd->num_pports; ++i) {
14697 ret = sdma_init(dd, i);
14698 if (ret)
14699 goto bail_cleanup;
14700 }
14701
14702 /* use contexts created by hfi1_create_ctxts */
14703 ret = set_up_interrupts(dd);
14704 if (ret)
14705 goto bail_cleanup;
14706
14707 /* set up LCB access - must be after set_up_interrupts() */
14708 init_lcb_access(dd);
14709
14710 /*
14711 * Serial number is created from the base guid:
14712 * [27:24] = base guid [38:35]
14713 * [23: 0] = base guid [23: 0]
14714 */
77241056 14715 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14716 (dd->base_guid & 0xFFFFFF) |
14717 ((dd->base_guid >> 11) & 0xF000000));
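 /*
 * Worked example (illustrative): for base_guid = 0x0000001800123456,
 * guid[23:0] = 0x123456 and guid[38:35] = 0x3, so the serial is
 * 0x123456 | (0x3 << 24) = 0x03123456.
 */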
14718
14719 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14720 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14721 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14722
14723 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14724 if (ret)
14725 goto bail_clear_intr;
14726
14727 thermal_init(dd);
14728
14729 ret = init_cntrs(dd);
14730 if (ret)
14731 goto bail_clear_intr;
14732
14733 ret = init_rcverr(dd);
14734 if (ret)
14735 goto bail_free_cntrs;
14736
14737 init_completion(&dd->user_comp);
14738
14739 /* The user refcount starts with one to indicate an active device */
14740 atomic_set(&dd->user_refcount, 1);
14741
14742 goto bail;
14743
14744bail_free_rcverr:
14745 free_rcverr(dd);
14746bail_free_cntrs:
14747 free_cntrs(dd);
14748bail_clear_intr:
14749 clean_up_interrupts(dd);
14750bail_cleanup:
14751 hfi1_pcie_ddcleanup(dd);
14752bail_free:
14753 hfi1_free_devdata(dd);
14754 dd = ERR_PTR(ret);
14755bail:
14756 return dd;
14757}
14758
14759static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14760 u32 dw_len)
14761{
14762 u32 delta_cycles;
14763 u32 current_egress_rate = ppd->current_egress_rate;
14764 /* rates here are in units of 10^6 bits/sec */
14765
14766 if (desired_egress_rate == -1)
14767 return 0; /* shouldn't happen */
14768
14769 if (desired_egress_rate >= current_egress_rate)
14770 return 0; /* we can't help go faster, only slower */
14771
14772 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14773 egress_cycles(dw_len * 4, current_egress_rate);
14774
14775 return (u16)delta_cycles;
14776}
14777
14778/**
14779 * create_pbc - build a pbc for transmission
14780 * @flags: special case flags or-ed in built pbc
14781 * @srate_mbs: static rate in Mbits/sec
14782 * @vl: vl
14783 * @dw_len: dword length (header words + data words + pbc words)
14784 *
14785 * Create a PBC with the given flags, rate, VL, and length.
14786 *
14787 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14788 * for verbs, which does not use this PSM feature. The lone other caller
14789 * is for the diagnostic interface which calls this if the user does not
14790 * supply their own PBC.
14791 */
14792u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14793 u32 dw_len)
14794{
14795 u64 pbc, delay = 0;
14796
14797 if (unlikely(srate_mbs))
14798 delay = delay_cycles(ppd, srate_mbs, dw_len);
14799
14800 pbc = flags
14801 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14802 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14803 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14804 | (dw_len & PBC_LENGTH_DWS_MASK)
14805 << PBC_LENGTH_DWS_SHIFT;
14806
14807 return pbc;
14808}
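
/*
 * Usage sketch (illustrative): a caller that wants no static rate
 * throttling would build a PBC for VL 0 and a dw_len dword packet as
 *
 *	u64 pbc = create_pbc(ppd, 0, 0, 0, dw_len);
 *
 * flags = 0, srate_mbs = 0 (so delay stays 0), vl = 0; HCRC insertion
 * is always PBC_IHCRC_NONE here, as noted above.
 */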
14809
14810#define SBUS_THERMAL 0x4f
14811#define SBUS_THERM_MONITOR_MODE 0x1
14812
14813#define THERM_FAILURE(dev, ret, reason) \
14814 dd_dev_err((dev), \
14815 "Thermal sensor initialization failed: %s (%d)\n", \
14816 (reason), (ret))
14817
14818/*
cde10afa 14819 * Initialize the thermal sensor.
14820 *
14821 * After initialization, enable polling of thermal sensor through
14822 * SBus interface. In order for this to work, the SBus Master
14823 * firmware has to be loaded due to the fact that the HW polling
14824 * logic uses SBus interrupts, which are not supported with
14825 * default firmware. Otherwise, no data will be returned through
14826 * the ASIC_STS_THERM CSR.
14827 */
14828static int thermal_init(struct hfi1_devdata *dd)
14829{
14830 int ret = 0;
14831
14832 if (dd->icode != ICODE_RTL_SILICON ||
a453698b 14833 check_chip_resource(dd, CR_THERM_INIT, NULL))
14834 return ret;
14835
14836 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14837 if (ret) {
14838 THERM_FAILURE(dd, ret, "Acquire SBus");
14839 return ret;
14840 }
14841
77241056 14842 dd_dev_info(dd, "Initializing thermal sensor\n");
14843 /* Disable polling of thermal readings */
14844 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14845 msleep(100);
14846 /* Thermal Sensor Initialization */
14847 /* Step 1: Reset the Thermal SBus Receiver */
14848 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14849 RESET_SBUS_RECEIVER, 0);
14850 if (ret) {
14851 THERM_FAILURE(dd, ret, "Bus Reset");
14852 goto done;
14853 }
14854 /* Step 2: Set Reset bit in Thermal block */
14855 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14856 WRITE_SBUS_RECEIVER, 0x1);
14857 if (ret) {
14858 THERM_FAILURE(dd, ret, "Therm Block Reset");
14859 goto done;
14860 }
14861 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14862 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14863 WRITE_SBUS_RECEIVER, 0x32);
14864 if (ret) {
14865 THERM_FAILURE(dd, ret, "Write Clock Div");
14866 goto done;
14867 }
14868 /* Step 4: Select temperature mode */
14869 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14870 WRITE_SBUS_RECEIVER,
14871 SBUS_THERM_MONITOR_MODE);
14872 if (ret) {
14873 THERM_FAILURE(dd, ret, "Write Mode Sel");
14874 goto done;
14875 }
14876 /* Step 5: De-assert block reset and start conversion */
14877 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14878 WRITE_SBUS_RECEIVER, 0x2);
14879 if (ret) {
14880 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14881 goto done;
14882 }
14883 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14884 msleep(22);
14885
14886 /* Enable polling of thermal readings */
14887 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14888
14889 /* Set initialized flag */
14890 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14891 if (ret)
14892 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14893
77241056 14894done:
576531fd 14895 release_chip_resource(dd, CR_SBUS);
14896 return ret;
14897}
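
/*
 * Note on step 3 above (illustrative arithmetic): the divider value
 * 0x32 is decimal 50, and 100 MHz / 50 = 2 MHz, matching the
 * "100MHz -> 2MHz" comment.
 */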
14898
14899static void handle_temp_err(struct hfi1_devdata *dd)
14900{
14901 struct hfi1_pportdata *ppd = &dd->pport[0];
14902 /*
14903 * Thermal Critical Interrupt
14904 * Put the device into forced freeze mode, take link down to
14905 * offline, and put DC into reset.
14906 */
14907 dd_dev_emerg(dd,
14908 "Critical temperature reached! Forcing device into freeze mode!\n");
14909 dd->flags |= HFI1_FORCED_FREEZE;
8638b77f 14910 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
14911 /*
14912 * Shut DC down as much and as quickly as possible.
14913 *
14914 * Step 1: Take the link down to OFFLINE. This will cause the
14915 * 8051 to put the Serdes in reset. However, we don't want to
14916 * go through the entire link state machine since we want to
14917 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14918 * but rather an attempt to save the chip.
14919 * Code below is almost the same as quiet_serdes() but avoids
14920 * all the extra work and the sleeps.
14921 */
14922 ppd->driver_link_ready = 0;
14923 ppd->link_enabled = 0;
14924 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14925 PLS_OFFLINE);
14926 /*
14927 * Step 2: Shutdown LCB and 8051
14928 * After shutdown, do not restore DC_CFG_RESET value.
14929 */
14930 dc_shutdown(dd);
14931}