 1/*
 2 * Copyright(c) 2015, 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48/*
49 * This file contains all of the code that is specific to the HFI chip
50 */
51
52#include <linux/pci.h>
53#include <linux/delay.h>
54#include <linux/interrupt.h>
55#include <linux/module.h>
56
57#include "hfi.h"
58#include "trace.h"
59#include "mad.h"
60#include "pio.h"
61#include "sdma.h"
62#include "eprom.h"
 63#include "efivar.h"
 64#include "platform.h"
 65#include "aspm.h"
66
67#define NUM_IB_PORTS 1
68
69uint kdeth_qp;
70module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
71MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
72
73uint num_vls = HFI1_MAX_VLS_SUPPORTED;
74module_param(num_vls, uint, S_IRUGO);
75MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
76
77/*
78 * Default time to aggregate two 10K packets from the idle state
79 * (timer not running). The timer starts at the end of the first packet,
80 * so only the time for one 10K packet and header plus a bit extra is needed.
 81 * 10 * 1024 + 64 header bytes = 10304 bytes
 82 * 10304 bytes / 12.5 GB/s = 824.32 ns
83 */
84uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
85module_param(rcv_intr_timeout, uint, S_IRUGO);
86MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
87
88uint rcv_intr_count = 16; /* same as qib */
89module_param(rcv_intr_count, uint, S_IRUGO);
90MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
91
92ushort link_crc_mask = SUPPORTED_CRCS;
93module_param(link_crc_mask, ushort, S_IRUGO);
94MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
95
96uint loopback;
97module_param_named(loopback, loopback, uint, S_IRUGO);
 98MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
99
100/* Other driver tunables */
 101uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
102static ushort crc_14b_sideband = 1;
103static uint use_flr = 1;
104uint quick_linkup; /* skip LNI */
105
106struct flag_table {
107 u64 flag; /* the flag */
108 char *str; /* description string */
109 u16 extra; /* extra information */
110 u16 unused0;
111 u32 unused1;
112};
113
114/* str must be a string constant */
115#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
116#define FLAG_ENTRY0(str, flag) {flag, str, 0}
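/*
 * Illustrative sketch only (not driver code): a flag table pairs a status
 * bit with its description (and, via FLAG_ENTRY, "extra" consequence bits
 * such as the SEC_* values defined below) so an error handler can walk the
 * set bits of an error CSR and report each by name, roughly:
 *
 *	for (i = 0; i < ARRAY_SIZE(table); i++)
 *		if (reg & table[i].flag)
 *			pr_info("%s\n", table[i].str);
 *
 * "table", "reg" and "i" are placeholder names; the real tables and the
 * routines that consume them appear later in this file.
 */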
117
118/* Send Error Consequences */
119#define SEC_WRITE_DROPPED 0x1
120#define SEC_PACKET_DROPPED 0x2
121#define SEC_SC_HALTED 0x4 /* per-context only */
122#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
123
 124#define MIN_KERNEL_KCTXTS 2
 125#define FIRST_KERNEL_KCTXT 1
126/* sizes for both the QP and RSM map tables */
127#define NUM_MAP_ENTRIES 256
128#define NUM_MAP_REGS 32
129
130/* Bit offset into the GUID which carries HFI id information */
131#define GUID_HFI_INDEX_SHIFT 39
132
133/* extract the emulation revision */
134#define emulator_rev(dd) ((dd)->irev >> 8)
135/* parallel and serial emulation versions are 3 and 4 respectively */
136#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
137#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
138
139/* RSM fields */
140
141/* packet type */
142#define IB_PACKET_TYPE 2ull
143#define QW_SHIFT 6ull
144/* QPN[7..1] */
145#define QPN_WIDTH 7ull
146
147/* LRH.BTH: QW 0, OFFSET 48 - for match */
148#define LRH_BTH_QW 0ull
149#define LRH_BTH_BIT_OFFSET 48ull
150#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
151#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
152#define LRH_BTH_SELECT
153#define LRH_BTH_MASK 3ull
154#define LRH_BTH_VALUE 2ull
155
156/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
157#define LRH_SC_QW 0ull
158#define LRH_SC_BIT_OFFSET 56ull
159#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
160#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
161#define LRH_SC_MASK 128ull
162#define LRH_SC_VALUE 0ull
163
164/* SC[n..0] QW 0, OFFSET 60 - for select */
165#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
166
167/* QPN[m+n:1] QW 1, OFFSET 1 */
168#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
169
170/* defines to build power on SC2VL table */
171#define SC2VL_VAL( \
172 num, \
173 sc0, sc0val, \
174 sc1, sc1val, \
175 sc2, sc2val, \
176 sc3, sc3val, \
177 sc4, sc4val, \
178 sc5, sc5val, \
179 sc6, sc6val, \
180 sc7, sc7val) \
181( \
182 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
183 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
184 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
185 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
186 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
187 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
188 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
189 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
190)
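/*
 * Expansion example, for illustration only: a call like
 *	SC2VL_VAL(0, 0, 0, 1, 0, 2, 1, 3, 1, 4, 2, 5, 2, 6, 3, 7, 3)
 * builds one 64-bit value for the send SC-to-VLt mapping CSR selected by
 * the first argument, i.e.
 *	((u64)0 << SEND_SC2VLT0_SC0_SHIFT) |
 *	((u64)0 << SEND_SC2VLT0_SC1_SHIFT) |
 *	((u64)1 << SEND_SC2VLT0_SC2_SHIFT) | ...
 * The SC-to-VL pairing in this example is made up; the actual power-on
 * table is built where the macro is invoked.
 */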
191
192#define DC_SC_VL_VAL( \
193 range, \
194 e0, e0val, \
195 e1, e1val, \
196 e2, e2val, \
197 e3, e3val, \
198 e4, e4val, \
199 e5, e5val, \
200 e6, e6val, \
201 e7, e7val, \
202 e8, e8val, \
203 e9, e9val, \
204 e10, e10val, \
205 e11, e11val, \
206 e12, e12val, \
207 e13, e13val, \
208 e14, e14val, \
209 e15, e15val) \
210( \
211 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
212 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
213 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
214 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
215 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
216 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
217 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
218 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
219 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
220 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
221 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
222 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
223 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
224 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
225 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
226 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
227)
228
229/* all CceStatus sub-block freeze bits */
230#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
231 | CCE_STATUS_RXE_FROZE_SMASK \
232 | CCE_STATUS_TXE_FROZE_SMASK \
233 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
234/* all CceStatus sub-block TXE pause bits */
235#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
236 | CCE_STATUS_TXE_PAUSED_SMASK \
237 | CCE_STATUS_SDMA_PAUSED_SMASK)
238/* all CceStatus sub-block RXE pause bits */
239#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
240
241/*
242 * CCE Error flags.
243 */
244static struct flag_table cce_err_status_flags[] = {
245/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
246 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
247/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
248 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
249/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
250 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
251/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
252 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
253/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
254 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
255/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
256 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
257/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
258 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
259/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
260 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
261/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
262 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
263/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
264 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
 265/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
266 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
267/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
268 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
269/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
270 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
271/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
272 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
 273/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
274 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
275/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
276 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
 277/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
278 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
 279/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
280 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
281/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
282 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
283/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
284 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
285/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
286 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
287/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
288 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
289/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
290 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
291/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
292 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
293/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
294 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
295/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
296 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
297/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
298 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
299/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
300 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
301/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
302 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
303/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
304 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
305/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
306 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
307/*31*/ FLAG_ENTRY0("LATriggered",
308 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
309/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
310 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
311/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
312 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
313/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
314 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
315/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
316 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
317/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
318 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
319/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
320 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
321/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
322 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
323/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
324 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
325/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
326 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
327/*41-63 reserved*/
328};
329
330/*
331 * Misc Error flags
332 */
333#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
334static struct flag_table misc_err_status_flags[] = {
335/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
336/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
337/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
338/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
339/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
340/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
341/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
342/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
343/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
344/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
345/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
346/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
347/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
348};
349
350/*
351 * TXE PIO Error flags and consequences
352 */
353static struct flag_table pio_err_status_flags[] = {
354/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
355 SEC_WRITE_DROPPED,
356 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
357/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
358 SEC_SPC_FREEZE,
359 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
360/* 2*/ FLAG_ENTRY("PioCsrParity",
361 SEC_SPC_FREEZE,
362 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
363/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
364 SEC_SPC_FREEZE,
365 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
366/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
367 SEC_SPC_FREEZE,
368 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
369/* 5*/ FLAG_ENTRY("PioPccFifoParity",
370 SEC_SPC_FREEZE,
371 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
372/* 6*/ FLAG_ENTRY("PioPecFifoParity",
373 SEC_SPC_FREEZE,
374 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
375/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
376 SEC_SPC_FREEZE,
377 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
378/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
379 SEC_SPC_FREEZE,
380 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
381/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
382 SEC_SPC_FREEZE,
383 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
384/*10*/ FLAG_ENTRY("PioSmPktResetParity",
385 SEC_SPC_FREEZE,
386 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
387/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
388 SEC_SPC_FREEZE,
389 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
390/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
391 SEC_SPC_FREEZE,
392 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
393/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
394 0,
395 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
396/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
397 0,
398 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
399/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
400 SEC_SPC_FREEZE,
401 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
402/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
403 SEC_SPC_FREEZE,
404 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
405/*17*/ FLAG_ENTRY("PioInitSmIn",
406 0,
407 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
408/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
409 SEC_SPC_FREEZE,
410 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
411/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
412 SEC_SPC_FREEZE,
413 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
414/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
415 0,
416 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
417/*21*/ FLAG_ENTRY("PioWriteDataParity",
418 SEC_SPC_FREEZE,
419 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
420/*22*/ FLAG_ENTRY("PioStateMachine",
421 SEC_SPC_FREEZE,
422 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
423/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
 424 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
 425 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
 426/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
 427 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
428 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
429/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
430 SEC_SPC_FREEZE,
431 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
432/*26*/ FLAG_ENTRY("PioVlfSopParity",
433 SEC_SPC_FREEZE,
434 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
435/*27*/ FLAG_ENTRY("PioVlFifoParity",
436 SEC_SPC_FREEZE,
437 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
438/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
439 SEC_SPC_FREEZE,
440 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
441/*29*/ FLAG_ENTRY("PioPpmcSopLen",
442 SEC_SPC_FREEZE,
443 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
444/*30-31 reserved*/
445/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
446 SEC_SPC_FREEZE,
447 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
448/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
449 SEC_SPC_FREEZE,
450 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
451/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
452 SEC_SPC_FREEZE,
453 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
454/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
455 SEC_SPC_FREEZE,
456 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
457/*36-63 reserved*/
458};
459
460/* TXE PIO errors that cause an SPC freeze */
461#define ALL_PIO_FREEZE_ERR \
462 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
463 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
464 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
465 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
466 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
490 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
491
492/*
493 * TXE SDMA Error flags
494 */
495static struct flag_table sdma_err_status_flags[] = {
496/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
497 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
498/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
499 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
500/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
501 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
502/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
503 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
504/*04-63 reserved*/
505};
506
507/* TXE SDMA errors that cause an SPC freeze */
508#define ALL_SDMA_FREEZE_ERR \
509 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
510 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
511 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
512
513/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
514#define PORT_DISCARD_EGRESS_ERRS \
515 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
516 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
517 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
518
519/*
520 * TXE Egress Error flags
521 */
522#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
523static struct flag_table egress_err_status_flags[] = {
524/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
525/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
526/* 2 reserved */
527/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
528 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
529/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
530/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
531/* 6 reserved */
532/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
533 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
534/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
535 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
536/* 9-10 reserved */
537/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
538 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
539/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
540/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
541/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
542/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
543/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
544 SEES(TX_SDMA0_DISALLOWED_PACKET)),
545/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
546 SEES(TX_SDMA1_DISALLOWED_PACKET)),
547/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
548 SEES(TX_SDMA2_DISALLOWED_PACKET)),
549/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
550 SEES(TX_SDMA3_DISALLOWED_PACKET)),
551/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
552 SEES(TX_SDMA4_DISALLOWED_PACKET)),
553/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
554 SEES(TX_SDMA5_DISALLOWED_PACKET)),
555/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
556 SEES(TX_SDMA6_DISALLOWED_PACKET)),
557/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
558 SEES(TX_SDMA7_DISALLOWED_PACKET)),
559/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
560 SEES(TX_SDMA8_DISALLOWED_PACKET)),
561/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
562 SEES(TX_SDMA9_DISALLOWED_PACKET)),
563/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
564 SEES(TX_SDMA10_DISALLOWED_PACKET)),
565/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
566 SEES(TX_SDMA11_DISALLOWED_PACKET)),
567/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
568 SEES(TX_SDMA12_DISALLOWED_PACKET)),
569/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
570 SEES(TX_SDMA13_DISALLOWED_PACKET)),
571/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
572 SEES(TX_SDMA14_DISALLOWED_PACKET)),
573/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
574 SEES(TX_SDMA15_DISALLOWED_PACKET)),
575/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
576 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
577/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
578 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
579/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
580 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
581/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
582 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
583/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
584 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
585/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
586 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
587/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
588 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
589/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
590 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
591/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
592 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
593/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
594/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
595/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
596/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
597/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
598/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
599/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
600/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
601/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
602/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
603/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
604/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
605/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
606/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
607/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
608/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
609/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
610/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
611/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
612/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
613/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
614/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
615 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
616/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
617 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
618};
619
620/*
621 * TXE Egress Error Info flags
622 */
623#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
624static struct flag_table egress_err_info_flags[] = {
625/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
626/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
627/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
628/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
629/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
630/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
631/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
632/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
633/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
634/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
635/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
636/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
637/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
638/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
639/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
640/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
641/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
642/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
643/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
644/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
645/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
646/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
647};
648
649/* TXE Egress errors that cause an SPC freeze */
650#define ALL_TXE_EGRESS_FREEZE_ERR \
651 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
652 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
653 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
654 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
655 | SEES(TX_LAUNCH_CSR_PARITY) \
656 | SEES(TX_SBRD_CTL_CSR_PARITY) \
657 | SEES(TX_CONFIG_PARITY) \
658 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
659 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
660 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
661 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
662 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
663 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
664 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
665 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
666 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
667 | SEES(TX_CREDIT_RETURN_PARITY))
668
669/*
670 * TXE Send error flags
671 */
672#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
673static struct flag_table send_err_status_flags[] = {
 674/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
675/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
676/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
677};
678
679/*
680 * TXE Send Context Error flags and consequences
681 */
682static struct flag_table sc_err_status_flags[] = {
683/* 0*/ FLAG_ENTRY("InconsistentSop",
684 SEC_PACKET_DROPPED | SEC_SC_HALTED,
685 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
686/* 1*/ FLAG_ENTRY("DisallowedPacket",
687 SEC_PACKET_DROPPED | SEC_SC_HALTED,
688 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
689/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
690 SEC_WRITE_DROPPED | SEC_SC_HALTED,
691 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
692/* 3*/ FLAG_ENTRY("WriteOverflow",
693 SEC_WRITE_DROPPED | SEC_SC_HALTED,
694 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
695/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
696 SEC_WRITE_DROPPED | SEC_SC_HALTED,
697 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
698/* 5-63 reserved*/
699};
700
701/*
702 * RXE Receive Error flags
703 */
704#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
705static struct flag_table rxe_err_status_flags[] = {
706/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
707/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
708/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
709/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
710/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
711/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
712/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
713/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
714/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
715/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
716/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
717/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
718/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
719/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
720/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
721/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
722/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
723 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
724/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
725/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
726/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
727 RXES(RBUF_BLOCK_LIST_READ_UNC)),
728/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
729 RXES(RBUF_BLOCK_LIST_READ_COR)),
730/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
731 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
732/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
733 RXES(RBUF_CSR_QENT_CNT_PARITY)),
734/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
735 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
736/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
737 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
738/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
739/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
740/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
741 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
742/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
743/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
744/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
745/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
746/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
747/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
748/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
749/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
750 RXES(RBUF_FL_INITDONE_PARITY)),
751/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
752 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
753/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
754/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
755/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
756/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
757 RXES(LOOKUP_DES_PART1_UNC_COR)),
758/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
759 RXES(LOOKUP_DES_PART2_PARITY)),
760/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
761/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
762/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
763/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
764/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
765/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
766/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
767/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
768/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
769/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
770/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
771/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
772/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
773/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
774/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
775/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
776/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
777/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
778/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
779/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
780/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
781/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
782};
783
784/* RXE errors that will trigger an SPC freeze */
785#define ALL_RXE_FREEZE_ERR \
786 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
787 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
788 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
789 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
790 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
823 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
824 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
825 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
826 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
827 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
828 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
829 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
830
831#define RXE_FREEZE_ABORT_MASK \
832 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
833 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
834 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
835
836/*
837 * DCC Error Flags
838 */
839#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
840static struct flag_table dcc_err_flags[] = {
841 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
842 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
843 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
844 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
845 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
846 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
847 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
848 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
849 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
850 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
851 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
852 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
853 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
854 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
855 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
856 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
857 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
858 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
859 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
860 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
861 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
862 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
863 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
864 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
865 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
866 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
867 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
868 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
869 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
870 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
871 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
872 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
873 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
874 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
875 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
876 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
877 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
878 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
879 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
880 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
881 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
882 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
883 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
884 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
885 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
886 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
887};
888
889/*
890 * LCB error flags
891 */
892#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
893static struct flag_table lcb_err_flags[] = {
894/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
895/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
896/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
897/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
898 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
899/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
900/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
901/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
902/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
903/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
904/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
905/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
906/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
907/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
908/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
909 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
910/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
911/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
912/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
913/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
914/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
915/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
916 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
917/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
918/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
919/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
920/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
921/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
922/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
923/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
924 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
925/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
926/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
927 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
928/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
929 LCBE(REDUNDANT_FLIT_PARITY_ERR))
930};
931
932/*
933 * DC8051 Error Flags
934 */
935#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
936static struct flag_table dc8051_err_flags[] = {
937 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
938 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
939 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
940 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
941 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
942 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
943 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
944 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
945 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
 946 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
947 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
948};
949
950/*
951 * DC8051 Information Error flags
952 *
953 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
954 */
955static struct flag_table dc8051_info_err_flags[] = {
956 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
957 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
958 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
959 FLAG_ENTRY0("Serdes internal loopback failure",
 960 FAILED_SERDES_INTERNAL_LOOPBACK),
961 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
962 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
963 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
964 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
965 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
966 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
967 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
968 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
969 FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT)
970};
971
972/*
973 * DC8051 Information Host Information flags
974 *
975 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
976 */
977static struct flag_table dc8051_info_host_msg_flags[] = {
978 FLAG_ENTRY0("Host request done", 0x0001),
979 FLAG_ENTRY0("BC SMA message", 0x0002),
980 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
981 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
982 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
983 FLAG_ENTRY0("External device config request", 0x0020),
984 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
985 FLAG_ENTRY0("LinkUp achieved", 0x0080),
986 FLAG_ENTRY0("Link going down", 0x0100),
987};
988
989static u32 encoded_size(u32 size);
990static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
991static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
992static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
993 u8 *continuous);
994static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
995 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
996static void read_vc_remote_link_width(struct hfi1_devdata *dd,
997 u8 *remote_tx_rate, u16 *link_widths);
998static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
999 u8 *flag_bits, u16 *link_widths);
1000static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1001 u8 *device_rev);
1002static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1003static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1004static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1005 u8 *tx_polarity_inversion,
1006 u8 *rx_polarity_inversion, u8 *max_rate);
1007static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1008 unsigned int context, u64 err_status);
1009static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1010static void handle_dcc_err(struct hfi1_devdata *dd,
1011 unsigned int context, u64 err_status);
1012static void handle_lcb_err(struct hfi1_devdata *dd,
1013 unsigned int context, u64 err_status);
1014static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1016static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1017static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1018static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1019static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1020static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1021static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022static void set_partition_keys(struct hfi1_pportdata *);
1023static const char *link_state_name(u32 state);
1024static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1025 u32 state);
1026static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1027 u64 *out_data);
1028static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1029static int thermal_init(struct hfi1_devdata *dd);
1030
1031static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1032 int msecs);
1033static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
 1034static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1035static void handle_temp_err(struct hfi1_devdata *);
1036static void dc_shutdown(struct hfi1_devdata *);
1037static void dc_start(struct hfi1_devdata *);
1038static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1039 unsigned int *np);
1040
1041/*
1042 * Error interrupt table entry. This is used as input to the interrupt
1043 * "clear down" routine used for all second tier error interrupt register.
1044 * Second tier interrupt registers have a single bit representing them
1045 * in the top-level CceIntStatus.
1046 */
1047struct err_reg_info {
1048 u32 status; /* status CSR offset */
1049 u32 clear; /* clear CSR offset */
1050 u32 mask; /* mask CSR offset */
1051 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1052 const char *desc;
1053};
1054
1055#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1056#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1057#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1058
1059/*
1060 * Helpers for building HFI and DC error interrupt table entries. Different
1061 * helpers are needed because of inconsistent register names.
1062 */
1063#define EE(reg, handler, desc) \
1064 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1065 handler, desc }
1066#define DC_EE1(reg, handler, desc) \
1067 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1068#define DC_EE2(reg, handler, desc) \
1069 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
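/*
 * Example for illustration: EE(CCE_ERR, handle_cce_err, "CceErr"), used in
 * the misc_errs table below, expands to
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" }
 * i.e. the status/clear/mask CSR offsets of that error group plus its
 * handler and description.
 */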
1070
1071/*
1072 * Table of the "misc" grouping of error interrupts. Each entry refers to
1073 * another register containing more information.
1074 */
1075static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1076/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1077/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1078/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1079/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1080/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1081/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1082/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1083/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1084 /* the rest are reserved */
1085};
1086
1087/*
1088 * Index into the Various section of the interrupt sources
1089 * corresponding to the Critical Temperature interrupt.
1090 */
1091#define TCRIT_INT_SOURCE 4
1092
1093/*
1094 * SDMA error interrupt entry - refers to another register containing more
1095 * information.
1096 */
1097static const struct err_reg_info sdma_eng_err =
1098 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1099
1100static const struct err_reg_info various_err[NUM_VARIOUS] = {
1101/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1102/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1103/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1104/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1105/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1106 /* rest are reserved */
1107};
1108
1109/*
1110 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1111 * register can not be derived from the MTU value because 10K is not
1112 * a power of 2. Therefore, we need a constant. Everything else can
1113 * be calculated.
1114 */
1115#define DCC_CFG_PORT_MTU_CAP_10240 7
1116
1117/*
1118 * Table of the DC grouping of error interrupts. Each entry refers to
1119 * another register containing more information.
1120 */
1121static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1122/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1123/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1124/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1125/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1126 /* the rest are reserved */
1127};
1128
1129struct cntr_entry {
1130 /*
1131 * counter name
1132 */
1133 char *name;
1134
1135 /*
1136 * csr to read for name (if applicable)
1137 */
1138 u64 csr;
1139
1140 /*
1141 * offset into dd or ppd to store the counter's value
1142 */
1143 int offset;
1144
1145 /*
1146 * flags
1147 */
1148 u8 flags;
1149
1150 /*
1151 * accessor for stat element, context either dd or ppd
1152 */
1153 u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1154 int mode, u64 data);
1155};
1156
1157#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1158#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1159
1160#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1161{ \
1162 name, \
1163 csr, \
1164 offset, \
1165 flags, \
1166 accessor \
1167}
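/*
 * Illustration only: CNTR_ELEM("Foo", 0x1000, 0, CNTR_NORMAL,
 * dev_access_u64_csr) would describe a counter named "Foo" that is read
 * and written through dev_access_u64_csr at CSR offset 0x1000. The name
 * and offset are placeholders; the real entries are built with the
 * RXE/TXE/CCE/DC helper macros that follow.
 */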
1168
1169/* 32bit RXE */
1170#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1171CNTR_ELEM(#name, \
1172 (counter * 8 + RCV_COUNTER_ARRAY32), \
1173 0, flags | CNTR_32BIT, \
1174 port_access_u32_csr)
1175
1176#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1177CNTR_ELEM(#name, \
1178 (counter * 8 + RCV_COUNTER_ARRAY32), \
1179 0, flags | CNTR_32BIT, \
1180 dev_access_u32_csr)
1181
1182/* 64bit RXE */
1183#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1184CNTR_ELEM(#name, \
1185 (counter * 8 + RCV_COUNTER_ARRAY64), \
1186 0, flags, \
1187 port_access_u64_csr)
1188
1189#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1190CNTR_ELEM(#name, \
1191 (counter * 8 + RCV_COUNTER_ARRAY64), \
1192 0, flags, \
1193 dev_access_u64_csr)
1194
1195#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1196#define OVR_ELM(ctx) \
1197CNTR_ELEM("RcvHdrOvr" #ctx, \
 1198 (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1199 0, CNTR_NORMAL, port_access_u64_csr)
1200
1201/* 32bit TXE */
1202#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1203CNTR_ELEM(#name, \
1204 (counter * 8 + SEND_COUNTER_ARRAY32), \
1205 0, flags | CNTR_32BIT, \
1206 port_access_u32_csr)
1207
1208/* 64bit TXE */
1209#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1210CNTR_ELEM(#name, \
1211 (counter * 8 + SEND_COUNTER_ARRAY64), \
1212 0, flags, \
1213 port_access_u64_csr)
1214
 1215#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1216CNTR_ELEM(#name,\
1217 counter * 8 + SEND_COUNTER_ARRAY64, \
1218 0, \
1219 flags, \
1220 dev_access_u64_csr)
1221
1222/* CCE */
1223#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1224CNTR_ELEM(#name, \
1225 (counter * 8 + CCE_COUNTER_ARRAY32), \
1226 0, flags | CNTR_32BIT, \
1227 dev_access_u32_csr)
1228
1229#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1230CNTR_ELEM(#name, \
1231 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1232 0, flags | CNTR_32BIT, \
1233 dev_access_u32_csr)
1234
1235/* DC */
1236#define DC_PERF_CNTR(name, counter, flags) \
1237CNTR_ELEM(#name, \
1238 counter, \
1239 0, \
1240 flags, \
1241 dev_access_u64_csr)
1242
1243#define DC_PERF_CNTR_LCB(name, counter, flags) \
1244CNTR_ELEM(#name, \
1245 counter, \
1246 0, \
1247 flags, \
1248 dc_access_lcb_cntr)
1249
1250/* ibp counters */
1251#define SW_IBP_CNTR(name, cntr) \
1252CNTR_ELEM(#name, \
1253 0, \
1254 0, \
1255 CNTR_SYNTH, \
1256 access_ibp_##cntr)
1257
1258u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1259{
 1260 if (dd->flags & HFI1_PRESENT) {
 1261 return readq((void __iomem *)dd->kregbase + offset);
1262 }
1263 return -1;
1264}
1265
1266void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1267{
1268 if (dd->flags & HFI1_PRESENT)
1269 writeq(value, (void __iomem *)dd->kregbase + offset);
1270}
1271
1272void __iomem *get_csr_addr(
1273 struct hfi1_devdata *dd,
1274 u32 offset)
1275{
1276 return (void __iomem *)dd->kregbase + offset;
1277}
1278
1279static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1280 int mode, u64 value)
1281{
1282 u64 ret;
1283
1284 if (mode == CNTR_MODE_R) {
1285 ret = read_csr(dd, csr);
1286 } else if (mode == CNTR_MODE_W) {
1287 write_csr(dd, csr, value);
1288 ret = value;
1289 } else {
1290 dd_dev_err(dd, "Invalid cntr register access mode");
1291 return 0;
1292 }
1293
1294 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1295 return ret;
1296}
1297
1298/* Dev Access */
1299static u64 dev_access_u32_csr(const struct cntr_entry *entry,
17fb4f29 1300 void *context, int vl, int mode, u64 data)
77241056 1301{
a787bde8 1302 struct hfi1_devdata *dd = context;
a699c6c2 1303 u64 csr = entry->csr;
77241056 1304
1305 if (entry->flags & CNTR_SDMA) {
1306 if (vl == CNTR_INVALID_VL)
1307 return 0;
1308 csr += 0x100 * vl;
1309 } else {
1310 if (vl != CNTR_INVALID_VL)
1311 return 0;
1312 }
1313 return read_write_csr(dd, csr, mode, data);
1314}
1315
1316static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1317 void *context, int idx, int mode, u64 data)
1318{
1319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1320
1321 if (dd->per_sdma && idx < dd->num_sdma)
1322 return dd->per_sdma[idx].err_cnt;
1323 return 0;
1324}
1325
1326static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1327 void *context, int idx, int mode, u64 data)
1328{
1329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1330
1331 if (dd->per_sdma && idx < dd->num_sdma)
1332 return dd->per_sdma[idx].sdma_int_cnt;
1333 return 0;
1334}
1335
1336static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1337 void *context, int idx, int mode, u64 data)
1338{
1339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1340
1341 if (dd->per_sdma && idx < dd->num_sdma)
1342 return dd->per_sdma[idx].idle_int_cnt;
1343 return 0;
1344}
1345
1346static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1347 void *context, int idx, int mode,
1348 u64 data)
1349{
1350 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1351
1352 if (dd->per_sdma && idx < dd->num_sdma)
1353 return dd->per_sdma[idx].progress_int_cnt;
1354 return 0;
1355}
1356
1357static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
17fb4f29 1358 int vl, int mode, u64 data)
77241056 1359{
a787bde8 1360 struct hfi1_devdata *dd = context;
1361
1362 u64 val = 0;
1363 u64 csr = entry->csr;
1364
1365 if (entry->flags & CNTR_VL) {
1366 if (vl == CNTR_INVALID_VL)
1367 return 0;
1368 csr += 8 * vl;
1369 } else {
1370 if (vl != CNTR_INVALID_VL)
1371 return 0;
1372 }
1373
1374 val = read_write_csr(dd, csr, mode, data);
1375 return val;
1376}
1377
1378static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
17fb4f29 1379 int vl, int mode, u64 data)
77241056 1380{
a787bde8 1381 struct hfi1_devdata *dd = context;
1382 u32 csr = entry->csr;
1383 int ret = 0;
1384
1385 if (vl != CNTR_INVALID_VL)
1386 return 0;
1387 if (mode == CNTR_MODE_R)
1388 ret = read_lcb_csr(dd, csr, &data);
1389 else if (mode == CNTR_MODE_W)
1390 ret = write_lcb_csr(dd, csr, data);
1391
1392 if (ret) {
1393 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1394 return 0;
1395 }
1396
1397 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1398 return data;
1399}
1400
1401/* Port Access */
1402static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
17fb4f29 1403 int vl, int mode, u64 data)
77241056 1404{
a787bde8 1405 struct hfi1_pportdata *ppd = context;
1406
1407 if (vl != CNTR_INVALID_VL)
1408 return 0;
1409 return read_write_csr(ppd->dd, entry->csr, mode, data);
1410}
1411
1412static u64 port_access_u64_csr(const struct cntr_entry *entry,
17fb4f29 1413 void *context, int vl, int mode, u64 data)
77241056 1414{
a787bde8 1415 struct hfi1_pportdata *ppd = context;
1416 u64 val;
1417 u64 csr = entry->csr;
1418
1419 if (entry->flags & CNTR_VL) {
1420 if (vl == CNTR_INVALID_VL)
1421 return 0;
1422 csr += 8 * vl;
1423 } else {
1424 if (vl != CNTR_INVALID_VL)
1425 return 0;
1426 }
1427 val = read_write_csr(ppd->dd, csr, mode, data);
1428 return val;
1429}
1430
1431/* Software defined */
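/*
 * Software counters are plain u64s updated by the driver.
 * read_write_sw() mirrors read_write_csr(): mode R returns the value,
 * mode W stores @data and returns it.
 */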
1432static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1433 u64 data)
1434{
1435 u64 ret;
1436
1437 if (mode == CNTR_MODE_R) {
1438 ret = *cntr;
1439 } else if (mode == CNTR_MODE_W) {
1440 *cntr = data;
1441 ret = data;
1442 } else {
1443 dd_dev_err(dd, "Invalid cntr sw access mode");
1444 return 0;
1445 }
1446
1447 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1448
1449 return ret;
1450}
1451
1452static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
17fb4f29 1453 int vl, int mode, u64 data)
77241056 1454{
a787bde8 1455 struct hfi1_pportdata *ppd = context;
1456
1457 if (vl != CNTR_INVALID_VL)
1458 return 0;
1459 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1460}
1461
1462static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
17fb4f29 1463 int vl, int mode, u64 data)
77241056 1464{
a787bde8 1465 struct hfi1_pportdata *ppd = context;
1466
1467 if (vl != CNTR_INVALID_VL)
1468 return 0;
1469 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1470}
1471
1472static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1473 void *context, int vl, int mode,
1474 u64 data)
1475{
1476 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1477
1478 if (vl != CNTR_INVALID_VL)
1479 return 0;
1480 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1481}
1482
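/*
 * Transmit discards are kept both per port (vl == CNTR_INVALID_VL)
 * and per VL; an out-of-range VL is redirected to a throwaway zero so
 * the access never lands outside port_xmit_discards_vl[].
 */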
77241056 1483static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
17fb4f29 1484 void *context, int vl, int mode, u64 data)
77241056 1485{
1486 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1487 u64 zero = 0;
1488 u64 *counter;
77241056 1489
1490 if (vl == CNTR_INVALID_VL)
1491 counter = &ppd->port_xmit_discards;
1492 else if (vl >= 0 && vl < C_VL_COUNT)
1493 counter = &ppd->port_xmit_discards_vl[vl];
1494 else
1495 counter = &zero;
77241056 1496
69a00b8e 1497 return read_write_sw(ppd->dd, counter, mode, data);
1498}
1499
1500static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1501 void *context, int vl, int mode,
1502 u64 data)
77241056 1503{
a787bde8 1504 struct hfi1_pportdata *ppd = context;
1505
1506 if (vl != CNTR_INVALID_VL)
1507 return 0;
1508
1509 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1510 mode, data);
1511}
1512
1513static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
17fb4f29 1514 void *context, int vl, int mode, u64 data)
77241056 1515{
a787bde8 1516 struct hfi1_pportdata *ppd = context;
1517
1518 if (vl != CNTR_INVALID_VL)
1519 return 0;
1520
1521 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1522 mode, data);
1523}
1524
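/* Sum a per-CPU counter across all possible CPUs. */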
1525u64 get_all_cpu_total(u64 __percpu *cntr)
1526{
1527 int cpu;
1528 u64 counter = 0;
1529
1530 for_each_possible_cpu(cpu)
1531 counter += *per_cpu_ptr(cntr, cpu);
1532 return counter;
1533}
1534
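/*
 * Per-CPU counters are never cleared in place.  A read returns the
 * current total minus the zero-point *z_val; writing 0 "resets" the
 * counter by advancing the zero point to the current total, and any
 * other write is rejected.
 */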
1535static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1536 u64 __percpu *cntr,
1537 int vl, int mode, u64 data)
1538{
1539 u64 ret = 0;
1540
1541 if (vl != CNTR_INVALID_VL)
1542 return 0;
1543
1544 if (mode == CNTR_MODE_R) {
1545 ret = get_all_cpu_total(cntr) - *z_val;
1546 } else if (mode == CNTR_MODE_W) {
1547 /* A write can only zero the counter */
1548 if (data == 0)
1549 *z_val = get_all_cpu_total(cntr);
1550 else
1551 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1552 } else {
1553 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1554 return 0;
1555 }
1556
1557 return ret;
1558}
1559
1560static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1561 void *context, int vl, int mode, u64 data)
1562{
a787bde8 1563 struct hfi1_devdata *dd = context;
1564
1565 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1566 mode, data);
1567}
1568
1569static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
17fb4f29 1570 void *context, int vl, int mode, u64 data)
77241056 1571{
a787bde8 1572 struct hfi1_devdata *dd = context;
1573
1574 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1575 mode, data);
1576}
1577
1578static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1579 void *context, int vl, int mode, u64 data)
1580{
a787bde8 1581 struct hfi1_devdata *dd = context;
1582
1583 return dd->verbs_dev.n_piowait;
1584}
1585
1586static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1587 void *context, int vl, int mode, u64 data)
1588{
1589 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1590
1591 return dd->verbs_dev.n_piodrain;
1592}
1593
1594static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1595 void *context, int vl, int mode, u64 data)
1596{
a787bde8 1597 struct hfi1_devdata *dd = context;
1598
1599 return dd->verbs_dev.n_txwait;
1600}
1601
1602static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1603 void *context, int vl, int mode, u64 data)
1604{
a787bde8 1605 struct hfi1_devdata *dd = context;
1606
1607 return dd->verbs_dev.n_kmem_wait;
1608}
1609
b421922e 1610static u64 access_sw_send_schedule(const struct cntr_entry *entry,
17fb4f29 1611 void *context, int vl, int mode, u64 data)
1612{
1613 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1614
1615 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1616 mode, data);
1617}
1618
1619/* Software counters for the error status bits within MISC_ERR_STATUS */
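/*
 * These accessors do not touch hardware; they simply report the
 * software counts accumulated in dd->misc_err_status_cnt[], one
 * entry per MISC_ERR_STATUS error bit.
 */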
1620static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1621 void *context, int vl, int mode,
1622 u64 data)
1623{
1624 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1625
1626 return dd->misc_err_status_cnt[12];
1627}
1628
1629static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1630 void *context, int vl, int mode,
1631 u64 data)
1632{
1633 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1634
1635 return dd->misc_err_status_cnt[11];
1636}
1637
1638static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1639 void *context, int vl, int mode,
1640 u64 data)
1641{
1642 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1643
1644 return dd->misc_err_status_cnt[10];
1645}
1646
1647static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1648 void *context, int vl,
1649 int mode, u64 data)
1650{
1651 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1652
1653 return dd->misc_err_status_cnt[9];
1654}
1655
1656static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1657 void *context, int vl, int mode,
1658 u64 data)
1659{
1660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1661
1662 return dd->misc_err_status_cnt[8];
1663}
1664
1665static u64 access_misc_efuse_read_bad_addr_err_cnt(
1666 const struct cntr_entry *entry,
1667 void *context, int vl, int mode, u64 data)
1668{
1669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1670
1671 return dd->misc_err_status_cnt[7];
1672}
1673
1674static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1675 void *context, int vl,
1676 int mode, u64 data)
1677{
1678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1679
1680 return dd->misc_err_status_cnt[6];
1681}
1682
1683static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1684 void *context, int vl, int mode,
1685 u64 data)
1686{
1687 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1688
1689 return dd->misc_err_status_cnt[5];
1690}
1691
1692static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1693 void *context, int vl, int mode,
1694 u64 data)
1695{
1696 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1697
1698 return dd->misc_err_status_cnt[4];
1699}
1700
1701static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1702 void *context, int vl,
1703 int mode, u64 data)
1704{
1705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1706
1707 return dd->misc_err_status_cnt[3];
1708}
1709
1710static u64 access_misc_csr_write_bad_addr_err_cnt(
1711 const struct cntr_entry *entry,
1712 void *context, int vl, int mode, u64 data)
1713{
1714 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1715
1716 return dd->misc_err_status_cnt[2];
1717}
1718
1719static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1720 void *context, int vl,
1721 int mode, u64 data)
1722{
1723 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1724
1725 return dd->misc_err_status_cnt[1];
1726}
1727
1728static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1729 void *context, int vl, int mode,
1730 u64 data)
1731{
1732 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1733
1734 return dd->misc_err_status_cnt[0];
1735}
1736
1737/*
1738 * Software counter for the aggregate of
1739 * individual CceErrStatus counters
1740 */
1741static u64 access_sw_cce_err_status_aggregated_cnt(
1742 const struct cntr_entry *entry,
1743 void *context, int vl, int mode, u64 data)
1744{
1745 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1746
1747 return dd->sw_cce_err_status_aggregate;
1748}
1749
1750/*
1751 * Software counters corresponding to each of the
1752 * error status bits within CceErrStatus
1753 */
1754static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1755 void *context, int vl, int mode,
1756 u64 data)
1757{
1758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1759
1760 return dd->cce_err_status_cnt[40];
1761}
1762
1763static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1764 void *context, int vl, int mode,
1765 u64 data)
1766{
1767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1768
1769 return dd->cce_err_status_cnt[39];
1770}
1771
1772static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1773 void *context, int vl, int mode,
1774 u64 data)
1775{
1776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1777
1778 return dd->cce_err_status_cnt[38];
1779}
1780
1781static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1782 void *context, int vl, int mode,
1783 u64 data)
1784{
1785 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1786
1787 return dd->cce_err_status_cnt[37];
1788}
1789
1790static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1791 void *context, int vl, int mode,
1792 u64 data)
1793{
1794 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1795
1796 return dd->cce_err_status_cnt[36];
1797}
1798
1799static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1800 const struct cntr_entry *entry,
1801 void *context, int vl, int mode, u64 data)
1802{
1803 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1804
1805 return dd->cce_err_status_cnt[35];
1806}
1807
1808static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1809 const struct cntr_entry *entry,
1810 void *context, int vl, int mode, u64 data)
1811{
1812 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1813
1814 return dd->cce_err_status_cnt[34];
1815}
1816
1817static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1818 void *context, int vl,
1819 int mode, u64 data)
1820{
1821 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1822
1823 return dd->cce_err_status_cnt[33];
1824}
1825
1826static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1827 void *context, int vl, int mode,
1828 u64 data)
1829{
1830 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1831
1832 return dd->cce_err_status_cnt[32];
1833}
1834
1835static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1836 void *context, int vl, int mode, u64 data)
1837{
1838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1839
1840 return dd->cce_err_status_cnt[31];
1841}
1842
1843static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1844 void *context, int vl, int mode,
1845 u64 data)
1846{
1847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1848
1849 return dd->cce_err_status_cnt[30];
1850}
1851
1852static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1853 void *context, int vl, int mode,
1854 u64 data)
1855{
1856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1857
1858 return dd->cce_err_status_cnt[29];
1859}
1860
1861static u64 access_pcic_transmit_back_parity_err_cnt(
1862 const struct cntr_entry *entry,
1863 void *context, int vl, int mode, u64 data)
1864{
1865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1866
1867 return dd->cce_err_status_cnt[28];
1868}
1869
1870static u64 access_pcic_transmit_front_parity_err_cnt(
1871 const struct cntr_entry *entry,
1872 void *context, int vl, int mode, u64 data)
1873{
1874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1875
1876 return dd->cce_err_status_cnt[27];
1877}
1878
1879static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1880 void *context, int vl, int mode,
1881 u64 data)
1882{
1883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1884
1885 return dd->cce_err_status_cnt[26];
1886}
1887
1888static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1889 void *context, int vl, int mode,
1890 u64 data)
1891{
1892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1893
1894 return dd->cce_err_status_cnt[25];
1895}
1896
1897static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1898 void *context, int vl, int mode,
1899 u64 data)
1900{
1901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1902
1903 return dd->cce_err_status_cnt[24];
1904}
1905
1906static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1907 void *context, int vl, int mode,
1908 u64 data)
1909{
1910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1911
1912 return dd->cce_err_status_cnt[23];
1913}
1914
1915static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1916 void *context, int vl,
1917 int mode, u64 data)
1918{
1919 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1920
1921 return dd->cce_err_status_cnt[22];
1922}
1923
1924static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1925 void *context, int vl, int mode,
1926 u64 data)
1927{
1928 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1929
1930 return dd->cce_err_status_cnt[21];
1931}
1932
1933static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1934 const struct cntr_entry *entry,
1935 void *context, int vl, int mode, u64 data)
1936{
1937 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1938
1939 return dd->cce_err_status_cnt[20];
1940}
1941
1942static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1943 void *context, int vl,
1944 int mode, u64 data)
1945{
1946 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1947
1948 return dd->cce_err_status_cnt[19];
1949}
1950
1951static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1952 void *context, int vl, int mode,
1953 u64 data)
1954{
1955 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1956
1957 return dd->cce_err_status_cnt[18];
1958}
1959
1960static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1961 void *context, int vl, int mode,
1962 u64 data)
1963{
1964 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1965
1966 return dd->cce_err_status_cnt[17];
1967}
1968
1969static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1970 void *context, int vl, int mode,
1971 u64 data)
1972{
1973 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1974
1975 return dd->cce_err_status_cnt[16];
1976}
1977
1978static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1979 void *context, int vl, int mode,
1980 u64 data)
1981{
1982 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1983
1984 return dd->cce_err_status_cnt[15];
1985}
1986
1987static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1988 void *context, int vl,
1989 int mode, u64 data)
1990{
1991 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1992
1993 return dd->cce_err_status_cnt[14];
1994}
1995
1996static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1997 void *context, int vl, int mode,
1998 u64 data)
1999{
2000 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2001
2002 return dd->cce_err_status_cnt[13];
2003}
2004
2005static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2006 const struct cntr_entry *entry,
2007 void *context, int vl, int mode, u64 data)
2008{
2009 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2010
2011 return dd->cce_err_status_cnt[12];
2012}
2013
2014static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2015 const struct cntr_entry *entry,
2016 void *context, int vl, int mode, u64 data)
2017{
2018 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2019
2020 return dd->cce_err_status_cnt[11];
2021}
2022
2023static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2024 const struct cntr_entry *entry,
2025 void *context, int vl, int mode, u64 data)
2026{
2027 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2028
2029 return dd->cce_err_status_cnt[10];
2030}
2031
2032static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2033 const struct cntr_entry *entry,
2034 void *context, int vl, int mode, u64 data)
2035{
2036 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2037
2038 return dd->cce_err_status_cnt[9];
2039}
2040
2041static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2042 const struct cntr_entry *entry,
2043 void *context, int vl, int mode, u64 data)
2044{
2045 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2046
2047 return dd->cce_err_status_cnt[8];
2048}
2049
2050static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2051 void *context, int vl,
2052 int mode, u64 data)
2053{
2054 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2055
2056 return dd->cce_err_status_cnt[7];
2057}
2058
2059static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2060 const struct cntr_entry *entry,
2061 void *context, int vl, int mode, u64 data)
2062{
2063 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2064
2065 return dd->cce_err_status_cnt[6];
2066}
2067
2068static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2069 void *context, int vl, int mode,
2070 u64 data)
2071{
2072 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2073
2074 return dd->cce_err_status_cnt[5];
2075}
2076
2077static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2078 void *context, int vl, int mode,
2079 u64 data)
2080{
2081 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2082
2083 return dd->cce_err_status_cnt[4];
2084}
2085
2086static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2087 const struct cntr_entry *entry,
2088 void *context, int vl, int mode, u64 data)
2089{
2090 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2091
2092 return dd->cce_err_status_cnt[3];
2093}
2094
2095static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2096 void *context, int vl,
2097 int mode, u64 data)
2098{
2099 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2100
2101 return dd->cce_err_status_cnt[2];
2102}
2103
2104static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2105 void *context, int vl,
2106 int mode, u64 data)
2107{
2108 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2109
2110 return dd->cce_err_status_cnt[1];
2111}
2112
2113static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2114 void *context, int vl, int mode,
2115 u64 data)
2116{
2117 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2118
2119 return dd->cce_err_status_cnt[0];
2120}
2121
2122/*
2123 * Software counters corresponding to each of the
2124 * error status bits within RcvErrStatus
2125 */
2126static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2127 void *context, int vl, int mode,
2128 u64 data)
2129{
2130 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2131
2132 return dd->rcv_err_status_cnt[63];
2133}
2134
2135static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2136 void *context, int vl,
2137 int mode, u64 data)
2138{
2139 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140
2141 return dd->rcv_err_status_cnt[62];
2142}
2143
2144static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2145 void *context, int vl, int mode,
2146 u64 data)
2147{
2148 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149
2150 return dd->rcv_err_status_cnt[61];
2151}
2152
2153static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2154 void *context, int vl, int mode,
2155 u64 data)
2156{
2157 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158
2159 return dd->rcv_err_status_cnt[60];
2160}
2161
2162static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2163 void *context, int vl,
2164 int mode, u64 data)
2165{
2166 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167
2168 return dd->rcv_err_status_cnt[59];
2169}
2170
2171static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2172 void *context, int vl,
2173 int mode, u64 data)
2174{
2175 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176
2177 return dd->rcv_err_status_cnt[58];
2178}
2179
2180static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2181 void *context, int vl, int mode,
2182 u64 data)
2183{
2184 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185
2186 return dd->rcv_err_status_cnt[57];
2187}
2188
2189static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2190 void *context, int vl, int mode,
2191 u64 data)
2192{
2193 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194
2195 return dd->rcv_err_status_cnt[56];
2196}
2197
2198static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2199 void *context, int vl, int mode,
2200 u64 data)
2201{
2202 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203
2204 return dd->rcv_err_status_cnt[55];
2205}
2206
2207static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2208 const struct cntr_entry *entry,
2209 void *context, int vl, int mode, u64 data)
2210{
2211 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212
2213 return dd->rcv_err_status_cnt[54];
2214}
2215
2216static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2217 const struct cntr_entry *entry,
2218 void *context, int vl, int mode, u64 data)
2219{
2220 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2221
2222 return dd->rcv_err_status_cnt[53];
2223}
2224
2225static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2226 void *context, int vl,
2227 int mode, u64 data)
2228{
2229 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2230
2231 return dd->rcv_err_status_cnt[52];
2232}
2233
2234static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2235 void *context, int vl,
2236 int mode, u64 data)
2237{
2238 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2239
2240 return dd->rcv_err_status_cnt[51];
2241}
2242
2243static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2244 void *context, int vl,
2245 int mode, u64 data)
2246{
2247 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2248
2249 return dd->rcv_err_status_cnt[50];
2250}
2251
2252static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2253 void *context, int vl,
2254 int mode, u64 data)
2255{
2256 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2257
2258 return dd->rcv_err_status_cnt[49];
2259}
2260
2261static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2262 void *context, int vl,
2263 int mode, u64 data)
2264{
2265 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2266
2267 return dd->rcv_err_status_cnt[48];
2268}
2269
2270static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2271 void *context, int vl,
2272 int mode, u64 data)
2273{
2274 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2275
2276 return dd->rcv_err_status_cnt[47];
2277}
2278
2279static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2280 void *context, int vl, int mode,
2281 u64 data)
2282{
2283 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2284
2285 return dd->rcv_err_status_cnt[46];
2286}
2287
2288static u64 access_rx_hq_intr_csr_parity_err_cnt(
2289 const struct cntr_entry *entry,
2290 void *context, int vl, int mode, u64 data)
2291{
2292 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2293
2294 return dd->rcv_err_status_cnt[45];
2295}
2296
2297static u64 access_rx_lookup_csr_parity_err_cnt(
2298 const struct cntr_entry *entry,
2299 void *context, int vl, int mode, u64 data)
2300{
2301 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2302
2303 return dd->rcv_err_status_cnt[44];
2304}
2305
2306static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2307 const struct cntr_entry *entry,
2308 void *context, int vl, int mode, u64 data)
2309{
2310 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2311
2312 return dd->rcv_err_status_cnt[43];
2313}
2314
2315static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2316 const struct cntr_entry *entry,
2317 void *context, int vl, int mode, u64 data)
2318{
2319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2320
2321 return dd->rcv_err_status_cnt[42];
2322}
2323
2324static u64 access_rx_lookup_des_part2_parity_err_cnt(
2325 const struct cntr_entry *entry,
2326 void *context, int vl, int mode, u64 data)
2327{
2328 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2329
2330 return dd->rcv_err_status_cnt[41];
2331}
2332
2333static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2334 const struct cntr_entry *entry,
2335 void *context, int vl, int mode, u64 data)
2336{
2337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2338
2339 return dd->rcv_err_status_cnt[40];
2340}
2341
2342static u64 access_rx_lookup_des_part1_unc_err_cnt(
2343 const struct cntr_entry *entry,
2344 void *context, int vl, int mode, u64 data)
2345{
2346 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2347
2348 return dd->rcv_err_status_cnt[39];
2349}
2350
2351static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2352 const struct cntr_entry *entry,
2353 void *context, int vl, int mode, u64 data)
2354{
2355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2356
2357 return dd->rcv_err_status_cnt[38];
2358}
2359
2360static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2361 const struct cntr_entry *entry,
2362 void *context, int vl, int mode, u64 data)
2363{
2364 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2365
2366 return dd->rcv_err_status_cnt[37];
2367}
2368
2369static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2370 const struct cntr_entry *entry,
2371 void *context, int vl, int mode, u64 data)
2372{
2373 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2374
2375 return dd->rcv_err_status_cnt[36];
2376}
2377
2378static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2379 const struct cntr_entry *entry,
2380 void *context, int vl, int mode, u64 data)
2381{
2382 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2383
2384 return dd->rcv_err_status_cnt[35];
2385}
2386
2387static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2388 const struct cntr_entry *entry,
2389 void *context, int vl, int mode, u64 data)
2390{
2391 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2392
2393 return dd->rcv_err_status_cnt[34];
2394}
2395
2396static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2397 const struct cntr_entry *entry,
2398 void *context, int vl, int mode, u64 data)
2399{
2400 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2401
2402 return dd->rcv_err_status_cnt[33];
2403}
2404
2405static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2406 void *context, int vl, int mode,
2407 u64 data)
2408{
2409 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2410
2411 return dd->rcv_err_status_cnt[32];
2412}
2413
2414static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2415 void *context, int vl, int mode,
2416 u64 data)
2417{
2418 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2419
2420 return dd->rcv_err_status_cnt[31];
2421}
2422
2423static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2424 void *context, int vl, int mode,
2425 u64 data)
2426{
2427 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2428
2429 return dd->rcv_err_status_cnt[30];
2430}
2431
2432static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2433 void *context, int vl, int mode,
2434 u64 data)
2435{
2436 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2437
2438 return dd->rcv_err_status_cnt[29];
2439}
2440
2441static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2442 void *context, int vl,
2443 int mode, u64 data)
2444{
2445 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2446
2447 return dd->rcv_err_status_cnt[28];
2448}
2449
2450static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2451 const struct cntr_entry *entry,
2452 void *context, int vl, int mode, u64 data)
2453{
2454 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2455
2456 return dd->rcv_err_status_cnt[27];
2457}
2458
2459static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2460 const struct cntr_entry *entry,
2461 void *context, int vl, int mode, u64 data)
2462{
2463 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2464
2465 return dd->rcv_err_status_cnt[26];
2466}
2467
2468static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2469 const struct cntr_entry *entry,
2470 void *context, int vl, int mode, u64 data)
2471{
2472 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2473
2474 return dd->rcv_err_status_cnt[25];
2475}
2476
2477static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2478 const struct cntr_entry *entry,
2479 void *context, int vl, int mode, u64 data)
2480{
2481 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2482
2483 return dd->rcv_err_status_cnt[24];
2484}
2485
2486static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2487 const struct cntr_entry *entry,
2488 void *context, int vl, int mode, u64 data)
2489{
2490 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2491
2492 return dd->rcv_err_status_cnt[23];
2493}
2494
2495static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2496 const struct cntr_entry *entry,
2497 void *context, int vl, int mode, u64 data)
2498{
2499 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2500
2501 return dd->rcv_err_status_cnt[22];
2502}
2503
2504static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2505 const struct cntr_entry *entry,
2506 void *context, int vl, int mode, u64 data)
2507{
2508 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2509
2510 return dd->rcv_err_status_cnt[21];
2511}
2512
2513static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2514 const struct cntr_entry *entry,
2515 void *context, int vl, int mode, u64 data)
2516{
2517 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2518
2519 return dd->rcv_err_status_cnt[20];
2520}
2521
2522static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2523 const struct cntr_entry *entry,
2524 void *context, int vl, int mode, u64 data)
2525{
2526 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2527
2528 return dd->rcv_err_status_cnt[19];
2529}
2530
2531static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2532 void *context, int vl,
2533 int mode, u64 data)
2534{
2535 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2536
2537 return dd->rcv_err_status_cnt[18];
2538}
2539
2540static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2541 void *context, int vl,
2542 int mode, u64 data)
2543{
2544 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2545
2546 return dd->rcv_err_status_cnt[17];
2547}
2548
2549static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2550 const struct cntr_entry *entry,
2551 void *context, int vl, int mode, u64 data)
2552{
2553 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2554
2555 return dd->rcv_err_status_cnt[16];
2556}
2557
2558static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2559 const struct cntr_entry *entry,
2560 void *context, int vl, int mode, u64 data)
2561{
2562 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2563
2564 return dd->rcv_err_status_cnt[15];
2565}
2566
2567static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2568 void *context, int vl,
2569 int mode, u64 data)
2570{
2571 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2572
2573 return dd->rcv_err_status_cnt[14];
2574}
2575
2576static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2577 void *context, int vl,
2578 int mode, u64 data)
2579{
2580 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2581
2582 return dd->rcv_err_status_cnt[13];
2583}
2584
2585static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2586 void *context, int vl, int mode,
2587 u64 data)
2588{
2589 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2590
2591 return dd->rcv_err_status_cnt[12];
2592}
2593
2594static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2595 void *context, int vl, int mode,
2596 u64 data)
2597{
2598 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2599
2600 return dd->rcv_err_status_cnt[11];
2601}
2602
2603static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2604 void *context, int vl, int mode,
2605 u64 data)
2606{
2607 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2608
2609 return dd->rcv_err_status_cnt[10];
2610}
2611
2612static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2613 void *context, int vl, int mode,
2614 u64 data)
2615{
2616 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2617
2618 return dd->rcv_err_status_cnt[9];
2619}
2620
2621static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2622 void *context, int vl, int mode,
2623 u64 data)
2624{
2625 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2626
2627 return dd->rcv_err_status_cnt[8];
2628}
2629
2630static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2631 const struct cntr_entry *entry,
2632 void *context, int vl, int mode, u64 data)
2633{
2634 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2635
2636 return dd->rcv_err_status_cnt[7];
2637}
2638
2639static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2640 const struct cntr_entry *entry,
2641 void *context, int vl, int mode, u64 data)
2642{
2643 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2644
2645 return dd->rcv_err_status_cnt[6];
2646}
2647
2648static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2649 void *context, int vl, int mode,
2650 u64 data)
2651{
2652 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2653
2654 return dd->rcv_err_status_cnt[5];
2655}
2656
2657static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2658 void *context, int vl, int mode,
2659 u64 data)
2660{
2661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2662
2663 return dd->rcv_err_status_cnt[4];
2664}
2665
2666static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2667 void *context, int vl, int mode,
2668 u64 data)
2669{
2670 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2671
2672 return dd->rcv_err_status_cnt[3];
2673}
2674
2675static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2676 void *context, int vl, int mode,
2677 u64 data)
2678{
2679 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2680
2681 return dd->rcv_err_status_cnt[2];
2682}
2683
2684static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2685 void *context, int vl, int mode,
2686 u64 data)
2687{
2688 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2689
2690 return dd->rcv_err_status_cnt[1];
2691}
2692
2693static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2694 void *context, int vl, int mode,
2695 u64 data)
2696{
2697 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2698
2699 return dd->rcv_err_status_cnt[0];
2700}
2701
2702/*
2703 * Software counters corresponding to each of the
2704 * error status bits within SendPioErrStatus
2705 */
2706static u64 access_pio_pec_sop_head_parity_err_cnt(
2707 const struct cntr_entry *entry,
2708 void *context, int vl, int mode, u64 data)
2709{
2710 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2711
2712 return dd->send_pio_err_status_cnt[35];
2713}
2714
2715static u64 access_pio_pcc_sop_head_parity_err_cnt(
2716 const struct cntr_entry *entry,
2717 void *context, int vl, int mode, u64 data)
2718{
2719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720
2721 return dd->send_pio_err_status_cnt[34];
2722}
2723
2724static u64 access_pio_last_returned_cnt_parity_err_cnt(
2725 const struct cntr_entry *entry,
2726 void *context, int vl, int mode, u64 data)
2727{
2728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729
2730 return dd->send_pio_err_status_cnt[33];
2731}
2732
2733static u64 access_pio_current_free_cnt_parity_err_cnt(
2734 const struct cntr_entry *entry,
2735 void *context, int vl, int mode, u64 data)
2736{
2737 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738
2739 return dd->send_pio_err_status_cnt[32];
2740}
2741
2742static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2743 void *context, int vl, int mode,
2744 u64 data)
2745{
2746 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747
2748 return dd->send_pio_err_status_cnt[31];
2749}
2750
2751static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2752 void *context, int vl, int mode,
2753 u64 data)
2754{
2755 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756
2757 return dd->send_pio_err_status_cnt[30];
2758}
2759
2760static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2761 void *context, int vl, int mode,
2762 u64 data)
2763{
2764 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765
2766 return dd->send_pio_err_status_cnt[29];
2767}
2768
2769static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2770 const struct cntr_entry *entry,
2771 void *context, int vl, int mode, u64 data)
2772{
2773 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774
2775 return dd->send_pio_err_status_cnt[28];
2776}
2777
2778static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2779 void *context, int vl, int mode,
2780 u64 data)
2781{
2782 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783
2784 return dd->send_pio_err_status_cnt[27];
2785}
2786
2787static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2788 void *context, int vl, int mode,
2789 u64 data)
2790{
2791 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792
2793 return dd->send_pio_err_status_cnt[26];
2794}
2795
2796static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2797 void *context, int vl,
2798 int mode, u64 data)
2799{
2800 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2801
2802 return dd->send_pio_err_status_cnt[25];
2803}
2804
2805static u64 access_pio_block_qw_count_parity_err_cnt(
2806 const struct cntr_entry *entry,
2807 void *context, int vl, int mode, u64 data)
2808{
2809 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2810
2811 return dd->send_pio_err_status_cnt[24];
2812}
2813
2814static u64 access_pio_write_qw_valid_parity_err_cnt(
2815 const struct cntr_entry *entry,
2816 void *context, int vl, int mode, u64 data)
2817{
2818 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2819
2820 return dd->send_pio_err_status_cnt[23];
2821}
2822
2823static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2824 void *context, int vl, int mode,
2825 u64 data)
2826{
2827 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2828
2829 return dd->send_pio_err_status_cnt[22];
2830}
2831
2832static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2833 void *context, int vl,
2834 int mode, u64 data)
2835{
2836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2837
2838 return dd->send_pio_err_status_cnt[21];
2839}
2840
2841static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2842 void *context, int vl,
2843 int mode, u64 data)
2844{
2845 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2846
2847 return dd->send_pio_err_status_cnt[20];
2848}
2849
2850static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2851 void *context, int vl,
2852 int mode, u64 data)
2853{
2854 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2855
2856 return dd->send_pio_err_status_cnt[19];
2857}
2858
2859static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2860 const struct cntr_entry *entry,
2861 void *context, int vl, int mode, u64 data)
2862{
2863 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2864
2865 return dd->send_pio_err_status_cnt[18];
2866}
2867
2868static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2869 void *context, int vl, int mode,
2870 u64 data)
2871{
2872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2873
2874 return dd->send_pio_err_status_cnt[17];
2875}
2876
2877static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2878 void *context, int vl, int mode,
2879 u64 data)
2880{
2881 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2882
2883 return dd->send_pio_err_status_cnt[16];
2884}
2885
2886static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2887 const struct cntr_entry *entry,
2888 void *context, int vl, int mode, u64 data)
2889{
2890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2891
2892 return dd->send_pio_err_status_cnt[15];
2893}
2894
2895static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2896 const struct cntr_entry *entry,
2897 void *context, int vl, int mode, u64 data)
2898{
2899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2900
2901 return dd->send_pio_err_status_cnt[14];
2902}
2903
2904static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2905 const struct cntr_entry *entry,
2906 void *context, int vl, int mode, u64 data)
2907{
2908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2909
2910 return dd->send_pio_err_status_cnt[13];
2911}
2912
2913static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2914 const struct cntr_entry *entry,
2915 void *context, int vl, int mode, u64 data)
2916{
2917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2918
2919 return dd->send_pio_err_status_cnt[12];
2920}
2921
2922static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2923 const struct cntr_entry *entry,
2924 void *context, int vl, int mode, u64 data)
2925{
2926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2927
2928 return dd->send_pio_err_status_cnt[11];
2929}
2930
2931static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2932 const struct cntr_entry *entry,
2933 void *context, int vl, int mode, u64 data)
2934{
2935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2936
2937 return dd->send_pio_err_status_cnt[10];
2938}
2939
2940static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2941 const struct cntr_entry *entry,
2942 void *context, int vl, int mode, u64 data)
2943{
2944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2945
2946 return dd->send_pio_err_status_cnt[9];
2947}
2948
2949static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2950 const struct cntr_entry *entry,
2951 void *context, int vl, int mode, u64 data)
2952{
2953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2954
2955 return dd->send_pio_err_status_cnt[8];
2956}
2957
2958static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2959 const struct cntr_entry *entry,
2960 void *context, int vl, int mode, u64 data)
2961{
2962 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2963
2964 return dd->send_pio_err_status_cnt[7];
2965}
2966
2967static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2968 void *context, int vl, int mode,
2969 u64 data)
2970{
2971 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2972
2973 return dd->send_pio_err_status_cnt[6];
2974}
2975
2976static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2977 void *context, int vl, int mode,
2978 u64 data)
2979{
2980 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2981
2982 return dd->send_pio_err_status_cnt[5];
2983}
2984
2985static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2986 void *context, int vl, int mode,
2987 u64 data)
2988{
2989 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2990
2991 return dd->send_pio_err_status_cnt[4];
2992}
2993
2994static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2995 void *context, int vl, int mode,
2996 u64 data)
2997{
2998 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2999
3000 return dd->send_pio_err_status_cnt[3];
3001}
3002
3003static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3004 void *context, int vl, int mode,
3005 u64 data)
3006{
3007 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3008
3009 return dd->send_pio_err_status_cnt[2];
3010}
3011
3012static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3013 void *context, int vl,
3014 int mode, u64 data)
3015{
3016 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3017
3018 return dd->send_pio_err_status_cnt[1];
3019}
3020
3021static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3022 void *context, int vl, int mode,
3023 u64 data)
3024{
3025 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3026
3027 return dd->send_pio_err_status_cnt[0];
3028}
3029
3030/*
3031 * Software counters corresponding to each of the
3032 * error status bits within SendDmaErrStatus
3033 */
3034static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3035 const struct cntr_entry *entry,
3036 void *context, int vl, int mode, u64 data)
3037{
3038 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3039
3040 return dd->send_dma_err_status_cnt[3];
3041}
3042
3043static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3044 const struct cntr_entry *entry,
3045 void *context, int vl, int mode, u64 data)
3046{
3047 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048
3049 return dd->send_dma_err_status_cnt[2];
3050}
3051
3052static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3053 void *context, int vl, int mode,
3054 u64 data)
3055{
3056 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3057
3058 return dd->send_dma_err_status_cnt[1];
3059}
3060
3061static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3062 void *context, int vl, int mode,
3063 u64 data)
3064{
3065 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3066
3067 return dd->send_dma_err_status_cnt[0];
3068}
3069
3070/*
3071 * Software counters corresponding to each of the
3072 * error status bits within SendEgressErrStatus
3073 */
3074static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3075 const struct cntr_entry *entry,
3076 void *context, int vl, int mode, u64 data)
3077{
3078 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3079
3080 return dd->send_egress_err_status_cnt[63];
3081}
3082
3083static u64 access_tx_read_sdma_memory_csr_err_cnt(
3084 const struct cntr_entry *entry,
3085 void *context, int vl, int mode, u64 data)
3086{
3087 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3088
3089 return dd->send_egress_err_status_cnt[62];
3090}
3091
3092static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3093 void *context, int vl, int mode,
3094 u64 data)
3095{
3096 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3097
3098 return dd->send_egress_err_status_cnt[61];
3099}
3100
3101static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3102 void *context, int vl,
3103 int mode, u64 data)
3104{
3105 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3106
3107 return dd->send_egress_err_status_cnt[60];
3108}
3109
3110static u64 access_tx_read_sdma_memory_cor_err_cnt(
3111 const struct cntr_entry *entry,
3112 void *context, int vl, int mode, u64 data)
3113{
3114 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3115
3116 return dd->send_egress_err_status_cnt[59];
3117}
3118
3119static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3120 void *context, int vl, int mode,
3121 u64 data)
3122{
3123 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3124
3125 return dd->send_egress_err_status_cnt[58];
3126}
3127
3128static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3129 void *context, int vl, int mode,
3130 u64 data)
3131{
3132 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3133
3134 return dd->send_egress_err_status_cnt[57];
3135}
3136
3137static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3138 void *context, int vl, int mode,
3139 u64 data)
3140{
3141 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142
3143 return dd->send_egress_err_status_cnt[56];
3144}
3145
3146static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3147 void *context, int vl, int mode,
3148 u64 data)
3149{
3150 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151
3152 return dd->send_egress_err_status_cnt[55];
3153}
3154
3155static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3156 void *context, int vl, int mode,
3157 u64 data)
3158{
3159 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160
3161 return dd->send_egress_err_status_cnt[54];
3162}
3163
3164static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3165 void *context, int vl, int mode,
3166 u64 data)
3167{
3168 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3169
3170 return dd->send_egress_err_status_cnt[53];
3171}
3172
3173static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3174 void *context, int vl, int mode,
3175 u64 data)
3176{
3177 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3178
3179 return dd->send_egress_err_status_cnt[52];
3180}
3181
3182static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3183 void *context, int vl, int mode,
3184 u64 data)
3185{
3186 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3187
3188 return dd->send_egress_err_status_cnt[51];
3189}
3190
3191static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3192 void *context, int vl, int mode,
3193 u64 data)
3194{
3195 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3196
3197 return dd->send_egress_err_status_cnt[50];
3198}
3199
3200static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3201 void *context, int vl, int mode,
3202 u64 data)
3203{
3204 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3205
3206 return dd->send_egress_err_status_cnt[49];
3207}
3208
3209static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3210 void *context, int vl, int mode,
3211 u64 data)
3212{
3213 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3214
3215 return dd->send_egress_err_status_cnt[48];
3216}
3217
3218static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3219 void *context, int vl, int mode,
3220 u64 data)
3221{
3222 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3223
3224 return dd->send_egress_err_status_cnt[47];
3225}
3226
3227static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3228 void *context, int vl, int mode,
3229 u64 data)
3230{
3231 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3232
3233 return dd->send_egress_err_status_cnt[46];
3234}
3235
3236static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3237 void *context, int vl, int mode,
3238 u64 data)
3239{
3240 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3241
3242 return dd->send_egress_err_status_cnt[45];
3243}
3244
3245static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3246 void *context, int vl,
3247 int mode, u64 data)
3248{
3249 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3250
3251 return dd->send_egress_err_status_cnt[44];
3252}
3253
3254static u64 access_tx_read_sdma_memory_unc_err_cnt(
3255 const struct cntr_entry *entry,
3256 void *context, int vl, int mode, u64 data)
3257{
3258 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3259
3260 return dd->send_egress_err_status_cnt[43];
3261}
3262
3263static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3264 void *context, int vl, int mode,
3265 u64 data)
3266{
3267 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3268
3269 return dd->send_egress_err_status_cnt[42];
3270}
3271
3272static u64 access_tx_credit_return_parity_err_cnt(
3273 const struct cntr_entry *entry,
3274 void *context, int vl, int mode, u64 data)
3275{
3276 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3277
3278 return dd->send_egress_err_status_cnt[41];
3279}
3280
3281static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3282 const struct cntr_entry *entry,
3283 void *context, int vl, int mode, u64 data)
3284{
3285 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3286
3287 return dd->send_egress_err_status_cnt[40];
3288}
3289
3290static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3291 const struct cntr_entry *entry,
3292 void *context, int vl, int mode, u64 data)
3293{
3294 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3295
3296 return dd->send_egress_err_status_cnt[39];
3297}
3298
3299static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3300 const struct cntr_entry *entry,
3301 void *context, int vl, int mode, u64 data)
3302{
3303 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3304
3305 return dd->send_egress_err_status_cnt[38];
3306}
3307
3308static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3309 const struct cntr_entry *entry,
3310 void *context, int vl, int mode, u64 data)
3311{
3312 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3313
3314 return dd->send_egress_err_status_cnt[37];
3315}
3316
3317static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3318 const struct cntr_entry *entry,
3319 void *context, int vl, int mode, u64 data)
3320{
3321 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3322
3323 return dd->send_egress_err_status_cnt[36];
3324}
3325
3326static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3327 const struct cntr_entry *entry,
3328 void *context, int vl, int mode, u64 data)
3329{
3330 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3331
3332 return dd->send_egress_err_status_cnt[35];
3333}
3334
3335static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3336 const struct cntr_entry *entry,
3337 void *context, int vl, int mode, u64 data)
3338{
3339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3340
3341 return dd->send_egress_err_status_cnt[34];
3342}
3343
3344static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3345 const struct cntr_entry *entry,
3346 void *context, int vl, int mode, u64 data)
3347{
3348 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3349
3350 return dd->send_egress_err_status_cnt[33];
3351}
3352
3353static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3354 const struct cntr_entry *entry,
3355 void *context, int vl, int mode, u64 data)
3356{
3357 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3358
3359 return dd->send_egress_err_status_cnt[32];
3360}
3361
3362static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3363 const struct cntr_entry *entry,
3364 void *context, int vl, int mode, u64 data)
3365{
3366 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3367
3368 return dd->send_egress_err_status_cnt[31];
3369}
3370
3371static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3372 const struct cntr_entry *entry,
3373 void *context, int vl, int mode, u64 data)
3374{
3375 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3376
3377 return dd->send_egress_err_status_cnt[30];
3378}
3379
3380static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3381 const struct cntr_entry *entry,
3382 void *context, int vl, int mode, u64 data)
3383{
3384 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3385
3386 return dd->send_egress_err_status_cnt[29];
3387}
3388
3389static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3390 const struct cntr_entry *entry,
3391 void *context, int vl, int mode, u64 data)
3392{
3393 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3394
3395 return dd->send_egress_err_status_cnt[28];
3396}
3397
3398static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3399 const struct cntr_entry *entry,
3400 void *context, int vl, int mode, u64 data)
3401{
3402 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3403
3404 return dd->send_egress_err_status_cnt[27];
3405}
3406
3407static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3408 const struct cntr_entry *entry,
3409 void *context, int vl, int mode, u64 data)
3410{
3411 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3412
3413 return dd->send_egress_err_status_cnt[26];
3414}
3415
3416static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3417 const struct cntr_entry *entry,
3418 void *context, int vl, int mode, u64 data)
3419{
3420 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3421
3422 return dd->send_egress_err_status_cnt[25];
3423}
3424
3425static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3426 const struct cntr_entry *entry,
3427 void *context, int vl, int mode, u64 data)
3428{
3429 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3430
3431 return dd->send_egress_err_status_cnt[24];
3432}
3433
3434static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3435 const struct cntr_entry *entry,
3436 void *context, int vl, int mode, u64 data)
3437{
3438 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3439
3440 return dd->send_egress_err_status_cnt[23];
3441}
3442
3443static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3444 const struct cntr_entry *entry,
3445 void *context, int vl, int mode, u64 data)
3446{
3447 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3448
3449 return dd->send_egress_err_status_cnt[22];
3450}
3451
3452static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3453 const struct cntr_entry *entry,
3454 void *context, int vl, int mode, u64 data)
3455{
3456 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3457
3458 return dd->send_egress_err_status_cnt[21];
3459}
3460
3461static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3462 const struct cntr_entry *entry,
3463 void *context, int vl, int mode, u64 data)
3464{
3465 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3466
3467 return dd->send_egress_err_status_cnt[20];
3468}
3469
3470static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3471 const struct cntr_entry *entry,
3472 void *context, int vl, int mode, u64 data)
3473{
3474 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3475
3476 return dd->send_egress_err_status_cnt[19];
3477}
3478
3479static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3480 const struct cntr_entry *entry,
3481 void *context, int vl, int mode, u64 data)
3482{
3483 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3484
3485 return dd->send_egress_err_status_cnt[18];
3486}
3487
3488static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3489 const struct cntr_entry *entry,
3490 void *context, int vl, int mode, u64 data)
3491{
3492 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3493
3494 return dd->send_egress_err_status_cnt[17];
3495}
3496
3497static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3498 const struct cntr_entry *entry,
3499 void *context, int vl, int mode, u64 data)
3500{
3501 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3502
3503 return dd->send_egress_err_status_cnt[16];
3504}
3505
3506static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3507 void *context, int vl, int mode,
3508 u64 data)
3509{
3510 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3511
3512 return dd->send_egress_err_status_cnt[15];
3513}
3514
3515static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3516 void *context, int vl,
3517 int mode, u64 data)
3518{
3519 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3520
3521 return dd->send_egress_err_status_cnt[14];
3522}
3523
3524static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3525 void *context, int vl, int mode,
3526 u64 data)
3527{
3528 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3529
3530 return dd->send_egress_err_status_cnt[13];
3531}
3532
3533static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3534 void *context, int vl, int mode,
3535 u64 data)
3536{
3537 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3538
3539 return dd->send_egress_err_status_cnt[12];
3540}
3541
3542static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3543 const struct cntr_entry *entry,
3544 void *context, int vl, int mode, u64 data)
3545{
3546 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3547
3548 return dd->send_egress_err_status_cnt[11];
3549}
3550
3551static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3552 void *context, int vl, int mode,
3553 u64 data)
3554{
3555 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3556
3557 return dd->send_egress_err_status_cnt[10];
3558}
3559
3560static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3561 void *context, int vl, int mode,
3562 u64 data)
3563{
3564 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3565
3566 return dd->send_egress_err_status_cnt[9];
3567}
3568
3569static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3570 const struct cntr_entry *entry,
3571 void *context, int vl, int mode, u64 data)
3572{
3573 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3574
3575 return dd->send_egress_err_status_cnt[8];
3576}
3577
3578static u64 access_tx_pio_launch_intf_parity_err_cnt(
3579 const struct cntr_entry *entry,
3580 void *context, int vl, int mode, u64 data)
3581{
3582 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3583
3584 return dd->send_egress_err_status_cnt[7];
3585}
3586
3587static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3588 void *context, int vl, int mode,
3589 u64 data)
3590{
3591 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3592
3593 return dd->send_egress_err_status_cnt[6];
3594}
3595
3596static u64 access_tx_incorrect_link_state_err_cnt(
3597 const struct cntr_entry *entry,
3598 void *context, int vl, int mode, u64 data)
3599{
3600 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3601
3602 return dd->send_egress_err_status_cnt[5];
3603}
3604
3605static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3606 void *context, int vl, int mode,
3607 u64 data)
3608{
3609 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3610
3611 return dd->send_egress_err_status_cnt[4];
3612}
3613
3614static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3615 const struct cntr_entry *entry,
3616 void *context, int vl, int mode, u64 data)
3617{
3618 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3619
3620 return dd->send_egress_err_status_cnt[3];
3621}
3622
3623static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3624 void *context, int vl, int mode,
3625 u64 data)
3626{
3627 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3628
3629 return dd->send_egress_err_status_cnt[2];
3630}
3631
3632static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3633 const struct cntr_entry *entry,
3634 void *context, int vl, int mode, u64 data)
3635{
3636 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3637
3638 return dd->send_egress_err_status_cnt[1];
3639}
3640
3641static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3642 const struct cntr_entry *entry,
3643 void *context, int vl, int mode, u64 data)
3644{
3645 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3646
3647 return dd->send_egress_err_status_cnt[0];
3648}
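/*
 * Illustrative note (not in the original source): the array index used
 * by each egress accessor above appears to track the bit position of
 * the corresponding error within SendEgressErrStatus, e.g.
 * send_egress_err_status_cnt[0] for TxPktIntegrityMemCorErr up through
 * send_egress_err_status_cnt[58] for TxSbHdrCorErr.
 */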
3649
3650/*
3651 * Software counters corresponding to each of the
3652 * error status bits within SendErrStatus
3653 */
3654static u64 access_send_csr_write_bad_addr_err_cnt(
3655 const struct cntr_entry *entry,
3656 void *context, int vl, int mode, u64 data)
3657{
3658 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3659
3660 return dd->send_err_status_cnt[2];
3661}
3662
3663static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3664 void *context, int vl,
3665 int mode, u64 data)
3666{
3667 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3668
3669 return dd->send_err_status_cnt[1];
3670}
3671
3672static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3673 void *context, int vl, int mode,
3674 u64 data)
3675{
3676 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3677
3678 return dd->send_err_status_cnt[0];
3679}
3680
3681/*
3682 * Software counters corresponding to each of the
3683 * error status bits within SendCtxtErrStatus
3684 */
3685static u64 access_pio_write_out_of_bounds_err_cnt(
3686 const struct cntr_entry *entry,
3687 void *context, int vl, int mode, u64 data)
3688{
3689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3690
3691 return dd->sw_ctxt_err_status_cnt[4];
3692}
3693
3694static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3695 void *context, int vl, int mode,
3696 u64 data)
3697{
3698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3699
3700 return dd->sw_ctxt_err_status_cnt[3];
3701}
3702
3703static u64 access_pio_write_crosses_boundary_err_cnt(
3704 const struct cntr_entry *entry,
3705 void *context, int vl, int mode, u64 data)
3706{
3707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3708
3709 return dd->sw_ctxt_err_status_cnt[2];
3710}
3711
3712static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3713 void *context, int vl,
3714 int mode, u64 data)
3715{
3716 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3717
3718 return dd->sw_ctxt_err_status_cnt[1];
3719}
3720
3721static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3722 void *context, int vl, int mode,
3723 u64 data)
3724{
3725 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3726
3727 return dd->sw_ctxt_err_status_cnt[0];
3728}
3729
3730/*
3731 * Software counters corresponding to each of the
3732 * error status bits within SendDmaEngErrStatus
3733 */
3734static u64 access_sdma_header_request_fifo_cor_err_cnt(
3735 const struct cntr_entry *entry,
3736 void *context, int vl, int mode, u64 data)
3737{
3738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3739
3740 return dd->sw_send_dma_eng_err_status_cnt[23];
3741}
3742
3743static u64 access_sdma_header_storage_cor_err_cnt(
3744 const struct cntr_entry *entry,
3745 void *context, int vl, int mode, u64 data)
3746{
3747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3748
3749 return dd->sw_send_dma_eng_err_status_cnt[22];
3750}
3751
3752static u64 access_sdma_packet_tracking_cor_err_cnt(
3753 const struct cntr_entry *entry,
3754 void *context, int vl, int mode, u64 data)
3755{
3756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3757
3758 return dd->sw_send_dma_eng_err_status_cnt[21];
3759}
3760
3761static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3762 void *context, int vl, int mode,
3763 u64 data)
3764{
3765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3766
3767 return dd->sw_send_dma_eng_err_status_cnt[20];
3768}
3769
3770static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3771 void *context, int vl, int mode,
3772 u64 data)
3773{
3774 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3775
3776 return dd->sw_send_dma_eng_err_status_cnt[19];
3777}
3778
3779static u64 access_sdma_header_request_fifo_unc_err_cnt(
3780 const struct cntr_entry *entry,
3781 void *context, int vl, int mode, u64 data)
3782{
3783 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3784
3785 return dd->sw_send_dma_eng_err_status_cnt[18];
3786}
3787
3788static u64 access_sdma_header_storage_unc_err_cnt(
3789 const struct cntr_entry *entry,
3790 void *context, int vl, int mode, u64 data)
3791{
3792 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793
3794 return dd->sw_send_dma_eng_err_status_cnt[17];
3795}
3796
3797static u64 access_sdma_packet_tracking_unc_err_cnt(
3798 const struct cntr_entry *entry,
3799 void *context, int vl, int mode, u64 data)
3800{
3801 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802
3803 return dd->sw_send_dma_eng_err_status_cnt[16];
3804}
3805
3806static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3807 void *context, int vl, int mode,
3808 u64 data)
3809{
3810 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811
3812 return dd->sw_send_dma_eng_err_status_cnt[15];
3813}
3814
3815static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3816 void *context, int vl, int mode,
3817 u64 data)
3818{
3819 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820
3821 return dd->sw_send_dma_eng_err_status_cnt[14];
3822}
3823
3824static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3825 void *context, int vl, int mode,
3826 u64 data)
3827{
3828 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3829
3830 return dd->sw_send_dma_eng_err_status_cnt[13];
3831}
3832
3833static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3834 void *context, int vl, int mode,
3835 u64 data)
3836{
3837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3838
3839 return dd->sw_send_dma_eng_err_status_cnt[12];
3840}
3841
3842static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3843 void *context, int vl, int mode,
3844 u64 data)
3845{
3846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3847
3848 return dd->sw_send_dma_eng_err_status_cnt[11];
3849}
3850
3851static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3852 void *context, int vl, int mode,
3853 u64 data)
3854{
3855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3856
3857 return dd->sw_send_dma_eng_err_status_cnt[10];
3858}
3859
3860static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3861 void *context, int vl, int mode,
3862 u64 data)
3863{
3864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3865
3866 return dd->sw_send_dma_eng_err_status_cnt[9];
3867}
3868
3869static u64 access_sdma_packet_desc_overflow_err_cnt(
3870 const struct cntr_entry *entry,
3871 void *context, int vl, int mode, u64 data)
3872{
3873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3874
3875 return dd->sw_send_dma_eng_err_status_cnt[8];
3876}
3877
3878static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3879 void *context, int vl,
3880 int mode, u64 data)
3881{
3882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3883
3884 return dd->sw_send_dma_eng_err_status_cnt[7];
3885}
3886
3887static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3888 void *context, int vl, int mode, u64 data)
3889{
3890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3891
3892 return dd->sw_send_dma_eng_err_status_cnt[6];
3893}
3894
3895static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3896 void *context, int vl, int mode,
3897 u64 data)
3898{
3899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3900
3901 return dd->sw_send_dma_eng_err_status_cnt[5];
3902}
3903
3904static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3905 void *context, int vl, int mode,
3906 u64 data)
3907{
3908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3909
3910 return dd->sw_send_dma_eng_err_status_cnt[4];
3911}
3912
3913static u64 access_sdma_tail_out_of_bounds_err_cnt(
3914 const struct cntr_entry *entry,
3915 void *context, int vl, int mode, u64 data)
3916{
3917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3918
3919 return dd->sw_send_dma_eng_err_status_cnt[3];
3920}
3921
3922static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3923 void *context, int vl, int mode,
3924 u64 data)
3925{
3926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3927
3928 return dd->sw_send_dma_eng_err_status_cnt[2];
3929}
3930
3931static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3932 void *context, int vl, int mode,
3933 u64 data)
3934{
3935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3936
3937 return dd->sw_send_dma_eng_err_status_cnt[1];
3938}
3939
3940static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3941 void *context, int vl, int mode,
3942 u64 data)
3943{
3944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3945
3946 return dd->sw_send_dma_eng_err_status_cnt[0];
3947}
3948
3949#define def_access_sw_cpu(cntr) \
3950static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3951 void *context, int vl, int mode, u64 data) \
3952{ \
3953 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3954 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3955 ppd->ibport_data.rvp.cntr, vl, \
3956 mode, data); \
3957}
3958
3959def_access_sw_cpu(rc_acks);
3960def_access_sw_cpu(rc_qacks);
3961def_access_sw_cpu(rc_delayed_comp);
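/*
 * Illustrative sketch (not in the original source): each
 * def_access_sw_cpu(cntr) invocation above should expand to roughly the
 * following per-CPU accessor, shown here for rc_acks:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */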
3962
3963#define def_access_ibp_counter(cntr) \
3964static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3965 void *context, int vl, int mode, u64 data) \
3966{ \
3967 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3968 \
3969 if (vl != CNTR_INVALID_VL) \
3970 return 0; \
3971 \
 3972 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
3973 mode, data); \
3974}
3975
3976def_access_ibp_counter(loop_pkts);
3977def_access_ibp_counter(rc_resends);
3978def_access_ibp_counter(rnr_naks);
3979def_access_ibp_counter(other_naks);
3980def_access_ibp_counter(rc_timeouts);
3981def_access_ibp_counter(pkt_drops);
3982def_access_ibp_counter(dmawait);
3983def_access_ibp_counter(rc_seqnak);
3984def_access_ibp_counter(rc_dupreq);
3985def_access_ibp_counter(rdma_seq);
3986def_access_ibp_counter(unaligned);
3987def_access_ibp_counter(seq_naks);
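/*
 * Illustrative sketch (not in the original source): each
 * def_access_ibp_counter(cntr) invocation above should expand to roughly
 * the following accessor, shown here for rc_resends; the counter is
 * port-wide, so a per-VL query simply returns 0:
 *
 *	static u64 access_ibp_rc_resends(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_rc_resends,
 *				     mode, data);
 *	}
 */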
3988
3989static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3990[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3991[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3992 CNTR_NORMAL),
3993[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3994 CNTR_NORMAL),
3995[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3996 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3997 CNTR_NORMAL),
3998[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3999 CNTR_NORMAL),
4000[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4001 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4002[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4003 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4004[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4005 CNTR_NORMAL),
4006[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4007 CNTR_NORMAL),
4008[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4009 CNTR_NORMAL),
4010[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4011 CNTR_NORMAL),
4012[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4013 CNTR_NORMAL),
4014[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4015 CNTR_NORMAL),
4016[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4017 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4018[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4019 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4020[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4021 CNTR_SYNTH),
4022[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4023[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4024 CNTR_SYNTH),
4025[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4026 CNTR_SYNTH),
4027[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4028 CNTR_SYNTH),
4029[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4030 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4031[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4032 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4033 CNTR_SYNTH),
4034[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4035 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4036[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4037 CNTR_SYNTH),
4038[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4039 CNTR_SYNTH),
4040[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4041 CNTR_SYNTH),
4042[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4043 CNTR_SYNTH),
4044[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4045 CNTR_SYNTH),
4046[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4047 CNTR_SYNTH),
4048[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4049 CNTR_SYNTH),
4050[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4051 CNTR_SYNTH | CNTR_VL),
4052[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4053 CNTR_SYNTH | CNTR_VL),
4054[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4055[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4056 CNTR_SYNTH | CNTR_VL),
4057[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4058[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4059 CNTR_SYNTH | CNTR_VL),
4060[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4061 CNTR_SYNTH),
4062[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4063 CNTR_SYNTH | CNTR_VL),
4064[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4065 CNTR_SYNTH),
4066[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4067 CNTR_SYNTH | CNTR_VL),
4068[C_DC_TOTAL_CRC] =
4069 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4070 CNTR_SYNTH),
4071[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4072 CNTR_SYNTH),
4073[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4074 CNTR_SYNTH),
4075[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4076 CNTR_SYNTH),
4077[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4078 CNTR_SYNTH),
4079[C_DC_CRC_MULT_LN] =
4080 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4081 CNTR_SYNTH),
4082[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4083 CNTR_SYNTH),
4084[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4085 CNTR_SYNTH),
4086[C_DC_SEQ_CRC_CNT] =
4087 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4088 CNTR_SYNTH),
4089[C_DC_ESC0_ONLY_CNT] =
4090 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4091 CNTR_SYNTH),
4092[C_DC_ESC0_PLUS1_CNT] =
4093 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4094 CNTR_SYNTH),
4095[C_DC_ESC0_PLUS2_CNT] =
4096 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4097 CNTR_SYNTH),
4098[C_DC_REINIT_FROM_PEER_CNT] =
4099 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4100 CNTR_SYNTH),
4101[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4102 CNTR_SYNTH),
4103[C_DC_MISC_FLG_CNT] =
4104 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4105 CNTR_SYNTH),
4106[C_DC_PRF_GOOD_LTP_CNT] =
4107 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4108[C_DC_PRF_ACCEPTED_LTP_CNT] =
4109 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4110 CNTR_SYNTH),
4111[C_DC_PRF_RX_FLIT_CNT] =
4112 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4113[C_DC_PRF_TX_FLIT_CNT] =
4114 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4115[C_DC_PRF_CLK_CNTR] =
4116 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4117[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4118 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4119[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4120 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4121 CNTR_SYNTH),
4122[C_DC_PG_STS_TX_SBE_CNT] =
4123 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4124[C_DC_PG_STS_TX_MBE_CNT] =
4125 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4126 CNTR_SYNTH),
4127[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4128 access_sw_cpu_intr),
4129[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4130 access_sw_cpu_rcv_limit),
4131[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4132 access_sw_vtx_wait),
4133[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4134 access_sw_pio_wait),
4135[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4136 access_sw_pio_drain),
4137[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4138 access_sw_kmem_wait),
4139[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4140 access_sw_send_schedule),
4141[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4142 SEND_DMA_DESC_FETCHED_CNT, 0,
4143 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4144 dev_access_u32_csr),
4145[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4146 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4147 access_sde_int_cnt),
4148[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4149 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4150 access_sde_err_cnt),
4151[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4152 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4153 access_sde_idle_int_cnt),
4154[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4155 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4156 access_sde_progress_int_cnt),
4157/* MISC_ERR_STATUS */
4158[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4159 CNTR_NORMAL,
4160 access_misc_pll_lock_fail_err_cnt),
4161[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4162 CNTR_NORMAL,
4163 access_misc_mbist_fail_err_cnt),
4164[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4165 CNTR_NORMAL,
4166 access_misc_invalid_eep_cmd_err_cnt),
4167[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4168 CNTR_NORMAL,
4169 access_misc_efuse_done_parity_err_cnt),
4170[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4171 CNTR_NORMAL,
4172 access_misc_efuse_write_err_cnt),
4173[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4174 0, CNTR_NORMAL,
4175 access_misc_efuse_read_bad_addr_err_cnt),
4176[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4177 CNTR_NORMAL,
4178 access_misc_efuse_csr_parity_err_cnt),
4179[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4180 CNTR_NORMAL,
4181 access_misc_fw_auth_failed_err_cnt),
4182[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4183 CNTR_NORMAL,
4184 access_misc_key_mismatch_err_cnt),
4185[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4186 CNTR_NORMAL,
4187 access_misc_sbus_write_failed_err_cnt),
4188[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4189 CNTR_NORMAL,
4190 access_misc_csr_write_bad_addr_err_cnt),
4191[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4192 CNTR_NORMAL,
4193 access_misc_csr_read_bad_addr_err_cnt),
4194[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4195 CNTR_NORMAL,
4196 access_misc_csr_parity_err_cnt),
4197/* CceErrStatus */
4198[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4199 CNTR_NORMAL,
4200 access_sw_cce_err_status_aggregated_cnt),
4201[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4202 CNTR_NORMAL,
4203 access_cce_msix_csr_parity_err_cnt),
4204[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4205 CNTR_NORMAL,
4206 access_cce_int_map_unc_err_cnt),
4207[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4208 CNTR_NORMAL,
4209 access_cce_int_map_cor_err_cnt),
4210[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4211 CNTR_NORMAL,
4212 access_cce_msix_table_unc_err_cnt),
4213[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4214 CNTR_NORMAL,
4215 access_cce_msix_table_cor_err_cnt),
4216[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4217 0, CNTR_NORMAL,
4218 access_cce_rxdma_conv_fifo_parity_err_cnt),
4219[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4220 0, CNTR_NORMAL,
4221 access_cce_rcpl_async_fifo_parity_err_cnt),
4222[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4223 CNTR_NORMAL,
4224 access_cce_seg_write_bad_addr_err_cnt),
4225[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4226 CNTR_NORMAL,
4227 access_cce_seg_read_bad_addr_err_cnt),
4228[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4229 CNTR_NORMAL,
4230 access_la_triggered_cnt),
4231[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4232 CNTR_NORMAL,
4233 access_cce_trgt_cpl_timeout_err_cnt),
4234[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4235 CNTR_NORMAL,
4236 access_pcic_receive_parity_err_cnt),
4237[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4238 CNTR_NORMAL,
4239 access_pcic_transmit_back_parity_err_cnt),
4240[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4241 0, CNTR_NORMAL,
4242 access_pcic_transmit_front_parity_err_cnt),
4243[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4244 CNTR_NORMAL,
4245 access_pcic_cpl_dat_q_unc_err_cnt),
4246[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4247 CNTR_NORMAL,
4248 access_pcic_cpl_hd_q_unc_err_cnt),
4249[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4250 CNTR_NORMAL,
4251 access_pcic_post_dat_q_unc_err_cnt),
4252[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4253 CNTR_NORMAL,
4254 access_pcic_post_hd_q_unc_err_cnt),
4255[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4256 CNTR_NORMAL,
4257 access_pcic_retry_sot_mem_unc_err_cnt),
4258[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4259 CNTR_NORMAL,
4260 access_pcic_retry_mem_unc_err),
4261[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4262 CNTR_NORMAL,
4263 access_pcic_n_post_dat_q_parity_err_cnt),
4264[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4265 CNTR_NORMAL,
4266 access_pcic_n_post_h_q_parity_err_cnt),
4267[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4268 CNTR_NORMAL,
4269 access_pcic_cpl_dat_q_cor_err_cnt),
4270[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4271 CNTR_NORMAL,
4272 access_pcic_cpl_hd_q_cor_err_cnt),
4273[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4274 CNTR_NORMAL,
4275 access_pcic_post_dat_q_cor_err_cnt),
4276[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4277 CNTR_NORMAL,
4278 access_pcic_post_hd_q_cor_err_cnt),
4279[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4280 CNTR_NORMAL,
4281 access_pcic_retry_sot_mem_cor_err_cnt),
4282[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4283 CNTR_NORMAL,
4284 access_pcic_retry_mem_cor_err_cnt),
4285[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4286 "CceCli1AsyncFifoDbgParityError", 0, 0,
4287 CNTR_NORMAL,
4288 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4289[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4290 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4291 CNTR_NORMAL,
4292 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4293 ),
4294[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4295 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4296 CNTR_NORMAL,
4297 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4298[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4299 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4300 CNTR_NORMAL,
4301 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4302[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4303 0, CNTR_NORMAL,
4304 access_cce_cli2_async_fifo_parity_err_cnt),
4305[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4306 CNTR_NORMAL,
4307 access_cce_csr_cfg_bus_parity_err_cnt),
4308[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4309 0, CNTR_NORMAL,
4310 access_cce_cli0_async_fifo_parity_err_cnt),
4311[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4312 CNTR_NORMAL,
4313 access_cce_rspd_data_parity_err_cnt),
4314[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4315 CNTR_NORMAL,
4316 access_cce_trgt_access_err_cnt),
4317[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4318 0, CNTR_NORMAL,
4319 access_cce_trgt_async_fifo_parity_err_cnt),
4320[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4321 CNTR_NORMAL,
4322 access_cce_csr_write_bad_addr_err_cnt),
4323[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4324 CNTR_NORMAL,
4325 access_cce_csr_read_bad_addr_err_cnt),
4326[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4327 CNTR_NORMAL,
4328 access_ccs_csr_parity_err_cnt),
4329
4330/* RcvErrStatus */
4331[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4332 CNTR_NORMAL,
4333 access_rx_csr_parity_err_cnt),
4334[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4335 CNTR_NORMAL,
4336 access_rx_csr_write_bad_addr_err_cnt),
4337[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4338 CNTR_NORMAL,
4339 access_rx_csr_read_bad_addr_err_cnt),
4340[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4341 CNTR_NORMAL,
4342 access_rx_dma_csr_unc_err_cnt),
4343[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4344 CNTR_NORMAL,
4345 access_rx_dma_dq_fsm_encoding_err_cnt),
4346[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4347 CNTR_NORMAL,
4348 access_rx_dma_eq_fsm_encoding_err_cnt),
4349[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4350 CNTR_NORMAL,
4351 access_rx_dma_csr_parity_err_cnt),
4352[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4353 CNTR_NORMAL,
4354 access_rx_rbuf_data_cor_err_cnt),
4355[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4356 CNTR_NORMAL,
4357 access_rx_rbuf_data_unc_err_cnt),
4358[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4359 CNTR_NORMAL,
4360 access_rx_dma_data_fifo_rd_cor_err_cnt),
4361[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4362 CNTR_NORMAL,
4363 access_rx_dma_data_fifo_rd_unc_err_cnt),
4364[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4365 CNTR_NORMAL,
4366 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4367[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4368 CNTR_NORMAL,
4369 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4370[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4371 CNTR_NORMAL,
4372 access_rx_rbuf_desc_part2_cor_err_cnt),
4373[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4374 CNTR_NORMAL,
4375 access_rx_rbuf_desc_part2_unc_err_cnt),
4376[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4377 CNTR_NORMAL,
4378 access_rx_rbuf_desc_part1_cor_err_cnt),
4379[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4380 CNTR_NORMAL,
4381 access_rx_rbuf_desc_part1_unc_err_cnt),
4382[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4383 CNTR_NORMAL,
4384 access_rx_hq_intr_fsm_err_cnt),
4385[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4386 CNTR_NORMAL,
4387 access_rx_hq_intr_csr_parity_err_cnt),
4388[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4389 CNTR_NORMAL,
4390 access_rx_lookup_csr_parity_err_cnt),
4391[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4392 CNTR_NORMAL,
4393 access_rx_lookup_rcv_array_cor_err_cnt),
4394[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4395 CNTR_NORMAL,
4396 access_rx_lookup_rcv_array_unc_err_cnt),
4397[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4398 0, CNTR_NORMAL,
4399 access_rx_lookup_des_part2_parity_err_cnt),
4400[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4401 0, CNTR_NORMAL,
4402 access_rx_lookup_des_part1_unc_cor_err_cnt),
4403[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4404 CNTR_NORMAL,
4405 access_rx_lookup_des_part1_unc_err_cnt),
4406[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4407 CNTR_NORMAL,
4408 access_rx_rbuf_next_free_buf_cor_err_cnt),
4409[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4410 CNTR_NORMAL,
4411 access_rx_rbuf_next_free_buf_unc_err_cnt),
4412[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4413 "RxRbufFlInitWrAddrParityErr", 0, 0,
4414 CNTR_NORMAL,
4415 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4416[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4417 0, CNTR_NORMAL,
4418 access_rx_rbuf_fl_initdone_parity_err_cnt),
4419[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4420 0, CNTR_NORMAL,
4421 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4422[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4423 CNTR_NORMAL,
4424 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4425[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4426 CNTR_NORMAL,
4427 access_rx_rbuf_empty_err_cnt),
4428[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4429 CNTR_NORMAL,
4430 access_rx_rbuf_full_err_cnt),
4431[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4432 CNTR_NORMAL,
4433 access_rbuf_bad_lookup_err_cnt),
4434[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4435 CNTR_NORMAL,
4436 access_rbuf_ctx_id_parity_err_cnt),
4437[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4438 CNTR_NORMAL,
4439 access_rbuf_csr_qeopdw_parity_err_cnt),
4440[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4441 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4442 CNTR_NORMAL,
4443 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4444[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4445 "RxRbufCsrQTlPtrParityErr", 0, 0,
4446 CNTR_NORMAL,
4447 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4448[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4449 0, CNTR_NORMAL,
4450 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4451[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4452 0, CNTR_NORMAL,
4453 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4454[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4455 0, 0, CNTR_NORMAL,
4456 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4457[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4458 0, CNTR_NORMAL,
4459 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4460[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4461 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4462 CNTR_NORMAL,
4463 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4464[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4465 0, CNTR_NORMAL,
4466 access_rx_rbuf_block_list_read_cor_err_cnt),
4467[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4468 0, CNTR_NORMAL,
4469 access_rx_rbuf_block_list_read_unc_err_cnt),
4470[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4471 CNTR_NORMAL,
4472 access_rx_rbuf_lookup_des_cor_err_cnt),
4473[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4474 CNTR_NORMAL,
4475 access_rx_rbuf_lookup_des_unc_err_cnt),
4476[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4477 "RxRbufLookupDesRegUncCorErr", 0, 0,
4478 CNTR_NORMAL,
4479 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4480[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4481 CNTR_NORMAL,
4482 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4483[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4484 CNTR_NORMAL,
4485 access_rx_rbuf_free_list_cor_err_cnt),
4486[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4487 CNTR_NORMAL,
4488 access_rx_rbuf_free_list_unc_err_cnt),
4489[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4490 CNTR_NORMAL,
4491 access_rx_rcv_fsm_encoding_err_cnt),
4492[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4493 CNTR_NORMAL,
4494 access_rx_dma_flag_cor_err_cnt),
4495[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4496 CNTR_NORMAL,
4497 access_rx_dma_flag_unc_err_cnt),
4498[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4499 CNTR_NORMAL,
4500 access_rx_dc_sop_eop_parity_err_cnt),
4501[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4502 CNTR_NORMAL,
4503 access_rx_rcv_csr_parity_err_cnt),
4504[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4505 CNTR_NORMAL,
4506 access_rx_rcv_qp_map_table_cor_err_cnt),
4507[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4508 CNTR_NORMAL,
4509 access_rx_rcv_qp_map_table_unc_err_cnt),
4510[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4511 CNTR_NORMAL,
4512 access_rx_rcv_data_cor_err_cnt),
4513[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4514 CNTR_NORMAL,
4515 access_rx_rcv_data_unc_err_cnt),
4516[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4517 CNTR_NORMAL,
4518 access_rx_rcv_hdr_cor_err_cnt),
4519[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4520 CNTR_NORMAL,
4521 access_rx_rcv_hdr_unc_err_cnt),
4522[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4523 CNTR_NORMAL,
4524 access_rx_dc_intf_parity_err_cnt),
4525[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4526 CNTR_NORMAL,
4527 access_rx_dma_csr_cor_err_cnt),
4528/* SendPioErrStatus */
4529[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4530 CNTR_NORMAL,
4531 access_pio_pec_sop_head_parity_err_cnt),
4532[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4533 CNTR_NORMAL,
4534 access_pio_pcc_sop_head_parity_err_cnt),
4535[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4536 0, 0, CNTR_NORMAL,
4537 access_pio_last_returned_cnt_parity_err_cnt),
4538[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4539 0, CNTR_NORMAL,
4540 access_pio_current_free_cnt_parity_err_cnt),
4541[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4542 CNTR_NORMAL,
4543 access_pio_reserved_31_err_cnt),
4544[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4545 CNTR_NORMAL,
4546 access_pio_reserved_30_err_cnt),
4547[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4548 CNTR_NORMAL,
4549 access_pio_ppmc_sop_len_err_cnt),
4550[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4551 CNTR_NORMAL,
4552 access_pio_ppmc_bqc_mem_parity_err_cnt),
4553[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4554 CNTR_NORMAL,
4555 access_pio_vl_fifo_parity_err_cnt),
4556[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4557 CNTR_NORMAL,
4558 access_pio_vlf_sop_parity_err_cnt),
4559[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4560 CNTR_NORMAL,
4561 access_pio_vlf_v1_len_parity_err_cnt),
4562[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4563 CNTR_NORMAL,
4564 access_pio_block_qw_count_parity_err_cnt),
4565[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4566 CNTR_NORMAL,
4567 access_pio_write_qw_valid_parity_err_cnt),
4568[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4569 CNTR_NORMAL,
4570 access_pio_state_machine_err_cnt),
4571[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4572 CNTR_NORMAL,
4573 access_pio_write_data_parity_err_cnt),
4574[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4575 CNTR_NORMAL,
4576 access_pio_host_addr_mem_cor_err_cnt),
4577[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4578 CNTR_NORMAL,
4579 access_pio_host_addr_mem_unc_err_cnt),
4580[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4581 CNTR_NORMAL,
4582 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4583[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4584 CNTR_NORMAL,
4585 access_pio_init_sm_in_err_cnt),
4586[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4587 CNTR_NORMAL,
4588 access_pio_ppmc_pbl_fifo_err_cnt),
4589[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4590 0, CNTR_NORMAL,
4591 access_pio_credit_ret_fifo_parity_err_cnt),
4592[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4593 CNTR_NORMAL,
4594 access_pio_v1_len_mem_bank1_cor_err_cnt),
4595[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4596 CNTR_NORMAL,
4597 access_pio_v1_len_mem_bank0_cor_err_cnt),
4598[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4599 CNTR_NORMAL,
4600 access_pio_v1_len_mem_bank1_unc_err_cnt),
4601[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4602 CNTR_NORMAL,
4603 access_pio_v1_len_mem_bank0_unc_err_cnt),
4604[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4605 CNTR_NORMAL,
4606 access_pio_sm_pkt_reset_parity_err_cnt),
4607[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4608 CNTR_NORMAL,
4609 access_pio_pkt_evict_fifo_parity_err_cnt),
4610[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4611 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4612 CNTR_NORMAL,
4613 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4614[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4615 CNTR_NORMAL,
4616 access_pio_sbrdctl_crrel_parity_err_cnt),
4617[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4618 CNTR_NORMAL,
4619 access_pio_pec_fifo_parity_err_cnt),
4620[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4621 CNTR_NORMAL,
4622 access_pio_pcc_fifo_parity_err_cnt),
4623[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4624 CNTR_NORMAL,
4625 access_pio_sb_mem_fifo1_err_cnt),
4626[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4627 CNTR_NORMAL,
4628 access_pio_sb_mem_fifo0_err_cnt),
4629[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4630 CNTR_NORMAL,
4631 access_pio_csr_parity_err_cnt),
4632[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4633 CNTR_NORMAL,
4634 access_pio_write_addr_parity_err_cnt),
4635[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4636 CNTR_NORMAL,
4637 access_pio_write_bad_ctxt_err_cnt),
4638/* SendDmaErrStatus */
4639[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4640 0, CNTR_NORMAL,
4641 access_sdma_pcie_req_tracking_cor_err_cnt),
4642[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4643 0, CNTR_NORMAL,
4644 access_sdma_pcie_req_tracking_unc_err_cnt),
4645[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4646 CNTR_NORMAL,
4647 access_sdma_csr_parity_err_cnt),
4648[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4649 CNTR_NORMAL,
4650 access_sdma_rpy_tag_err_cnt),
4651/* SendEgressErrStatus */
4652[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4653 CNTR_NORMAL,
4654 access_tx_read_pio_memory_csr_unc_err_cnt),
4655[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4656 0, CNTR_NORMAL,
4657 access_tx_read_sdma_memory_csr_err_cnt),
4658[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4659 CNTR_NORMAL,
4660 access_tx_egress_fifo_cor_err_cnt),
4661[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4662 CNTR_NORMAL,
4663 access_tx_read_pio_memory_cor_err_cnt),
4664[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4665 CNTR_NORMAL,
4666 access_tx_read_sdma_memory_cor_err_cnt),
4667[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4668 CNTR_NORMAL,
4669 access_tx_sb_hdr_cor_err_cnt),
4670[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4671 CNTR_NORMAL,
4672 access_tx_credit_overrun_err_cnt),
4673[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4674 CNTR_NORMAL,
4675 access_tx_launch_fifo8_cor_err_cnt),
4676[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4677 CNTR_NORMAL,
4678 access_tx_launch_fifo7_cor_err_cnt),
4679[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4680 CNTR_NORMAL,
4681 access_tx_launch_fifo6_cor_err_cnt),
4682[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4683 CNTR_NORMAL,
4684 access_tx_launch_fifo5_cor_err_cnt),
4685[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4686 CNTR_NORMAL,
4687 access_tx_launch_fifo4_cor_err_cnt),
4688[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4689 CNTR_NORMAL,
4690 access_tx_launch_fifo3_cor_err_cnt),
4691[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4692 CNTR_NORMAL,
4693 access_tx_launch_fifo2_cor_err_cnt),
4694[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4695 CNTR_NORMAL,
4696 access_tx_launch_fifo1_cor_err_cnt),
4697[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4698 CNTR_NORMAL,
4699 access_tx_launch_fifo0_cor_err_cnt),
4700[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4701 CNTR_NORMAL,
4702 access_tx_credit_return_vl_err_cnt),
4703[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4704 CNTR_NORMAL,
4705 access_tx_hcrc_insertion_err_cnt),
4706[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4707 CNTR_NORMAL,
4708 access_tx_egress_fifo_unc_err_cnt),
4709[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4710 CNTR_NORMAL,
4711 access_tx_read_pio_memory_unc_err_cnt),
4712[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4713 CNTR_NORMAL,
4714 access_tx_read_sdma_memory_unc_err_cnt),
4715[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4716 CNTR_NORMAL,
4717 access_tx_sb_hdr_unc_err_cnt),
4718[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4719 CNTR_NORMAL,
 4720 access_tx_credit_return_parity_err_cnt),
4721[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4722 0, 0, CNTR_NORMAL,
4723 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4724[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4725 0, 0, CNTR_NORMAL,
4726 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4727[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4728 0, 0, CNTR_NORMAL,
4729 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4730[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4731 0, 0, CNTR_NORMAL,
4732 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4733[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4734 0, 0, CNTR_NORMAL,
4735 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4736[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4737 0, 0, CNTR_NORMAL,
4738 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4739[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4740 0, 0, CNTR_NORMAL,
4741 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4742[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4743 0, 0, CNTR_NORMAL,
4744 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4745[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4746 0, 0, CNTR_NORMAL,
4747 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4748[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4749 0, 0, CNTR_NORMAL,
4750 access_tx_sdma15_disallowed_packet_err_cnt),
4751[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4752 0, 0, CNTR_NORMAL,
4753 access_tx_sdma14_disallowed_packet_err_cnt),
4754[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4755 0, 0, CNTR_NORMAL,
4756 access_tx_sdma13_disallowed_packet_err_cnt),
4757[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4758 0, 0, CNTR_NORMAL,
4759 access_tx_sdma12_disallowed_packet_err_cnt),
4760[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4761 0, 0, CNTR_NORMAL,
4762 access_tx_sdma11_disallowed_packet_err_cnt),
4763[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4764 0, 0, CNTR_NORMAL,
4765 access_tx_sdma10_disallowed_packet_err_cnt),
4766[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4767 0, 0, CNTR_NORMAL,
4768 access_tx_sdma9_disallowed_packet_err_cnt),
4769[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4770 0, 0, CNTR_NORMAL,
4771 access_tx_sdma8_disallowed_packet_err_cnt),
4772[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4773 0, 0, CNTR_NORMAL,
4774 access_tx_sdma7_disallowed_packet_err_cnt),
4775[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4776 0, 0, CNTR_NORMAL,
4777 access_tx_sdma6_disallowed_packet_err_cnt),
4778[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4779 0, 0, CNTR_NORMAL,
4780 access_tx_sdma5_disallowed_packet_err_cnt),
4781[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4782 0, 0, CNTR_NORMAL,
4783 access_tx_sdma4_disallowed_packet_err_cnt),
4784[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4785 0, 0, CNTR_NORMAL,
4786 access_tx_sdma3_disallowed_packet_err_cnt),
4787[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4788 0, 0, CNTR_NORMAL,
4789 access_tx_sdma2_disallowed_packet_err_cnt),
4790[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4791 0, 0, CNTR_NORMAL,
4792 access_tx_sdma1_disallowed_packet_err_cnt),
4793[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4794 0, 0, CNTR_NORMAL,
4795 access_tx_sdma0_disallowed_packet_err_cnt),
4796[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4797 CNTR_NORMAL,
4798 access_tx_config_parity_err_cnt),
4799[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4800 CNTR_NORMAL,
4801 access_tx_sbrd_ctl_csr_parity_err_cnt),
4802[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4803 CNTR_NORMAL,
4804 access_tx_launch_csr_parity_err_cnt),
4805[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4806 CNTR_NORMAL,
4807 access_tx_illegal_vl_err_cnt),
4808[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4809 "TxSbrdCtlStateMachineParityErr", 0, 0,
4810 CNTR_NORMAL,
4811 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4812[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4813 CNTR_NORMAL,
4814 access_egress_reserved_10_err_cnt),
4815[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4816 CNTR_NORMAL,
4817 access_egress_reserved_9_err_cnt),
4818[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4819 0, 0, CNTR_NORMAL,
4820 access_tx_sdma_launch_intf_parity_err_cnt),
4821[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4822 CNTR_NORMAL,
4823 access_tx_pio_launch_intf_parity_err_cnt),
4824[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4825 CNTR_NORMAL,
4826 access_egress_reserved_6_err_cnt),
4827[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4828 CNTR_NORMAL,
4829 access_tx_incorrect_link_state_err_cnt),
4830[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4831 CNTR_NORMAL,
4832 access_tx_linkdown_err_cnt),
4833[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4834 "EgressFifoUnderrunOrParityErr", 0, 0,
4835 CNTR_NORMAL,
4836 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4837[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4838 CNTR_NORMAL,
4839 access_egress_reserved_2_err_cnt),
4840[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4841 CNTR_NORMAL,
4842 access_tx_pkt_integrity_mem_unc_err_cnt),
4843[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4844 CNTR_NORMAL,
4845 access_tx_pkt_integrity_mem_cor_err_cnt),
4846/* SendErrStatus */
4847[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4848 CNTR_NORMAL,
4849 access_send_csr_write_bad_addr_err_cnt),
4850[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4851 CNTR_NORMAL,
4852 access_send_csr_read_bad_addr_err_cnt),
4853[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4854 CNTR_NORMAL,
4855 access_send_csr_parity_cnt),
4856/* SendCtxtErrStatus */
4857[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4858 CNTR_NORMAL,
4859 access_pio_write_out_of_bounds_err_cnt),
4860[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4861 CNTR_NORMAL,
4862 access_pio_write_overflow_err_cnt),
4863[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4864 0, 0, CNTR_NORMAL,
4865 access_pio_write_crosses_boundary_err_cnt),
4866[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4867 CNTR_NORMAL,
4868 access_pio_disallowed_packet_err_cnt),
4869[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4870 CNTR_NORMAL,
4871 access_pio_inconsistent_sop_err_cnt),
4872/* SendDmaEngErrStatus */
4873[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4874 0, 0, CNTR_NORMAL,
4875 access_sdma_header_request_fifo_cor_err_cnt),
4876[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4877 CNTR_NORMAL,
4878 access_sdma_header_storage_cor_err_cnt),
4879[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4880 CNTR_NORMAL,
4881 access_sdma_packet_tracking_cor_err_cnt),
4882[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4883 CNTR_NORMAL,
4884 access_sdma_assembly_cor_err_cnt),
4885[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4886 CNTR_NORMAL,
4887 access_sdma_desc_table_cor_err_cnt),
4888[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4889 0, 0, CNTR_NORMAL,
4890 access_sdma_header_request_fifo_unc_err_cnt),
4891[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4892 CNTR_NORMAL,
4893 access_sdma_header_storage_unc_err_cnt),
4894[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4895 CNTR_NORMAL,
4896 access_sdma_packet_tracking_unc_err_cnt),
4897[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4898 CNTR_NORMAL,
4899 access_sdma_assembly_unc_err_cnt),
4900[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4901 CNTR_NORMAL,
4902 access_sdma_desc_table_unc_err_cnt),
4903[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4904 CNTR_NORMAL,
4905 access_sdma_timeout_err_cnt),
4906[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4907 CNTR_NORMAL,
4908 access_sdma_header_length_err_cnt),
4909[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4910 CNTR_NORMAL,
4911 access_sdma_header_address_err_cnt),
4912[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4913 CNTR_NORMAL,
4914 access_sdma_header_select_err_cnt),
4915[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4916 CNTR_NORMAL,
4917 access_sdma_reserved_9_err_cnt),
4918[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4919 CNTR_NORMAL,
4920 access_sdma_packet_desc_overflow_err_cnt),
4921[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4922 CNTR_NORMAL,
4923 access_sdma_length_mismatch_err_cnt),
4924[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4925 CNTR_NORMAL,
4926 access_sdma_halt_err_cnt),
4927[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4928 CNTR_NORMAL,
4929 access_sdma_mem_read_err_cnt),
4930[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4931 CNTR_NORMAL,
4932 access_sdma_first_desc_err_cnt),
4933[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4934 CNTR_NORMAL,
4935 access_sdma_tail_out_of_bounds_err_cnt),
4936[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4937 CNTR_NORMAL,
4938 access_sdma_too_long_err_cnt),
4939[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4940 CNTR_NORMAL,
4941 access_sdma_gen_mismatch_err_cnt),
4942[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4943 CNTR_NORMAL,
4944 access_sdma_wrong_dw_err_cnt),
77241056
MM
4945};
4946
4947static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4948[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4949 CNTR_NORMAL),
4950[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4951 CNTR_NORMAL),
4952[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4953 CNTR_NORMAL),
4954[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4955 CNTR_NORMAL),
4956[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4957 CNTR_NORMAL),
4958[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4959 CNTR_NORMAL),
4960[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4961 CNTR_NORMAL),
4962[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4963[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4964[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4965[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
17fb4f29 4966 CNTR_SYNTH | CNTR_VL),
77241056 4967[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
17fb4f29 4968 CNTR_SYNTH | CNTR_VL),
77241056 4969[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
17fb4f29 4970 CNTR_SYNTH | CNTR_VL),
77241056
MM
4971[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4972[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4973[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
17fb4f29 4974 access_sw_link_dn_cnt),
77241056 4975[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
17fb4f29 4976 access_sw_link_up_cnt),
6d014530
DL
4977[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4978 access_sw_unknown_frame_cnt),
77241056 4979[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
17fb4f29 4980 access_sw_xmit_discards),
77241056 4981[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
17fb4f29
JJ
4982 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4983 access_sw_xmit_discards),
77241056 4984[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
17fb4f29 4985 access_xmit_constraint_errs),
77241056 4986[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
17fb4f29 4987 access_rcv_constraint_errs),
77241056
MM
4988[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4989[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4990[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4991[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4992[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4993[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4994[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4995[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4996[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4997[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4998[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
4999[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5000[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5001 access_sw_cpu_rc_acks),
5002[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
17fb4f29 5003 access_sw_cpu_rc_qacks),
77241056 5004[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
17fb4f29 5005 access_sw_cpu_rc_delayed_comp),
77241056
MM
5006[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5007[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5008[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5009[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5010[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5011[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5012[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5013[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5014[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5015[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5016[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5017[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5018[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5019[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5020[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5021[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5022[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5023[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5024[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5025[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5026[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5027[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5028[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5029[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5030[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5031[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5032[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5033[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5034[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5035[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5036[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5037[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5038[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5039[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5040[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5041[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5042[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5043[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5044[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5045[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5046[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5047[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5048[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5049[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5050[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5051[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5052[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5053[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5054[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5055[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5056[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5057[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5058[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5059[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5060[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5061[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5062[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5063[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5064[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5065[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5066[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5067[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5068[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5069[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5070[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5071[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5072[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5073[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5074[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5075[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5076[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5077[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5078[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5079[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5080[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5081[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5082[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5083[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5084[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5085[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5086};
5087
5088/* ======================================================================== */
5089
77241056
MM
 5090/* return true if this is chip revision a */
5091int is_ax(struct hfi1_devdata *dd)
5092{
5093 u8 chip_rev_minor =
5094 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5095 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5096 return (chip_rev_minor & 0xf0) == 0;
5097}
5098
 5099/* return true if this is chip revision b */
5100int is_bx(struct hfi1_devdata *dd)
5101{
5102 u8 chip_rev_minor =
5103 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5104 & CCE_REVISION_CHIP_REV_MINOR_MASK;
995deafa 5105 return (chip_rev_minor & 0xF0) == 0x10;
77241056
MM
5106}
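/*
 * Worked example (illustrative, derived only from the two helpers above):
 * a ChipRevMinor value of 0x02 has a zero high nibble, so is_ax() returns
 * true; a value of 0x11 has a high nibble of 0x10, so is_bx() returns true
 * and is_ax() returns false.
 */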
5107
5108/*
 5109 * Append string s to buffer buf. Arguments curp and lenp are the current
5110 * position and remaining length, respectively.
5111 *
5112 * return 0 on success, 1 on out of room
5113 */
5114static int append_str(char *buf, char **curp, int *lenp, const char *s)
5115{
5116 char *p = *curp;
5117 int len = *lenp;
5118 int result = 0; /* success */
5119 char c;
5120
 5121	/* add a comma if this is not the first string in the buffer */
5122 if (p != buf) {
5123 if (len == 0) {
5124 result = 1; /* out of room */
5125 goto done;
5126 }
5127 *p++ = ',';
5128 len--;
5129 }
5130
5131 /* copy the string */
5132 while ((c = *s++) != 0) {
5133 if (len == 0) {
5134 result = 1; /* out of room */
5135 goto done;
5136 }
5137 *p++ = c;
5138 len--;
5139 }
5140
5141done:
5142 /* write return values */
5143 *curp = p;
5144 *lenp = len;
5145
5146 return result;
5147}
5148
5149/*
5150 * Using the given flag table, print a comma separated string into
5151 * the buffer. End in '*' if the buffer is too short.
5152 */
5153static char *flag_string(char *buf, int buf_len, u64 flags,
17fb4f29 5154 struct flag_table *table, int table_size)
77241056
MM
5155{
5156 char extra[32];
5157 char *p = buf;
5158 int len = buf_len;
5159 int no_room = 0;
5160 int i;
5161
 5162	/* make sure there are at least 2 bytes so we can form "*" */
5163 if (len < 2)
5164 return "";
5165
5166 len--; /* leave room for a nul */
5167 for (i = 0; i < table_size; i++) {
5168 if (flags & table[i].flag) {
5169 no_room = append_str(buf, &p, &len, table[i].str);
5170 if (no_room)
5171 break;
5172 flags &= ~table[i].flag;
5173 }
5174 }
5175
5176 /* any undocumented bits left? */
5177 if (!no_room && flags) {
5178 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5179 no_room = append_str(buf, &p, &len, extra);
5180 }
5181
 5182	/* add '*' if we ran out of room */
5183 if (no_room) {
5184 /* may need to back up to add space for a '*' */
5185 if (len == 0)
5186 --p;
5187 *p++ = '*';
5188 }
5189
5190 /* add final nul - space already allocated above */
5191 *p = 0;
5192 return buf;
5193}
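/*
 * Usage sketch for flag_string() (the flag values and names here are
 * hypothetical, chosen only to illustrate the formatting): with a table
 * containing { 0x1, "ErrA" } and { 0x4, "ErrB" }, a call with flags == 0x7
 * yields "ErrA,ErrB,bits 0x2" - matched bits are printed by name, leftover
 * bits are summarized as a hex value, and a trailing '*' is appended if
 * the buffer runs out of room.
 */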
5194
5195/* first 8 CCE error interrupt source names */
5196static const char * const cce_misc_names[] = {
5197 "CceErrInt", /* 0 */
5198 "RxeErrInt", /* 1 */
5199 "MiscErrInt", /* 2 */
5200 "Reserved3", /* 3 */
5201 "PioErrInt", /* 4 */
5202 "SDmaErrInt", /* 5 */
5203 "EgressErrInt", /* 6 */
5204 "TxeErrInt" /* 7 */
5205};
5206
5207/*
5208 * Return the miscellaneous error interrupt name.
5209 */
5210static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5211{
5212 if (source < ARRAY_SIZE(cce_misc_names))
5213 strncpy(buf, cce_misc_names[source], bsize);
5214 else
17fb4f29
JJ
5215 snprintf(buf, bsize, "Reserved%u",
5216 source + IS_GENERAL_ERR_START);
77241056
MM
5217
5218 return buf;
5219}
5220
5221/*
5222 * Return the SDMA engine error interrupt name.
5223 */
5224static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5225{
5226 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5227 return buf;
5228}
5229
5230/*
5231 * Return the send context error interrupt name.
5232 */
5233static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5234{
5235 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5236 return buf;
5237}
5238
5239static const char * const various_names[] = {
5240 "PbcInt",
5241 "GpioAssertInt",
5242 "Qsfp1Int",
5243 "Qsfp2Int",
5244 "TCritInt"
5245};
5246
5247/*
5248 * Return the various interrupt name.
5249 */
5250static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5251{
5252 if (source < ARRAY_SIZE(various_names))
5253 strncpy(buf, various_names[source], bsize);
5254 else
8638b77f 5255 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
77241056
MM
5256 return buf;
5257}
5258
5259/*
5260 * Return the DC interrupt name.
5261 */
5262static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5263{
5264 static const char * const dc_int_names[] = {
5265 "common",
5266 "lcb",
5267 "8051",
5268 "lbm" /* local block merge */
5269 };
5270
5271 if (source < ARRAY_SIZE(dc_int_names))
5272 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5273 else
5274 snprintf(buf, bsize, "DCInt%u", source);
5275 return buf;
5276}
5277
5278static const char * const sdma_int_names[] = {
5279 "SDmaInt",
5280 "SdmaIdleInt",
5281 "SdmaProgressInt",
5282};
5283
5284/*
5285 * Return the SDMA engine interrupt name.
5286 */
5287static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5288{
5289 /* what interrupt */
5290 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5291 /* which engine */
5292 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5293
5294 if (likely(what < 3))
5295 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5296 else
5297 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5298 return buf;
5299}
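/*
 * Worked example, assuming the 16 SDMA engines referenced elsewhere in
 * this file: source 17 decodes to what = 17 / 16 = 1 and which =
 * 17 % 16 = 1, producing "SdmaIdleInt1"; any source of 48 or more falls
 * outside the three known interrupt types and is reported as invalid.
 */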
5300
5301/*
5302 * Return the receive available interrupt name.
5303 */
5304static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5305{
5306 snprintf(buf, bsize, "RcvAvailInt%u", source);
5307 return buf;
5308}
5309
5310/*
5311 * Return the receive urgent interrupt name.
5312 */
5313static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5314{
5315 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5316 return buf;
5317}
5318
5319/*
5320 * Return the send credit interrupt name.
5321 */
5322static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5323{
5324 snprintf(buf, bsize, "SendCreditInt%u", source);
5325 return buf;
5326}
5327
5328/*
5329 * Return the reserved interrupt name.
5330 */
5331static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5332{
5333 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5334 return buf;
5335}
5336
5337static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5338{
5339 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5340 cce_err_status_flags,
5341 ARRAY_SIZE(cce_err_status_flags));
77241056
MM
5342}
5343
5344static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5345{
5346 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5347 rxe_err_status_flags,
5348 ARRAY_SIZE(rxe_err_status_flags));
77241056
MM
5349}
5350
5351static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5352{
5353 return flag_string(buf, buf_len, flags, misc_err_status_flags,
17fb4f29 5354 ARRAY_SIZE(misc_err_status_flags));
77241056
MM
5355}
5356
5357static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5358{
5359 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5360 pio_err_status_flags,
5361 ARRAY_SIZE(pio_err_status_flags));
77241056
MM
5362}
5363
5364static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5365{
5366 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5367 sdma_err_status_flags,
5368 ARRAY_SIZE(sdma_err_status_flags));
77241056
MM
5369}
5370
5371static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5372{
5373 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5374 egress_err_status_flags,
5375 ARRAY_SIZE(egress_err_status_flags));
77241056
MM
5376}
5377
5378static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5379{
5380 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5381 egress_err_info_flags,
5382 ARRAY_SIZE(egress_err_info_flags));
77241056
MM
5383}
5384
5385static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5386{
5387 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5388 send_err_status_flags,
5389 ARRAY_SIZE(send_err_status_flags));
77241056
MM
5390}
5391
5392static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5393{
5394 char buf[96];
2c5b521a 5395 int i = 0;
77241056
MM
5396
5397 /*
 5398	 * For most of these errors, there is nothing that can be done except
5399 * report or record it.
5400 */
5401 dd_dev_info(dd, "CCE Error: %s\n",
17fb4f29 5402 cce_err_status_string(buf, sizeof(buf), reg));
77241056 5403
995deafa
MM
5404 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5405 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
77241056
MM
5406 /* this error requires a manual drop into SPC freeze mode */
5407 /* then a fix up */
5408 start_freeze_handling(dd->pport, FREEZE_SELF);
5409 }
2c5b521a
JR
5410
5411 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5412 if (reg & (1ull << i)) {
5413 incr_cntr64(&dd->cce_err_status_cnt[i]);
5414 /* maintain a counter over all cce_err_status errors */
5415 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5416 }
5417 }
77241056
MM
5418}
5419
5420/*
5421 * Check counters for receive errors that do not have an interrupt
5422 * associated with them.
5423 */
5424#define RCVERR_CHECK_TIME 10
5425static void update_rcverr_timer(unsigned long opaque)
5426{
5427 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5428 struct hfi1_pportdata *ppd = dd->pport;
5429 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5430
5431 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
17fb4f29 5432 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
77241056 5433 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
17fb4f29
JJ
5434 set_link_down_reason(
5435 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5436 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
77241056
MM
5437 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5438 }
50e5dcbe 5439 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
77241056
MM
5440
5441 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5442}
5443
5444static int init_rcverr(struct hfi1_devdata *dd)
5445{
24523a94 5446 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
77241056
MM
5447 /* Assume the hardware counter has been reset */
5448 dd->rcv_ovfl_cnt = 0;
5449 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5450}
5451
5452static void free_rcverr(struct hfi1_devdata *dd)
5453{
5454 if (dd->rcverr_timer.data)
5455 del_timer_sync(&dd->rcverr_timer);
5456 dd->rcverr_timer.data = 0;
5457}
5458
5459static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5460{
5461 char buf[96];
2c5b521a 5462 int i = 0;
77241056
MM
5463
5464 dd_dev_info(dd, "Receive Error: %s\n",
17fb4f29 5465 rxe_err_status_string(buf, sizeof(buf), reg));
77241056
MM
5466
5467 if (reg & ALL_RXE_FREEZE_ERR) {
5468 int flags = 0;
5469
5470 /*
5471 * Freeze mode recovery is disabled for the errors
5472 * in RXE_FREEZE_ABORT_MASK
5473 */
995deafa 5474 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
77241056
MM
5475 flags = FREEZE_ABORT;
5476
5477 start_freeze_handling(dd->pport, flags);
5478 }
2c5b521a
JR
5479
5480 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5481 if (reg & (1ull << i))
5482 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5483 }
77241056
MM
5484}
5485
5486static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5487{
5488 char buf[96];
2c5b521a 5489 int i = 0;
77241056
MM
5490
5491 dd_dev_info(dd, "Misc Error: %s",
17fb4f29 5492 misc_err_status_string(buf, sizeof(buf), reg));
2c5b521a
JR
5493 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5494 if (reg & (1ull << i))
5495 incr_cntr64(&dd->misc_err_status_cnt[i]);
5496 }
77241056
MM
5497}
5498
5499static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5500{
5501 char buf[96];
2c5b521a 5502 int i = 0;
77241056
MM
5503
5504 dd_dev_info(dd, "PIO Error: %s\n",
17fb4f29 5505 pio_err_status_string(buf, sizeof(buf), reg));
77241056
MM
5506
5507 if (reg & ALL_PIO_FREEZE_ERR)
5508 start_freeze_handling(dd->pport, 0);
2c5b521a
JR
5509
5510 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5511 if (reg & (1ull << i))
5512 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5513 }
77241056
MM
5514}
5515
5516static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5517{
5518 char buf[96];
2c5b521a 5519 int i = 0;
77241056
MM
5520
5521 dd_dev_info(dd, "SDMA Error: %s\n",
17fb4f29 5522 sdma_err_status_string(buf, sizeof(buf), reg));
77241056
MM
5523
5524 if (reg & ALL_SDMA_FREEZE_ERR)
5525 start_freeze_handling(dd->pport, 0);
2c5b521a
JR
5526
5527 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5528 if (reg & (1ull << i))
5529 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5530 }
77241056
MM
5531}
5532
69a00b8e 5533static inline void __count_port_discards(struct hfi1_pportdata *ppd)
77241056 5534{
69a00b8e
MM
5535 incr_cntr64(&ppd->port_xmit_discards);
5536}
77241056 5537
69a00b8e
MM
5538static void count_port_inactive(struct hfi1_devdata *dd)
5539{
5540 __count_port_discards(dd->pport);
77241056
MM
5541}
5542
5543/*
5544 * We have had a "disallowed packet" error during egress. Determine the
5545 * integrity check which failed, and update relevant error counter, etc.
5546 *
5547 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5548 * bit of state per integrity check, and so we can miss the reason for an
5549 * egress error if more than one packet fails the same integrity check
5550 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5551 */
69a00b8e
MM
5552static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5553 int vl)
77241056
MM
5554{
5555 struct hfi1_pportdata *ppd = dd->pport;
5556 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5557 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5558 char buf[96];
5559
5560 /* clear down all observed info as quickly as possible after read */
5561 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5562
5563 dd_dev_info(dd,
17fb4f29
JJ
5564 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5565 info, egress_err_info_string(buf, sizeof(buf), info), src);
77241056
MM
5566
5567 /* Eventually add other counters for each bit */
69a00b8e
MM
5568 if (info & PORT_DISCARD_EGRESS_ERRS) {
5569 int weight, i;
77241056 5570
69a00b8e 5571 /*
4c9e7aac
DL
5572 * Count all applicable bits as individual errors and
5573 * attribute them to the packet that triggered this handler.
5574 * This may not be completely accurate due to limitations
5575 * on the available hardware error information. There is
5576 * a single information register and any number of error
5577 * packets may have occurred and contributed to it before
5578 * this routine is called. This means that:
5579 * a) If multiple packets with the same error occur before
5580 * this routine is called, earlier packets are missed.
5581 * There is only a single bit for each error type.
5582 * b) Errors may not be attributed to the correct VL.
5583 * The driver is attributing all bits in the info register
5584 * to the packet that triggered this call, but bits
5585 * could be an accumulation of different packets with
5586 * different VLs.
5587 * c) A single error packet may have multiple counts attached
5588 * to it. There is no way for the driver to know if
5589 * multiple bits set in the info register are due to a
5590 * single packet or multiple packets. The driver assumes
5591 * multiple packets.
69a00b8e 5592 */
4c9e7aac 5593 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
69a00b8e
MM
5594 for (i = 0; i < weight; i++) {
5595 __count_port_discards(ppd);
5596 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5597 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5598 else if (vl == 15)
5599 incr_cntr64(&ppd->port_xmit_discards_vl
5600 [C_VL_15]);
5601 }
77241056
MM
5602 }
5603}
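/*
 * Example of the attribution policy above: if three bits covered by
 * PORT_DISCARD_EGRESS_ERRS are set when this handler runs, the port
 * discard counter is incremented three times and each increment is also
 * charged to the VL passed in (or to the C_VL_15 bucket when vl == 15),
 * even though those bits may belong to several different packets and VLs.
 */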
5604
5605/*
5606 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5607 * register. Does it represent a 'port inactive' error?
5608 */
5609static inline int port_inactive_err(u64 posn)
5610{
5611 return (posn >= SEES(TX_LINKDOWN) &&
5612 posn <= SEES(TX_INCORRECT_LINK_STATE));
5613}
5614
5615/*
5616 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5617 * register. Does it represent a 'disallowed packet' error?
5618 */
69a00b8e 5619static inline int disallowed_pkt_err(int posn)
77241056
MM
5620{
5621 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5622 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5623}
5624
69a00b8e
MM
5625/*
5626 * Input value is a bit position of one of the SDMA engine disallowed
5627 * packet errors. Return which engine. Use of this must be guarded by
5628 * disallowed_pkt_err().
5629 */
5630static inline int disallowed_pkt_engine(int posn)
5631{
5632 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5633}
5634
5635/*
 5636 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5637 * be done.
5638 */
5639static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5640{
5641 struct sdma_vl_map *m;
5642 int vl;
5643
5644 /* range check */
5645 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5646 return -1;
5647
5648 rcu_read_lock();
5649 m = rcu_dereference(dd->sdma_map);
5650 vl = m->engine_to_vl[engine];
5651 rcu_read_unlock();
5652
5653 return vl;
5654}
5655
5656/*
 5657 * Translate the send context (software index) into a VL. Return -1 if the
5658 * translation cannot be done.
5659 */
5660static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5661{
5662 struct send_context_info *sci;
5663 struct send_context *sc;
5664 int i;
5665
5666 sci = &dd->send_contexts[sw_index];
5667
5668 /* there is no information for user (PSM) and ack contexts */
44306f15 5669 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
69a00b8e
MM
5670 return -1;
5671
5672 sc = sci->sc;
5673 if (!sc)
5674 return -1;
5675 if (dd->vld[15].sc == sc)
5676 return 15;
5677 for (i = 0; i < num_vls; i++)
5678 if (dd->vld[i].sc == sc)
5679 return i;
5680
5681 return -1;
5682}
5683
77241056
MM
5684static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5685{
5686 u64 reg_copy = reg, handled = 0;
5687 char buf[96];
2c5b521a 5688 int i = 0;
77241056
MM
5689
5690 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5691 start_freeze_handling(dd->pport, 0);
69a00b8e
MM
5692 else if (is_ax(dd) &&
5693 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5694 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
77241056
MM
5695 start_freeze_handling(dd->pport, 0);
5696
5697 while (reg_copy) {
5698 int posn = fls64(reg_copy);
69a00b8e 5699 /* fls64() returns a 1-based offset, we want it zero based */
77241056 5700 int shift = posn - 1;
69a00b8e 5701 u64 mask = 1ULL << shift;
77241056
MM
5702
5703 if (port_inactive_err(shift)) {
5704 count_port_inactive(dd);
69a00b8e 5705 handled |= mask;
77241056 5706 } else if (disallowed_pkt_err(shift)) {
69a00b8e
MM
5707 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5708
5709 handle_send_egress_err_info(dd, vl);
5710 handled |= mask;
77241056 5711 }
69a00b8e 5712 reg_copy &= ~mask;
77241056
MM
5713 }
5714
5715 reg &= ~handled;
5716
5717 if (reg)
5718 dd_dev_info(dd, "Egress Error: %s\n",
17fb4f29 5719 egress_err_status_string(buf, sizeof(buf), reg));
2c5b521a
JR
5720
5721 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5722 if (reg & (1ull << i))
5723 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5724 }
77241056
MM
5725}
5726
5727static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5728{
5729 char buf[96];
2c5b521a 5730 int i = 0;
77241056
MM
5731
5732 dd_dev_info(dd, "Send Error: %s\n",
17fb4f29 5733 send_err_status_string(buf, sizeof(buf), reg));
77241056 5734
2c5b521a
JR
5735 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5736 if (reg & (1ull << i))
5737 incr_cntr64(&dd->send_err_status_cnt[i]);
5738 }
77241056
MM
5739}
5740
5741/*
5742 * The maximum number of times the error clear down will loop before
5743 * blocking a repeating error. This value is arbitrary.
5744 */
5745#define MAX_CLEAR_COUNT 20
5746
5747/*
5748 * Clear and handle an error register. All error interrupts are funneled
5749 * through here to have a central location to correctly handle single-
5750 * or multi-shot errors.
5751 *
5752 * For non per-context registers, call this routine with a context value
5753 * of 0 so the per-context offset is zero.
5754 *
5755 * If the handler loops too many times, assume that something is wrong
5756 * and can't be fixed, so mask the error bits.
5757 */
5758static void interrupt_clear_down(struct hfi1_devdata *dd,
5759 u32 context,
5760 const struct err_reg_info *eri)
5761{
5762 u64 reg;
5763 u32 count;
5764
5765 /* read in a loop until no more errors are seen */
5766 count = 0;
5767 while (1) {
5768 reg = read_kctxt_csr(dd, context, eri->status);
5769 if (reg == 0)
5770 break;
5771 write_kctxt_csr(dd, context, eri->clear, reg);
5772 if (likely(eri->handler))
5773 eri->handler(dd, context, reg);
5774 count++;
5775 if (count > MAX_CLEAR_COUNT) {
5776 u64 mask;
5777
5778 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
17fb4f29 5779 eri->desc, reg);
77241056
MM
5780 /*
5781 * Read-modify-write so any other masked bits
5782 * remain masked.
5783 */
5784 mask = read_kctxt_csr(dd, context, eri->mask);
5785 mask &= ~reg;
5786 write_kctxt_csr(dd, context, eri->mask, mask);
5787 break;
5788 }
5789 }
5790}
5791
5792/*
5793 * CCE block "misc" interrupt. Source is < 16.
5794 */
5795static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5796{
5797 const struct err_reg_info *eri = &misc_errs[source];
5798
5799 if (eri->handler) {
5800 interrupt_clear_down(dd, 0, eri);
5801 } else {
5802 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
17fb4f29 5803 source);
77241056
MM
5804 }
5805}
5806
5807static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5808{
5809 return flag_string(buf, buf_len, flags,
17fb4f29
JJ
5810 sc_err_status_flags,
5811 ARRAY_SIZE(sc_err_status_flags));
77241056
MM
5812}
5813
5814/*
5815 * Send context error interrupt. Source (hw_context) is < 160.
5816 *
5817 * All send context errors cause the send context to halt. The normal
5818 * clear-down mechanism cannot be used because we cannot clear the
5819 * error bits until several other long-running items are done first.
5820 * This is OK because with the context halted, nothing else is going
5821 * to happen on it anyway.
5822 */
5823static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5824 unsigned int hw_context)
5825{
5826 struct send_context_info *sci;
5827 struct send_context *sc;
5828 char flags[96];
5829 u64 status;
5830 u32 sw_index;
2c5b521a 5831 int i = 0;
77241056
MM
5832
5833 sw_index = dd->hw_to_sw[hw_context];
5834 if (sw_index >= dd->num_send_contexts) {
5835 dd_dev_err(dd,
17fb4f29
JJ
5836 "out of range sw index %u for send context %u\n",
5837 sw_index, hw_context);
77241056
MM
5838 return;
5839 }
5840 sci = &dd->send_contexts[sw_index];
5841 sc = sci->sc;
5842 if (!sc) {
5843 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
17fb4f29 5844 sw_index, hw_context);
77241056
MM
5845 return;
5846 }
5847
5848 /* tell the software that a halt has begun */
5849 sc_stop(sc, SCF_HALTED);
5850
5851 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5852
5853 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
17fb4f29
JJ
5854 send_context_err_status_string(flags, sizeof(flags),
5855 status));
77241056
MM
5856
5857 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
69a00b8e 5858 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
77241056
MM
5859
5860 /*
5861 * Automatically restart halted kernel contexts out of interrupt
5862 * context. User contexts must ask the driver to restart the context.
5863 */
5864 if (sc->type != SC_USER)
5865 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
2c5b521a
JR
5866
5867 /*
5868 * Update the counters for the corresponding status bits.
5869 * Note that these particular counters are aggregated over all
5870 * 160 contexts.
5871 */
5872 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5873 if (status & (1ull << i))
5874 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5875 }
77241056
MM
5876}
5877
5878static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5879 unsigned int source, u64 status)
5880{
5881 struct sdma_engine *sde;
2c5b521a 5882 int i = 0;
77241056
MM
5883
5884 sde = &dd->per_sdma[source];
5885#ifdef CONFIG_SDMA_VERBOSITY
5886 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5887 slashstrip(__FILE__), __LINE__, __func__);
5888 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5889 sde->this_idx, source, (unsigned long long)status);
5890#endif
a699c6c2 5891 sde->err_cnt++;
77241056 5892 sdma_engine_error(sde, status);
2c5b521a
JR
5893
5894 /*
5895 * Update the counters for the corresponding status bits.
5896 * Note that these particular counters are aggregated over
5897 * all 16 DMA engines.
5898 */
5899 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5900 if (status & (1ull << i))
5901 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5902 }
77241056
MM
5903}
5904
5905/*
5906 * CCE block SDMA error interrupt. Source is < 16.
5907 */
5908static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5909{
5910#ifdef CONFIG_SDMA_VERBOSITY
5911 struct sdma_engine *sde = &dd->per_sdma[source];
5912
5913 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5914 slashstrip(__FILE__), __LINE__, __func__);
5915 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5916 source);
5917 sdma_dumpstate(sde);
5918#endif
5919 interrupt_clear_down(dd, source, &sdma_eng_err);
5920}
5921
5922/*
5923 * CCE block "various" interrupt. Source is < 8.
5924 */
5925static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5926{
5927 const struct err_reg_info *eri = &various_err[source];
5928
5929 /*
5930 * TCritInt cannot go through interrupt_clear_down()
5931 * because it is not a second tier interrupt. The handler
5932 * should be called directly.
5933 */
5934 if (source == TCRIT_INT_SOURCE)
5935 handle_temp_err(dd);
5936 else if (eri->handler)
5937 interrupt_clear_down(dd, 0, eri);
5938 else
5939 dd_dev_info(dd,
17fb4f29
JJ
5940 "%s: Unimplemented/reserved interrupt %d\n",
5941 __func__, source);
77241056
MM
5942}
5943
5944static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5945{
8ebd4cf1 5946 /* src_ctx is always zero */
77241056
MM
5947 struct hfi1_pportdata *ppd = dd->pport;
5948 unsigned long flags;
5949 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5950
5951 if (reg & QSFP_HFI0_MODPRST_N) {
77241056 5952 if (!qsfp_mod_present(ppd)) {
e8aa284b
EH
5953 dd_dev_info(dd, "%s: QSFP module removed\n",
5954 __func__);
5955
77241056
MM
5956 ppd->driver_link_ready = 0;
5957 /*
5958 * Cable removed, reset all our information about the
5959 * cache and cable capabilities
5960 */
5961
5962 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5963 /*
5964 * We don't set cache_refresh_required here as we expect
5965 * an interrupt when a cable is inserted
5966 */
5967 ppd->qsfp_info.cache_valid = 0;
8ebd4cf1
EH
5968 ppd->qsfp_info.reset_needed = 0;
5969 ppd->qsfp_info.limiting_active = 0;
77241056 5970 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
17fb4f29 5971 flags);
8ebd4cf1
EH
5972 /* Invert the ModPresent pin now to detect plug-in */
5973 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5974 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
a9c05e35
BM
5975
5976 if ((ppd->offline_disabled_reason >
5977 HFI1_ODR_MASK(
e1bf0d5e 5978 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
a9c05e35
BM
5979 (ppd->offline_disabled_reason ==
5980 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5981 ppd->offline_disabled_reason =
5982 HFI1_ODR_MASK(
e1bf0d5e 5983 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
a9c05e35 5984
77241056
MM
5985 if (ppd->host_link_state == HLS_DN_POLL) {
5986 /*
5987 * The link is still in POLL. This means
5988 * that the normal link down processing
5989 * will not happen. We have to do it here
5990 * before turning the DC off.
5991 */
5992 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5993 }
5994 } else {
e8aa284b
EH
5995 dd_dev_info(dd, "%s: QSFP module inserted\n",
5996 __func__);
5997
77241056
MM
5998 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5999 ppd->qsfp_info.cache_valid = 0;
6000 ppd->qsfp_info.cache_refresh_required = 1;
6001 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
17fb4f29 6002 flags);
77241056 6003
8ebd4cf1
EH
6004 /*
6005 * Stop inversion of ModPresent pin to detect
6006 * removal of the cable
6007 */
77241056 6008 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
8ebd4cf1
EH
6009 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6010 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6011
6012 ppd->offline_disabled_reason =
6013 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
77241056
MM
6014 }
6015 }
6016
6017 if (reg & QSFP_HFI0_INT_N) {
e8aa284b 6018 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
17fb4f29 6019 __func__);
77241056
MM
6020 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6021 ppd->qsfp_info.check_interrupt_flags = 1;
77241056
MM
6022 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6023 }
6024
6025 /* Schedule the QSFP work only if there is a cable attached. */
6026 if (qsfp_mod_present(ppd))
6027 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6028}
6029
6030static int request_host_lcb_access(struct hfi1_devdata *dd)
6031{
6032 int ret;
6033
6034 ret = do_8051_command(dd, HCMD_MISC,
17fb4f29
JJ
6035 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6036 LOAD_DATA_FIELD_ID_SHIFT, NULL);
77241056
MM
6037 if (ret != HCMD_SUCCESS) {
6038 dd_dev_err(dd, "%s: command failed with error %d\n",
17fb4f29 6039 __func__, ret);
77241056
MM
6040 }
6041 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6042}
6043
6044static int request_8051_lcb_access(struct hfi1_devdata *dd)
6045{
6046 int ret;
6047
6048 ret = do_8051_command(dd, HCMD_MISC,
17fb4f29
JJ
6049 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6050 LOAD_DATA_FIELD_ID_SHIFT, NULL);
77241056
MM
6051 if (ret != HCMD_SUCCESS) {
6052 dd_dev_err(dd, "%s: command failed with error %d\n",
17fb4f29 6053 __func__, ret);
77241056
MM
6054 }
6055 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6056}
6057
6058/*
6059 * Set the LCB selector - allow host access. The DCC selector always
6060 * points to the host.
6061 */
6062static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6063{
6064 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
17fb4f29
JJ
6065 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6066 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
77241056
MM
6067}
6068
6069/*
6070 * Clear the LCB selector - allow 8051 access. The DCC selector always
6071 * points to the host.
6072 */
6073static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6074{
6075 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
17fb4f29 6076 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
77241056
MM
6077}
6078
6079/*
6080 * Acquire LCB access from the 8051. If the host already has access,
6081 * just increment a counter. Otherwise, inform the 8051 that the
6082 * host is taking access.
6083 *
6084 * Returns:
6085 * 0 on success
6086 * -EBUSY if the 8051 has control and cannot be disturbed
6087 * -errno if unable to acquire access from the 8051
6088 */
6089int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6090{
6091 struct hfi1_pportdata *ppd = dd->pport;
6092 int ret = 0;
6093
6094 /*
6095 * Use the host link state lock so the operation of this routine
6096 * { link state check, selector change, count increment } can occur
6097 * as a unit against a link state change. Otherwise there is a
6098 * race between the state change and the count increment.
6099 */
6100 if (sleep_ok) {
6101 mutex_lock(&ppd->hls_lock);
6102 } else {
951842b0 6103 while (!mutex_trylock(&ppd->hls_lock))
77241056
MM
6104 udelay(1);
6105 }
6106
6107 /* this access is valid only when the link is up */
6108 if ((ppd->host_link_state & HLS_UP) == 0) {
6109 dd_dev_info(dd, "%s: link state %s not up\n",
17fb4f29 6110 __func__, link_state_name(ppd->host_link_state));
77241056
MM
6111 ret = -EBUSY;
6112 goto done;
6113 }
6114
6115 if (dd->lcb_access_count == 0) {
6116 ret = request_host_lcb_access(dd);
6117 if (ret) {
6118 dd_dev_err(dd,
17fb4f29
JJ
6119 "%s: unable to acquire LCB access, err %d\n",
6120 __func__, ret);
77241056
MM
6121 goto done;
6122 }
6123 set_host_lcb_access(dd);
6124 }
6125 dd->lcb_access_count++;
6126done:
6127 mutex_unlock(&ppd->hls_lock);
6128 return ret;
6129}
6130
6131/*
6132 * Release LCB access by decrementing the use count. If the count is moving
6133 * from 1 to 0, inform 8051 that it has control back.
6134 *
6135 * Returns:
6136 * 0 on success
6137 * -errno if unable to release access to the 8051
6138 */
6139int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6140{
6141 int ret = 0;
6142
6143 /*
6144 * Use the host link state lock because the acquire needed it.
6145 * Here, we only need to keep { selector change, count decrement }
6146 * as a unit.
6147 */
6148 if (sleep_ok) {
6149 mutex_lock(&dd->pport->hls_lock);
6150 } else {
951842b0 6151 while (!mutex_trylock(&dd->pport->hls_lock))
77241056
MM
6152 udelay(1);
6153 }
6154
6155 if (dd->lcb_access_count == 0) {
6156 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
17fb4f29 6157 __func__);
77241056
MM
6158 goto done;
6159 }
6160
6161 if (dd->lcb_access_count == 1) {
6162 set_8051_lcb_access(dd);
6163 ret = request_8051_lcb_access(dd);
6164 if (ret) {
6165 dd_dev_err(dd,
17fb4f29
JJ
6166 "%s: unable to release LCB access, err %d\n",
6167 __func__, ret);
77241056
MM
6168 /* restore host access if the grant didn't work */
6169 set_host_lcb_access(dd);
6170 goto done;
6171 }
6172 }
6173 dd->lcb_access_count--;
6174done:
6175 mutex_unlock(&dd->pport->hls_lock);
6176 return ret;
6177}
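/*
 * Typical usage sketch for the pair above (illustrative only; the CSR
 * name below is a placeholder, not a specific LCB register defined in
 * this driver):
 *
 *	if (!acquire_lcb_access(dd, 1)) {	(sleep_ok, may block)
 *		reg = read_csr(dd, SOME_LCB_CSR);
 *		...
 *		release_lcb_access(dd, 1);
 *	}
 *
 * Nested acquisitions are cheap: only the first acquire and the last
 * release exchange commands with the 8051; intermediate calls simply
 * adjust lcb_access_count under the host link state lock.
 */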
6178
6179/*
6180 * Initialize LCB access variables and state. Called during driver load,
6181 * after most of the initialization is finished.
6182 *
6183 * The DC default is LCB access on for the host. The driver defaults to
6184 * leaving access to the 8051. Assign access now - this constrains the call
6185 * to this routine to be after all LCB set-up is done. In particular, after
 6186 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6187 */
6188static void init_lcb_access(struct hfi1_devdata *dd)
6189{
6190 dd->lcb_access_count = 0;
6191}
6192
6193/*
6194 * Write a response back to a 8051 request.
6195 */
6196static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6197{
6198 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
17fb4f29
JJ
6199 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6200 (u64)return_code <<
6201 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6202 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
77241056
MM
6203}
6204
6205/*
cbac386a 6206 * Handle host requests from the 8051.
77241056 6207 */
145dd2b3 6208static void handle_8051_request(struct hfi1_pportdata *ppd)
77241056 6209{
cbac386a 6210 struct hfi1_devdata *dd = ppd->dd;
77241056 6211 u64 reg;
cbac386a 6212 u16 data = 0;
145dd2b3 6213 u8 type;
77241056
MM
6214
6215 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6216 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6217 return; /* no request */
6218
6219 /* zero out COMPLETED so the response is seen */
6220 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6221
6222 /* extract request details */
6223 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6224 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6225 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6226 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6227
6228 switch (type) {
6229 case HREQ_LOAD_CONFIG:
6230 case HREQ_SAVE_CONFIG:
6231 case HREQ_READ_CONFIG:
6232 case HREQ_SET_TX_EQ_ABS:
6233 case HREQ_SET_TX_EQ_REL:
145dd2b3 6234 case HREQ_ENABLE:
77241056 6235 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
17fb4f29 6236 type);
77241056
MM
6237 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6238 break;
77241056
MM
6239 case HREQ_CONFIG_DONE:
6240 hreq_response(dd, HREQ_SUCCESS, 0);
6241 break;
6242
6243 case HREQ_INTERFACE_TEST:
6244 hreq_response(dd, HREQ_SUCCESS, data);
6245 break;
77241056
MM
6246 default:
6247 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6248 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6249 break;
6250 }
6251}
6252
6253static void write_global_credit(struct hfi1_devdata *dd,
6254 u8 vau, u16 total, u16 shared)
6255{
6256 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
17fb4f29
JJ
6257 ((u64)total <<
6258 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6259 ((u64)shared <<
6260 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6261 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
77241056
MM
6262}
6263
6264/*
6265 * Set up initial VL15 credits of the remote. Assumes the rest of
 6266 * the CM credit registers are zero from a previous global or credit reset.
6267 */
6268void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6269{
6270 /* leave shared count at zero for both global and VL15 */
6271 write_global_credit(dd, vau, vl15buf, 0);
6272
6273 /* We may need some credits for another VL when sending packets
6274 * with the snoop interface. Dividing it down the middle for VL15
6275 * and VL0 should suffice.
6276 */
6277 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6278 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6279 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6280 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6281 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6282 } else {
6283 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6284 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6285 }
6286}
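/*
 * Example: with vl15buf == 16, snoop mode splits the credits as 8
 * dedicated to VL15 and 8 dedicated to VL0; otherwise all 16 are
 * dedicated to VL15. In both cases the shared limit written by
 * write_global_credit() above is zero.
 */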
6287
6288/*
6289 * Zero all credit details from the previous connection and
6290 * reset the CM manager's internal counters.
6291 */
6292void reset_link_credits(struct hfi1_devdata *dd)
6293{
6294 int i;
6295
6296 /* remove all previous VL credit limits */
6297 for (i = 0; i < TXE_NUM_DATA_VL; i++)
8638b77f 6298 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
77241056
MM
6299 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6300 write_global_credit(dd, 0, 0, 0);
6301 /* reset the CM block */
6302 pio_send_control(dd, PSC_CM_RESET);
6303}
6304
6305/* convert a vCU to a CU */
6306static u32 vcu_to_cu(u8 vcu)
6307{
6308 return 1 << vcu;
6309}
6310
6311/* convert a CU to a vCU */
6312static u8 cu_to_vcu(u32 cu)
6313{
6314 return ilog2(cu);
6315}
6316
6317/* convert a vAU to an AU */
6318static u32 vau_to_au(u8 vau)
6319{
6320 return 8 * (1 << vau);
6321}
6322
6323static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6324{
6325 ppd->sm_trap_qp = 0x0;
6326 ppd->sa_qp = 0x1;
6327}
6328
6329/*
6330 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6331 */
6332static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6333{
6334 u64 reg;
6335
6336 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6337 write_csr(dd, DC_LCB_CFG_RUN, 0);
6338 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6339 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
17fb4f29 6340 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
77241056
MM
6341 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6342 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6343 reg = read_csr(dd, DCC_CFG_RESET);
17fb4f29
JJ
6344 write_csr(dd, DCC_CFG_RESET, reg |
6345 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6346 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
50e5dcbe 6347 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
77241056
MM
6348 if (!abort) {
6349 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6350 write_csr(dd, DCC_CFG_RESET, reg);
6351 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6352 }
6353}
6354
6355/*
6356 * This routine should be called after the link has been transitioned to
6357 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6358 * reset).
6359 *
6360 * The expectation is that the caller of this routine would have taken
6361 * care of properly transitioning the link into the correct state.
6362 */
6363static void dc_shutdown(struct hfi1_devdata *dd)
6364{
6365 unsigned long flags;
6366
6367 spin_lock_irqsave(&dd->dc8051_lock, flags);
6368 if (dd->dc_shutdown) {
6369 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6370 return;
6371 }
6372 dd->dc_shutdown = 1;
6373 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6374 /* Shutdown the LCB */
6375 lcb_shutdown(dd, 1);
4d114fdd
JJ
6376 /*
6377 * Going to OFFLINE would have caused the 8051 to put the
77241056 6378 * SerDes into reset already. Just need to shut down the 8051
4d114fdd
JJ
6379 * itself.
6380 */
77241056
MM
6381 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6382}
6383
4d114fdd
JJ
6384/*
6385 * Calling this after the DC has been brought out of reset should not
6386 * do any damage.
6387 */
77241056
MM
6388static void dc_start(struct hfi1_devdata *dd)
6389{
6390 unsigned long flags;
6391 int ret;
6392
6393 spin_lock_irqsave(&dd->dc8051_lock, flags);
6394 if (!dd->dc_shutdown)
6395 goto done;
6396 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6397 /* Take the 8051 out of reset */
6398 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6399 /* Wait until 8051 is ready */
6400 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6401 if (ret) {
6402 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
17fb4f29 6403 __func__);
77241056
MM
6404 }
6405 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6406 write_csr(dd, DCC_CFG_RESET, 0x10);
6407 /* lcb_shutdown() with abort=1 does not restore these */
6408 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6409 spin_lock_irqsave(&dd->dc8051_lock, flags);
6410 dd->dc_shutdown = 0;
6411done:
6412 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6413}
6414
6415/*
6416 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6417 */
6418static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6419{
6420 u64 rx_radr, tx_radr;
6421 u32 version;
6422
6423 if (dd->icode != ICODE_FPGA_EMULATION)
6424 return;
6425
6426 /*
6427 * These LCB defaults on emulator _s are good, nothing to do here:
6428 * LCB_CFG_TX_FIFOS_RADR
6429 * LCB_CFG_RX_FIFOS_RADR
6430 * LCB_CFG_LN_DCLK
6431 * LCB_CFG_IGNORE_LOST_RCLK
6432 */
6433 if (is_emulator_s(dd))
6434 return;
6435 /* else this is _p */
6436
6437 version = emulator_rev(dd);
995deafa 6438 if (!is_ax(dd))
77241056
MM
6439 version = 0x2d; /* all B0 use 0x2d or higher settings */
6440
6441 if (version <= 0x12) {
6442 /* release 0x12 and below */
6443
6444 /*
6445 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6446 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6447 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6448 */
6449 rx_radr =
6450 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6451 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6452 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6453 /*
6454 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6455 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6456 */
6457 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6458 } else if (version <= 0x18) {
6459 /* release 0x13 up to 0x18 */
6460 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6461 rx_radr =
6462 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6463 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6464 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6465 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6466 } else if (version == 0x19) {
6467 /* release 0x19 */
6468 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6469 rx_radr =
6470 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6471 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6472 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6473 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6474 } else if (version == 0x1a) {
6475 /* release 0x1a */
6476 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6477 rx_radr =
6478 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6479 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6480 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6481 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6482 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6483 } else {
6484 /* release 0x1b and higher */
6485 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6486 rx_radr =
6487 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6488 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6489 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6490 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6491 }
6492
6493 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6494 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6495 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
17fb4f29 6496 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
77241056
MM
6497 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6498}
6499
6500/*
6501 * Handle a SMA idle message
6502 *
6503 * This is a work-queue function outside of the interrupt.
6504 */
6505void handle_sma_message(struct work_struct *work)
6506{
6507 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6508 sma_message_work);
6509 struct hfi1_devdata *dd = ppd->dd;
6510 u64 msg;
6511 int ret;
6512
4d114fdd
JJ
6513 /*
6514 * msg is bytes 1-4 of the 40-bit idle message - the command code
6515 * is stripped off
6516 */
77241056
MM
6517 ret = read_idle_sma(dd, &msg);
6518 if (ret)
6519 return;
6520 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6521 /*
6522 * React to the SMA message. Byte[1] (0 for us) is the command.
6523 */
6524 switch (msg & 0xff) {
6525 case SMA_IDLE_ARM:
6526 /*
6527 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6528 * State Transitions
6529 *
6530 * Only expected in INIT or ARMED, discard otherwise.
6531 */
6532 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6533 ppd->neighbor_normal = 1;
6534 break;
6535 case SMA_IDLE_ACTIVE:
6536 /*
6537 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6538 * State Transitions
6539 *
6540 * Can activate the node. Discard otherwise.
6541 */
d0d236ea
JJ
6542 if (ppd->host_link_state == HLS_UP_ARMED &&
6543 ppd->is_active_optimize_enabled) {
77241056
MM
6544 ppd->neighbor_normal = 1;
6545 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6546 if (ret)
6547 dd_dev_err(
6548 dd,
6549 "%s: received Active SMA idle message, couldn't set link to Active\n",
6550 __func__);
6551 }
6552 break;
6553 default:
6554 dd_dev_err(dd,
17fb4f29
JJ
6555 "%s: received unexpected SMA idle message 0x%llx\n",
6556 __func__, msg);
77241056
MM
6557 break;
6558 }
6559}
6560
6561static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6562{
6563 u64 rcvctrl;
6564 unsigned long flags;
6565
6566 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6567 rcvctrl = read_csr(dd, RCV_CTRL);
6568 rcvctrl |= add;
6569 rcvctrl &= ~clear;
6570 write_csr(dd, RCV_CTRL, rcvctrl);
6571 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6572}
6573
6574static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6575{
6576 adjust_rcvctrl(dd, add, 0);
6577}
6578
6579static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6580{
6581 adjust_rcvctrl(dd, 0, clear);
6582}
6583
6584/*
6585 * Called from all interrupt handlers to start handling an SPC freeze.
6586 */
6587void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6588{
6589 struct hfi1_devdata *dd = ppd->dd;
6590 struct send_context *sc;
6591 int i;
6592
6593 if (flags & FREEZE_SELF)
6594 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6595
6596 /* enter frozen mode */
6597 dd->flags |= HFI1_FROZEN;
6598
6599 /* notify all SDMA engines that they are going into a freeze */
6600 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6601
6602 /* do halt pre-handling on all enabled send contexts */
6603 for (i = 0; i < dd->num_send_contexts; i++) {
6604 sc = dd->send_contexts[i].sc;
6605 if (sc && (sc->flags & SCF_ENABLED))
6606 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6607 }
6608
6609 /* Send contexts are frozen. Notify user space */
6610 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6611
6612 if (flags & FREEZE_ABORT) {
6613 dd_dev_err(dd,
6614 "Aborted freeze recovery. Please REBOOT system\n");
6615 return;
6616 }
6617 /* queue non-interrupt handler */
6618 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6619}
6620
6621/*
6622 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6623 * depending on the "freeze" parameter.
6624 *
6625 * No need to return an error if it times out; our only option
6626 * is to proceed anyway.
6627 */
6628static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6629{
6630 unsigned long timeout;
6631 u64 reg;
6632
6633 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6634 while (1) {
6635 reg = read_csr(dd, CCE_STATUS);
6636 if (freeze) {
6637 /* waiting until all indicators are set */
6638 if ((reg & ALL_FROZE) == ALL_FROZE)
6639 return; /* all done */
6640 } else {
6641 /* waiting until all indicators are clear */
6642 if ((reg & ALL_FROZE) == 0)
6643 return; /* all done */
6644 }
6645
6646 if (time_after(jiffies, timeout)) {
6647 dd_dev_err(dd,
17fb4f29
JJ
6648 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6649 freeze ? "" : "un", reg & ALL_FROZE,
6650 freeze ? ALL_FROZE : 0ull);
77241056
MM
6651 return;
6652 }
6653 usleep_range(80, 120);
6654 }
6655}
6656
6657/*
6658 * Do all freeze handling for the RXE block.
6659 */
6660static void rxe_freeze(struct hfi1_devdata *dd)
6661{
6662 int i;
6663
6664 /* disable port */
6665 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6666
6667 /* disable all receive contexts */
6668 for (i = 0; i < dd->num_rcv_contexts; i++)
6669 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6670}
6671
6672/*
6673 * Unfreeze handling for the RXE block - kernel contexts only.
6674 * This will also enable the port. User contexts will do unfreeze
6675 * handling on a per-context basis as they call into the driver.
6676 *
6677 */
6678static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6679{
566c157c 6680 u32 rcvmask;
77241056
MM
6681 int i;
6682
6683 /* enable all kernel contexts */
566c157c
MH
6684 for (i = 0; i < dd->n_krcv_queues; i++) {
6685 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6686 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6687 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6688 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6689 hfi1_rcvctrl(dd, rcvmask, i);
6690 }
77241056
MM
6691
6692 /* enable port */
6693 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6694}
6695
6696/*
6697 * Non-interrupt SPC freeze handling.
6698 *
6699 * This is a work-queue function outside of the triggering interrupt.
6700 */
6701void handle_freeze(struct work_struct *work)
6702{
6703 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6704 freeze_work);
6705 struct hfi1_devdata *dd = ppd->dd;
6706
6707 /* wait for freeze indicators on all affected blocks */
77241056
MM
6708 wait_for_freeze_status(dd, 1);
6709
6710 /* SPC is now frozen */
6711
6712 /* do send PIO freeze steps */
6713 pio_freeze(dd);
6714
6715 /* do send DMA freeze steps */
6716 sdma_freeze(dd);
6717
6718 /* do send egress freeze steps - nothing to do */
6719
6720 /* do receive freeze steps */
6721 rxe_freeze(dd);
6722
6723 /*
6724 * Unfreeze the hardware - clear the freeze, wait for each
6725 * block's frozen bit to clear, then clear the frozen flag.
6726 */
6727 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6728 wait_for_freeze_status(dd, 0);
6729
995deafa 6730 if (is_ax(dd)) {
77241056
MM
6731 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6732 wait_for_freeze_status(dd, 1);
6733 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6734 wait_for_freeze_status(dd, 0);
6735 }
6736
6737 /* do send PIO unfreeze steps for kernel contexts */
6738 pio_kernel_unfreeze(dd);
6739
6740 /* do send DMA unfreeze steps */
6741 sdma_unfreeze(dd);
6742
6743 /* do send egress unfreeze steps - nothing to do */
6744
6745 /* do receive unfreeze steps for kernel contexts */
6746 rxe_kernel_unfreeze(dd);
6747
6748 /*
6749 * The unfreeze procedure touches global device registers when
6750 * it disables and re-enables RXE. Mark the device unfrozen
6751 * after all that is done so other parts of the driver waiting
6752 * for the device to unfreeze don't do things out of order.
6753 *
6754 * The above implies that the meaning of the HFI1_FROZEN flag is
6755 * "Device has gone into freeze mode and freeze mode handling
6756 * is still in progress."
6757 *
6758 * The flag will be removed when freeze mode processing has
6759 * completed.
6760 */
6761 dd->flags &= ~HFI1_FROZEN;
6762 wake_up(&dd->event_queue);
6763
6764 /* no longer frozen */
77241056
MM
6765}
6766
6767/*
6768 * Handle a link up interrupt from the 8051.
6769 *
6770 * This is a work-queue function outside of the interrupt.
6771 */
6772void handle_link_up(struct work_struct *work)
6773{
6774 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
17fb4f29 6775 link_up_work);
77241056
MM
6776 set_link_state(ppd, HLS_UP_INIT);
6777
6778 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6779 read_ltp_rtt(ppd->dd);
6780 /*
6781 * OPA specifies that certain counters are cleared on a transition
6782 * to link up, so do that.
6783 */
6784 clear_linkup_counters(ppd->dd);
6785 /*
6786 * And (re)set link up default values.
6787 */
6788 set_linkup_defaults(ppd);
6789
6790 /* enforce link speed enabled */
6791 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6792 /* oops - current speed is not enabled, bounce */
6793 dd_dev_err(ppd->dd,
17fb4f29
JJ
6794 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6795 ppd->link_speed_active, ppd->link_speed_enabled);
77241056 6796 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
17fb4f29 6797 OPA_LINKDOWN_REASON_SPEED_POLICY);
77241056 6798 set_link_state(ppd, HLS_DN_OFFLINE);
8ebd4cf1 6799 tune_serdes(ppd);
77241056
MM
6800 start_link(ppd);
6801 }
6802}
6803
4d114fdd
JJ
6804/*
6805 * Several pieces of LNI information were cached for SMA in ppd.
6806 * Reset these on link down
6807 */
77241056
MM
6808static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6809{
6810 ppd->neighbor_guid = 0;
6811 ppd->neighbor_port_number = 0;
6812 ppd->neighbor_type = 0;
6813 ppd->neighbor_fm_security = 0;
6814}
6815
feb831dd
DL
6816static const char * const link_down_reason_strs[] = {
6817 [OPA_LINKDOWN_REASON_NONE] = "None",
6818 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6819 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6820 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6821 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6822 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6823 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6824 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6825 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6826 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6827 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6828 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6829 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6830 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6831 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6832 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6833 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6834 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6835 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6836 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6837 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6838 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6839 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6840 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6841 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6842 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6843 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6844 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6845 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6846 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6847 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6848 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6849 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6850 "Excessive buffer overrun",
6851 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6852 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6853 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6854 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6855 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6856 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6857 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6858 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6859 "Local media not installed",
6860 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6861 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6862 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6863 "End to end not installed",
6864 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6865 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6866 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6867 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6868 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6869 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6870};
6871
6872/* return the neighbor link down reason string */
6873static const char *link_down_reason_str(u8 reason)
6874{
6875 const char *str = NULL;
6876
6877 if (reason < ARRAY_SIZE(link_down_reason_strs))
6878 str = link_down_reason_strs[reason];
6879 if (!str)
6880 str = "(invalid)";
6881
6882 return str;
6883}
6884
77241056
MM
6885/*
6886 * Handle a link down interrupt from the 8051.
6887 *
6888 * This is a work-queue function outside of the interrupt.
6889 */
6890void handle_link_down(struct work_struct *work)
6891{
6892 u8 lcl_reason, neigh_reason = 0;
feb831dd 6893 u8 link_down_reason;
77241056 6894 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
feb831dd
DL
6895 link_down_work);
6896 int was_up;
6897 static const char ldr_str[] = "Link down reason: ";
77241056 6898
8ebd4cf1
EH
6899 if ((ppd->host_link_state &
6900 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6901 ppd->port_type == PORT_TYPE_FIXED)
6902 ppd->offline_disabled_reason =
6903 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6904
6905 /* Go offline first, then deal with reading/writing through 8051 */
feb831dd 6906 was_up = !!(ppd->host_link_state & HLS_UP);
77241056
MM
6907 set_link_state(ppd, HLS_DN_OFFLINE);
6908
feb831dd
DL
6909 if (was_up) {
6910 lcl_reason = 0;
6911 /* link down reason is only valid if the link was up */
6912 read_link_down_reason(ppd->dd, &link_down_reason);
6913 switch (link_down_reason) {
6914 case LDR_LINK_TRANSFER_ACTIVE_LOW:
6915 /* the link went down, no idle message reason */
6916 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6917 ldr_str);
6918 break;
6919 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6920 /*
6921 * The neighbor reason is only valid if an idle message
6922 * was received for it.
6923 */
6924 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6925 dd_dev_info(ppd->dd,
6926 "%sNeighbor link down message %d, %s\n",
6927 ldr_str, neigh_reason,
6928 link_down_reason_str(neigh_reason));
6929 break;
6930 case LDR_RECEIVED_HOST_OFFLINE_REQ:
6931 dd_dev_info(ppd->dd,
6932 "%sHost requested link to go offline\n",
6933 ldr_str);
6934 break;
6935 default:
6936 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6937 ldr_str, link_down_reason);
6938 break;
6939 }
77241056 6940
feb831dd
DL
6941 /*
6942 * If no reason, assume peer-initiated but missed
6943 * LinkGoingDown idle flits.
6944 */
6945 if (neigh_reason == 0)
6946 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6947 } else {
6948 /* went down while polling or going up */
6949 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6950 }
77241056
MM
6951
6952 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6953
6954 reset_neighbor_info(ppd);
6955
6956 /* disable the port */
6957 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6958
4d114fdd
JJ
6959 /*
6960 * If there is no cable attached, turn the DC off. Otherwise,
6961 * start the link bring up.
6962 */
623bba2d 6963 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) {
77241056 6964 dc_shutdown(ppd->dd);
8ebd4cf1
EH
6965 } else {
6966 tune_serdes(ppd);
77241056 6967 start_link(ppd);
8ebd4cf1 6968 }
77241056
MM
6969}
6970
6971void handle_link_bounce(struct work_struct *work)
6972{
6973 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6974 link_bounce_work);
6975
6976 /*
6977 * Only do something if the link is currently up.
6978 */
6979 if (ppd->host_link_state & HLS_UP) {
6980 set_link_state(ppd, HLS_DN_OFFLINE);
8ebd4cf1 6981 tune_serdes(ppd);
77241056
MM
6982 start_link(ppd);
6983 } else {
6984 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
17fb4f29 6985 __func__, link_state_name(ppd->host_link_state));
77241056
MM
6986 }
6987}
6988
6989/*
6990 * Mask conversion: Capability exchange to Port LTP. The capability
6991 * exchange has an implicit 16b CRC that is mandatory.
6992 */
6993static int cap_to_port_ltp(int cap)
6994{
6995 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6996
6997 if (cap & CAP_CRC_14B)
6998 port_ltp |= PORT_LTP_CRC_MODE_14;
6999 if (cap & CAP_CRC_48B)
7000 port_ltp |= PORT_LTP_CRC_MODE_48;
7001 if (cap & CAP_CRC_12B_16B_PER_LANE)
7002 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7003
7004 return port_ltp;
7005}
7006
7007/*
7008 * Convert an OPA Port LTP mask to capability mask
7009 */
7010int port_ltp_to_cap(int port_ltp)
7011{
7012 int cap_mask = 0;
7013
7014 if (port_ltp & PORT_LTP_CRC_MODE_14)
7015 cap_mask |= CAP_CRC_14B;
7016 if (port_ltp & PORT_LTP_CRC_MODE_48)
7017 cap_mask |= CAP_CRC_48B;
7018 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7019 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7020
7021 return cap_mask;
7022}
7023
7024/*
7025 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7026 */
7027static int lcb_to_port_ltp(int lcb_crc)
7028{
7029 int port_ltp = 0;
7030
7031 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7032 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7033 else if (lcb_crc == LCB_CRC_48B)
7034 port_ltp = PORT_LTP_CRC_MODE_48;
7035 else if (lcb_crc == LCB_CRC_14B)
7036 port_ltp = PORT_LTP_CRC_MODE_14;
7037 else
7038 port_ltp = PORT_LTP_CRC_MODE_16;
7039
7040 return port_ltp;
7041}
7042
7043/*
7044 * Our neighbor has indicated that we are allowed to act as a fabric
7045 * manager, so place the full management partition key in the second
7046 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7047 * that we should already have the limited management partition key in
7048 * array element 1, and also that the port is not yet up when
7049 * add_full_mgmt_pkey() is invoked.
7050 */
7051static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7052{
7053 struct hfi1_devdata *dd = ppd->dd;
7054
8764522e
DL
7055 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7056 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7057 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7058 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
77241056
MM
7059 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7060 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7061}
7062
7063/*
7064 * Convert the given link width to the OPA link width bitmask.
7065 */
7066static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7067{
7068 switch (width) {
7069 case 0:
7070 /*
7071 * Simulator and quick linkup do not set the width.
7072 * Just set it to 4x without complaint.
7073 */
7074 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7075 return OPA_LINK_WIDTH_4X;
7076 return 0; /* no lanes up */
7077 case 1: return OPA_LINK_WIDTH_1X;
7078 case 2: return OPA_LINK_WIDTH_2X;
7079 case 3: return OPA_LINK_WIDTH_3X;
7080 default:
7081 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
17fb4f29 7082 __func__, width);
77241056
MM
7083 /* fall through */
7084 case 4: return OPA_LINK_WIDTH_4X;
7085 }
7086}
7087
7088/*
7089 * Do a population count on the bottom nibble.
7090 */
7091static const u8 bit_counts[16] = {
7092 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7093};
f4d507cd 7094
77241056
MM
7095static inline u8 nibble_to_count(u8 nibble)
7096{
7097 return bit_counts[nibble & 0xf];
7098}
7099
7100/*
7101 * Read the active lane information from the 8051 registers and return
7102 * their widths.
7103 *
7104 * Active lane information is found in these 8051 registers:
7105 * enable_lane_tx
7106 * enable_lane_rx
7107 */
7108static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7109 u16 *rx_width)
7110{
7111 u16 tx, rx;
7112 u8 enable_lane_rx;
7113 u8 enable_lane_tx;
7114 u8 tx_polarity_inversion;
7115 u8 rx_polarity_inversion;
7116 u8 max_rate;
7117
7118 /* read the active lanes */
7119 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
17fb4f29 7120 &rx_polarity_inversion, &max_rate);
77241056
MM
7121 read_local_lni(dd, &enable_lane_rx);
7122
7123 /* convert to counts */
7124 tx = nibble_to_count(enable_lane_tx);
7125 rx = nibble_to_count(enable_lane_rx);
7126
7127 /*
7128 * Set link_speed_active here, overriding what was set in
7129 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7130 * set the max_rate field in handle_verify_cap until v0.19.
7131 */
d0d236ea
JJ
7132 if ((dd->icode == ICODE_RTL_SILICON) &&
7133 (dd->dc8051_ver < dc8051_ver(0, 19))) {
77241056
MM
7134 /* max_rate: 0 = 12.5G, 1 = 25G */
7135 switch (max_rate) {
7136 case 0:
7137 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7138 break;
7139 default:
7140 dd_dev_err(dd,
17fb4f29
JJ
7141 "%s: unexpected max rate %d, using 25Gb\n",
7142 __func__, (int)max_rate);
77241056
MM
7143 /* fall through */
7144 case 1:
7145 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7146 break;
7147 }
7148 }
7149
7150 dd_dev_info(dd,
17fb4f29
JJ
7151 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7152 enable_lane_tx, tx, enable_lane_rx, rx);
77241056
MM
7153 *tx_width = link_width_to_bits(dd, tx);
7154 *rx_width = link_width_to_bits(dd, rx);
7155}
7156
7157/*
7158 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7159 * Valid after the end of VerifyCap and during LinkUp. Does not change
7160 * after link up. I.e. look elsewhere for downgrade information.
7161 *
7162 * Bits are:
7163 * + bits [7:4] contain the number of active transmitters
7164 * + bits [3:0] contain the number of active receivers
7165 * These are numbers 1 through 4 and can be different values if the
7166 * link is asymmetric.
7167 *
7168 * verify_cap_local_fm_link_width[0] retains its original value.
7169 */
7170static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7171 u16 *rx_width)
7172{
7173 u16 widths, tx, rx;
7174 u8 misc_bits, local_flags;
7175 u16 active_tx, active_rx;
7176
7177 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
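	/*
	 * The [1] byte described above is returned in the upper byte of
	 * 'widths': transmitters in bits [15:12], receivers in bits [11:8],
	 * hence the shifts below.
	 */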
7178 tx = widths >> 12;
7179 rx = (widths >> 8) & 0xf;
7180
7181 *tx_width = link_width_to_bits(dd, tx);
7182 *rx_width = link_width_to_bits(dd, rx);
7183
7184 /* print the active widths */
7185 get_link_widths(dd, &active_tx, &active_rx);
7186}
7187
7188/*
7189 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7190 * hardware information when the link first comes up.
7191 *
7192 * The link width is not available until after VerifyCap.AllFramesReceived
7193 * (the trigger for handle_verify_cap), so this is outside that routine
7194 * and should be called when the 8051 signals linkup.
7195 */
7196void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7197{
7198 u16 tx_width, rx_width;
7199
7200 /* get end-of-LNI link widths */
7201 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7202
7203 /* use tx_width as the link is supposed to be symmetric on link up */
7204 ppd->link_width_active = tx_width;
7205 /* link width downgrade active (LWD.A) starts out matching LW.A */
7206 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7207 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7208 /* per OPA spec, on link up LWD.E resets to LWD.S */
7209 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7210 /* cache the active egress rate (units [10^6 bits/sec]) */
7211 ppd->current_egress_rate = active_egress_rate(ppd);
7212}
7213
7214/*
7215 * Handle a verify capabilities interrupt from the 8051.
7216 *
7217 * This is a work-queue function outside of the interrupt.
7218 */
7219void handle_verify_cap(struct work_struct *work)
7220{
7221 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7222 link_vc_work);
7223 struct hfi1_devdata *dd = ppd->dd;
7224 u64 reg;
7225 u8 power_management;
7226 u8 continuous;
7227 u8 vcu;
7228 u8 vau;
7229 u8 z;
7230 u16 vl15buf;
7231 u16 link_widths;
7232 u16 crc_mask;
7233 u16 crc_val;
7234 u16 device_id;
7235 u16 active_tx, active_rx;
7236 u8 partner_supported_crc;
7237 u8 remote_tx_rate;
7238 u8 device_rev;
7239
7240 set_link_state(ppd, HLS_VERIFY_CAP);
7241
7242 lcb_shutdown(dd, 0);
7243 adjust_lcb_for_fpga_serdes(dd);
7244
7245 /*
7246 * These are now valid:
7247 * remote VerifyCap fields in the general LNI config
7248 * CSR DC8051_STS_REMOTE_GUID
7249 * CSR DC8051_STS_REMOTE_NODE_TYPE
7250 * CSR DC8051_STS_REMOTE_FM_SECURITY
7251 * CSR DC8051_STS_REMOTE_PORT_NO
7252 */
7253
7254 read_vc_remote_phy(dd, &power_management, &continuous);
17fb4f29
JJ
7255 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7256 &partner_supported_crc);
77241056
MM
7257 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7258 read_remote_device_id(dd, &device_id, &device_rev);
7259 /*
7260 * And the 'MgmtAllowed' information, which is exchanged during
7261 * LNI, is also available at this point.
7262 */
7263 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7264 /* print the active widths */
7265 get_link_widths(dd, &active_tx, &active_rx);
7266 dd_dev_info(dd,
17fb4f29
JJ
7267 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7268 (int)power_management, (int)continuous);
77241056 7269 dd_dev_info(dd,
17fb4f29
JJ
7270 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7271 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7272 (int)partner_supported_crc);
77241056 7273 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
17fb4f29 7274 (u32)remote_tx_rate, (u32)link_widths);
77241056 7275 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
17fb4f29 7276 (u32)device_id, (u32)device_rev);
77241056
MM
7277 /*
7278 * The peer vAU value just read is the peer receiver value. HFI does
7279 * not support a transmit vAU of 0 (AU == 8). We advertised that
7280 * with Z=1 in the fabric capabilities sent to the peer. The peer
7281 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7282 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7283 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7284 * subject to the Z value exception.
7285 */
7286 if (vau == 0)
7287 vau = 1;
7288 set_up_vl15(dd, vau, vl15buf);
7289
7290 /* set up the LCB CRC mode */
7291 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7292
7293 /* order is important: use the lowest bit in common */
7294 if (crc_mask & CAP_CRC_14B)
7295 crc_val = LCB_CRC_14B;
7296 else if (crc_mask & CAP_CRC_48B)
7297 crc_val = LCB_CRC_48B;
7298 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7299 crc_val = LCB_CRC_12B_16B_PER_LANE;
7300 else
7301 crc_val = LCB_CRC_16B;
7302
7303 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7304 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7305 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7306
7307 /* set (14b only) or clear sideband credit */
7308 reg = read_csr(dd, SEND_CM_CTRL);
7309 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7310 write_csr(dd, SEND_CM_CTRL,
17fb4f29 7311 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
77241056
MM
7312 } else {
7313 write_csr(dd, SEND_CM_CTRL,
17fb4f29 7314 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
77241056
MM
7315 }
7316
7317 ppd->link_speed_active = 0; /* invalid value */
7318 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7319 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7320 switch (remote_tx_rate) {
7321 case 0:
7322 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7323 break;
7324 case 1:
7325 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7326 break;
7327 }
7328 } else {
7329 /* actual rate is highest bit of the ANDed rates */
7330 u8 rate = remote_tx_rate & ppd->local_tx_rate;
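		/* e.g., local 0x3 ANDed with remote 0x2 gives rate 0x2, i.e. 25G */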
7331
7332 if (rate & 2)
7333 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7334 else if (rate & 1)
7335 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7336 }
7337 if (ppd->link_speed_active == 0) {
7338 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
17fb4f29 7339 __func__, (int)remote_tx_rate);
77241056
MM
7340 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7341 }
7342
7343 /*
7344 * Cache the values of the supported, enabled, and active
7345 * LTP CRC modes to return in 'portinfo' queries. But the bit
7346 * flags that are returned in the portinfo query differ from
7347 * what's in the link_crc_mask, crc_sizes, and crc_val
7348 * variables. Convert these here.
7349 */
7350 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7351 /* supported crc modes */
7352 ppd->port_ltp_crc_mode |=
7353 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7354 /* enabled crc modes */
7355 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7356 /* active crc mode */
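	/*
	 * Resulting layout of port_ltp_crc_mode: bits [11:8] hold the
	 * supported modes, bits [7:4] the enabled modes, and bits [3:0] the
	 * active mode, as reported in 'portinfo' queries.
	 */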
7357
7358 /* set up the remote credit return table */
7359 assign_remote_cm_au_table(dd, vcu);
7360
7361 /*
7362 * The LCB is reset on entry to handle_verify_cap(), so this must
7363 * be applied on every link up.
7364 *
7365 * Adjust LCB error kill enable to kill the link if
7366 * these RBUF errors are seen:
7367 * REPLAY_BUF_MBE_SMASK
7368 * FLIT_INPUT_BUF_MBE_SMASK
7369 */
995deafa 7370 if (is_ax(dd)) { /* fixed in B0 */
77241056
MM
7371 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7372 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7373 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7374 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7375 }
7376
7377 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7378 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7379
7380 /* give 8051 access to the LCB CSRs */
7381 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7382 set_8051_lcb_access(dd);
7383
7384 ppd->neighbor_guid =
7385 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7386 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7387 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7388 ppd->neighbor_type =
7389 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7390 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7391 ppd->neighbor_fm_security =
7392 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7393 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7394 dd_dev_info(dd,
17fb4f29
JJ
7395 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7396 ppd->neighbor_guid, ppd->neighbor_type,
7397 ppd->mgmt_allowed, ppd->neighbor_fm_security);
77241056
MM
7398 if (ppd->mgmt_allowed)
7399 add_full_mgmt_pkey(ppd);
7400
7401 /* tell the 8051 to go to LinkUp */
7402 set_link_state(ppd, HLS_GOING_UP);
7403}
7404
7405/*
7406 * Apply the link width downgrade enabled policy against the current active
7407 * link widths.
7408 *
7409 * Called when the enabled policy changes or the active link widths change.
7410 */
7411void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7412{
77241056 7413 int do_bounce = 0;
323fd785
DL
7414 int tries;
7415 u16 lwde;
77241056
MM
7416 u16 tx, rx;
7417
323fd785
DL
7418 /* use the hls lock to avoid a race with actual link up */
7419 tries = 0;
7420retry:
77241056
MM
7421 mutex_lock(&ppd->hls_lock);
7422 /* only apply if the link is up */
323fd785
DL
7423 if (!(ppd->host_link_state & HLS_UP)) {
7424 /* still going up... wait and retry */
7425 if (ppd->host_link_state & HLS_GOING_UP) {
7426 if (++tries < 1000) {
7427 mutex_unlock(&ppd->hls_lock);
7428 usleep_range(100, 120); /* arbitrary */
7429 goto retry;
7430 }
7431 dd_dev_err(ppd->dd,
7432 "%s: giving up waiting for link state change\n",
7433 __func__);
7434 }
7435 goto done;
7436 }
7437
7438 lwde = ppd->link_width_downgrade_enabled;
77241056
MM
7439
7440 if (refresh_widths) {
7441 get_link_widths(ppd->dd, &tx, &rx);
7442 ppd->link_width_downgrade_tx_active = tx;
7443 ppd->link_width_downgrade_rx_active = rx;
7444 }
7445
f9b5635c
DL
7446 if (ppd->link_width_downgrade_tx_active == 0 ||
7447 ppd->link_width_downgrade_rx_active == 0) {
7448 /* the 8051 reported a dead link as a downgrade */
7449 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7450 } else if (lwde == 0) {
77241056
MM
7451 /* downgrade is disabled */
7452
7453 /* bounce if not at starting active width */
7454 if ((ppd->link_width_active !=
17fb4f29
JJ
7455 ppd->link_width_downgrade_tx_active) ||
7456 (ppd->link_width_active !=
7457 ppd->link_width_downgrade_rx_active)) {
77241056 7458 dd_dev_err(ppd->dd,
17fb4f29 7459 "Link downgrade is disabled and link has downgraded, downing link\n");
77241056 7460 dd_dev_err(ppd->dd,
17fb4f29
JJ
7461 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7462 ppd->link_width_active,
7463 ppd->link_width_downgrade_tx_active,
7464 ppd->link_width_downgrade_rx_active);
77241056
MM
7465 do_bounce = 1;
7466 }
d0d236ea
JJ
7467 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7468 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
77241056
MM
7469 /* Tx or Rx is outside the enabled policy */
7470 dd_dev_err(ppd->dd,
17fb4f29 7471 "Link is outside of downgrade allowed, downing link\n");
77241056 7472 dd_dev_err(ppd->dd,
17fb4f29
JJ
7473 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7474 lwde, ppd->link_width_downgrade_tx_active,
7475 ppd->link_width_downgrade_rx_active);
77241056
MM
7476 do_bounce = 1;
7477 }
7478
323fd785
DL
7479done:
7480 mutex_unlock(&ppd->hls_lock);
7481
77241056
MM
7482 if (do_bounce) {
7483 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
17fb4f29 7484 OPA_LINKDOWN_REASON_WIDTH_POLICY);
77241056 7485 set_link_state(ppd, HLS_DN_OFFLINE);
8ebd4cf1 7486 tune_serdes(ppd);
77241056
MM
7487 start_link(ppd);
7488 }
7489}
7490
7491/*
7492 * Handle a link downgrade interrupt from the 8051.
7493 *
7494 * This is a work-queue function outside of the interrupt.
7495 */
7496void handle_link_downgrade(struct work_struct *work)
7497{
7498 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7499 link_downgrade_work);
7500
7501 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7502 apply_link_downgrade_policy(ppd, 1);
7503}
7504
7505static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7506{
7507 return flag_string(buf, buf_len, flags, dcc_err_flags,
7508 ARRAY_SIZE(dcc_err_flags));
7509}
7510
7511static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7512{
7513 return flag_string(buf, buf_len, flags, lcb_err_flags,
7514 ARRAY_SIZE(lcb_err_flags));
7515}
7516
7517static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7518{
7519 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7520 ARRAY_SIZE(dc8051_err_flags));
7521}
7522
7523static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7524{
7525 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7526 ARRAY_SIZE(dc8051_info_err_flags));
7527}
7528
7529static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7530{
7531 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7532 ARRAY_SIZE(dc8051_info_host_msg_flags));
7533}
7534
7535static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7536{
7537 struct hfi1_pportdata *ppd = dd->pport;
7538 u64 info, err, host_msg;
7539 int queue_link_down = 0;
7540 char buf[96];
7541
7542 /* look at the flags */
7543 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7544 /* 8051 information set by firmware */
7545 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7546 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7547 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7548 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7549 host_msg = (info >>
7550 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7551 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7552
7553 /*
7554 * Handle error flags.
7555 */
7556 if (err & FAILED_LNI) {
7557 /*
7558 * LNI error indications are cleared by the 8051
7559 * only when starting polling. Only pay attention
7560 * to them when in the states that occur during
7561 * LNI.
7562 */
7563 if (ppd->host_link_state
7564 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7565 queue_link_down = 1;
7566 dd_dev_info(dd, "Link error: %s\n",
17fb4f29
JJ
7567 dc8051_info_err_string(buf,
7568 sizeof(buf),
7569 err &
7570 FAILED_LNI));
77241056
MM
7571 }
7572 err &= ~(u64)FAILED_LNI;
7573 }
6d014530
DL
7574 /* unknown frames can happen during LNI, just count */
7575 if (err & UNKNOWN_FRAME) {
7576 ppd->unknown_frame_count++;
7577 err &= ~(u64)UNKNOWN_FRAME;
7578 }
77241056
MM
7579 if (err) {
7580 /* report remaining errors, but do not do anything */
7581 dd_dev_err(dd, "8051 info error: %s\n",
17fb4f29
JJ
7582 dc8051_info_err_string(buf, sizeof(buf),
7583 err));
77241056
MM
7584 }
7585
7586 /*
7587 * Handle host message flags.
7588 */
7589 if (host_msg & HOST_REQ_DONE) {
7590 /*
7591 * Presently, the driver does a busy wait for
7592 * host requests to complete. This is only an
7593 * informational message.
7594 * NOTE: The 8051 clears the host message
7595 * information *on the next 8051 command*.
7596 * Therefore, when linkup is achieved,
7597 * this flag will still be set.
7598 */
7599 host_msg &= ~(u64)HOST_REQ_DONE;
7600 }
7601 if (host_msg & BC_SMA_MSG) {
7602 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7603 host_msg &= ~(u64)BC_SMA_MSG;
7604 }
7605 if (host_msg & LINKUP_ACHIEVED) {
7606 dd_dev_info(dd, "8051: Link up\n");
7607 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7608 host_msg &= ~(u64)LINKUP_ACHIEVED;
7609 }
7610 if (host_msg & EXT_DEVICE_CFG_REQ) {
145dd2b3 7611 handle_8051_request(ppd);
77241056
MM
7612 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7613 }
7614 if (host_msg & VERIFY_CAP_FRAME) {
7615 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7616 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7617 }
7618 if (host_msg & LINK_GOING_DOWN) {
7619 const char *extra = "";
7620 /* no downgrade action needed if going down */
7621 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7622 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7623 extra = " (ignoring downgrade)";
7624 }
7625 dd_dev_info(dd, "8051: Link down%s\n", extra);
7626 queue_link_down = 1;
7627 host_msg &= ~(u64)LINK_GOING_DOWN;
7628 }
7629 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7630 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7631 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7632 }
7633 if (host_msg) {
7634 /* report remaining messages, but do not do anything */
7635 dd_dev_info(dd, "8051 info host message: %s\n",
17fb4f29
JJ
7636 dc8051_info_host_msg_string(buf,
7637 sizeof(buf),
7638 host_msg));
77241056
MM
7639 }
7640
7641 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7642 }
7643 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7644 /*
7645 * Lost the 8051 heartbeat. If this happens, we
7646 * receive constant interrupts about it. Disable
7647 * the interrupt after the first.
7648 */
7649 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7650 write_csr(dd, DC_DC8051_ERR_EN,
17fb4f29
JJ
7651 read_csr(dd, DC_DC8051_ERR_EN) &
7652 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
77241056
MM
7653
7654 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7655 }
7656 if (reg) {
7657 /* report the error, but do not do anything */
7658 dd_dev_err(dd, "8051 error: %s\n",
17fb4f29 7659 dc8051_err_string(buf, sizeof(buf), reg));
77241056
MM
7660 }
7661
7662 if (queue_link_down) {
4d114fdd
JJ
7663 /*
7664 * if the link is already going down or disabled, do not
7665 * queue another
7666 */
d0d236ea
JJ
7667 if ((ppd->host_link_state &
7668 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7669 ppd->link_enabled == 0) {
77241056 7670 dd_dev_info(dd, "%s: not queuing link down\n",
17fb4f29 7671 __func__);
77241056
MM
7672 } else {
7673 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7674 }
7675 }
7676}
7677
7678static const char * const fm_config_txt[] = {
7679[0] =
7680 "BadHeadDist: Distance violation between two head flits",
7681[1] =
7682 "BadTailDist: Distance violation between two tail flits",
7683[2] =
7684 "BadCtrlDist: Distance violation between two credit control flits",
7685[3] =
7686 "BadCrdAck: Credits return for unsupported VL",
7687[4] =
7688 "UnsupportedVLMarker: Received VL Marker",
7689[5] =
7690 "BadPreempt: Exceeded the preemption nesting level",
7691[6] =
7692 "BadControlFlit: Received unsupported control flit",
7693/* no 7 */
7694[8] =
7695 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7696};
7697
7698static const char * const port_rcv_txt[] = {
7699[1] =
7700 "BadPktLen: Illegal PktLen",
7701[2] =
7702 "PktLenTooLong: Packet longer than PktLen",
7703[3] =
7704 "PktLenTooShort: Packet shorter than PktLen",
7705[4] =
7706 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7707[5] =
7708 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7709[6] =
7710 "BadL2: Illegal L2 opcode",
7711[7] =
7712 "BadSC: Unsupported SC",
7713[9] =
7714 "BadRC: Illegal RC",
7715[11] =
7716 "PreemptError: Preempting with same VL",
7717[12] =
7718 "PreemptVL15: Preempting a VL15 packet",
7719};
7720
7721#define OPA_LDR_FMCONFIG_OFFSET 16
7722#define OPA_LDR_PORTRCV_OFFSET 0
7723static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7724{
7725 u64 info, hdr0, hdr1;
7726 const char *extra;
7727 char buf[96];
7728 struct hfi1_pportdata *ppd = dd->pport;
7729 u8 lcl_reason = 0;
7730 int do_bounce = 0;
7731
7732 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7733 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7734 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7735 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7736 /* set status bit */
7737 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7738 }
7739 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7740 }
7741
7742 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7743 struct hfi1_pportdata *ppd = dd->pport;
7744 /* this counter saturates at (2^32) - 1 */
7745 if (ppd->link_downed < (u32)UINT_MAX)
7746 ppd->link_downed++;
7747 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7748 }
7749
7750 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7751 u8 reason_valid = 1;
7752
7753 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7754 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7755 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7756 /* set status bit */
7757 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7758 }
7759 switch (info) {
7760 case 0:
7761 case 1:
7762 case 2:
7763 case 3:
7764 case 4:
7765 case 5:
7766 case 6:
7767 extra = fm_config_txt[info];
7768 break;
7769 case 8:
7770 extra = fm_config_txt[info];
7771 if (ppd->port_error_action &
7772 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7773 do_bounce = 1;
7774 /*
7775 * lcl_reason cannot be derived from info
7776 * for this error
7777 */
7778 lcl_reason =
7779 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7780 }
7781 break;
7782 default:
7783 reason_valid = 0;
7784 snprintf(buf, sizeof(buf), "reserved%lld", info);
7785 extra = buf;
7786 break;
7787 }
7788
7789 if (reason_valid && !do_bounce) {
7790 do_bounce = ppd->port_error_action &
7791 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7792 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7793 }
7794
7795 /* just report this */
7796 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7797 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7798 }
7799
7800 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7801 u8 reason_valid = 1;
7802
7803 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7804 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7805 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7806 if (!(dd->err_info_rcvport.status_and_code &
7807 OPA_EI_STATUS_SMASK)) {
7808 dd->err_info_rcvport.status_and_code =
7809 info & OPA_EI_CODE_SMASK;
7810 /* set status bit */
7811 dd->err_info_rcvport.status_and_code |=
7812 OPA_EI_STATUS_SMASK;
4d114fdd
JJ
7813 /*
7814 * save first 2 flits in the packet that caused
7815 * the error
7816 */
77241056
MM
7817 dd->err_info_rcvport.packet_flit1 = hdr0;
7818 dd->err_info_rcvport.packet_flit2 = hdr1;
7819 }
7820 switch (info) {
7821 case 1:
7822 case 2:
7823 case 3:
7824 case 4:
7825 case 5:
7826 case 6:
7827 case 7:
7828 case 9:
7829 case 11:
7830 case 12:
7831 extra = port_rcv_txt[info];
7832 break;
7833 default:
7834 reason_valid = 0;
7835 snprintf(buf, sizeof(buf), "reserved%lld", info);
7836 extra = buf;
7837 break;
7838 }
7839
7840 if (reason_valid && !do_bounce) {
7841 do_bounce = ppd->port_error_action &
7842 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7843 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7844 }
7845
7846 /* just report this */
7847 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7848 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
17fb4f29 7849 hdr0, hdr1);
77241056
MM
7850
7851 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7852 }
7853
7854 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7855 /* informative only */
7856 dd_dev_info(dd, "8051 access to LCB blocked\n");
7857 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7858 }
7859 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7860 /* informative only */
7861 dd_dev_info(dd, "host access to LCB blocked\n");
7862 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7863 }
7864
7865 /* report any remaining errors */
7866 if (reg)
7867 dd_dev_info(dd, "DCC Error: %s\n",
17fb4f29 7868 dcc_err_string(buf, sizeof(buf), reg));
77241056
MM
7869
7870 if (lcl_reason == 0)
7871 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7872
7873 if (do_bounce) {
7874 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7875 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7876 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7877 }
7878}
7879
7880static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7881{
7882 char buf[96];
7883
7884 dd_dev_info(dd, "LCB Error: %s\n",
17fb4f29 7885 lcb_err_string(buf, sizeof(buf), reg));
77241056
MM
7886}
7887
7888/*
7889 * CCE block DC interrupt. Source is < 8.
7890 */
7891static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7892{
7893 const struct err_reg_info *eri = &dc_errs[source];
7894
7895 if (eri->handler) {
7896 interrupt_clear_down(dd, 0, eri);
7897 } else if (source == 3 /* dc_lbm_int */) {
7898 /*
7899 * This indicates that a parity error has occurred on the
7900 * address/control lines presented to the LBM. The error
7901 * is a single pulse, there is no associated error flag,
7902 * and it is non-maskable. This is because if a parity
7903 * error occurs on the request, the request is dropped.
7904 * This should never occur, but it is nice to know if it
7905 * ever does.
7906 */
7907 dd_dev_err(dd, "Parity error in DC LBM block\n");
7908 } else {
7909 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7910 }
7911}
7912
7913/*
7914 * TX block send credit interrupt. Source is < 160.
7915 */
7916static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7917{
7918 sc_group_release_update(dd, source);
7919}
7920
7921/*
7922 * TX block SDMA interrupt. Source is < 48.
7923 *
7924 * SDMA interrupts are grouped by type:
7925 *
7926 * 0 - N-1 = SDma
7927 * N - 2N-1 = SDmaProgress
7928 * 2N - 3N-1 = SDmaIdle
7929 */
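/*
 * For example, with N engines a source of N + 3 decodes below to
 * what = 1 (SDmaProgress) on engine 3.
 */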
7930static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7931{
7932 /* what interrupt */
7933 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7934 /* which engine */
7935 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7936
7937#ifdef CONFIG_SDMA_VERBOSITY
7938 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7939 slashstrip(__FILE__), __LINE__, __func__);
7940 sdma_dumpstate(&dd->per_sdma[which]);
7941#endif
7942
7943 if (likely(what < 3 && which < dd->num_sdma)) {
7944 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7945 } else {
7946 /* should not happen */
7947 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7948 }
7949}
7950
7951/*
7952 * RX block receive available interrupt. Source is < 160.
7953 */
7954static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7955{
7956 struct hfi1_ctxtdata *rcd;
7957 char *err_detail;
7958
7959 if (likely(source < dd->num_rcv_contexts)) {
7960 rcd = dd->rcd[source];
7961 if (rcd) {
7962 if (source < dd->first_user_ctxt)
f4f30031 7963 rcd->do_interrupt(rcd, 0);
77241056
MM
7964 else
7965 handle_user_interrupt(rcd);
7966 return; /* OK */
7967 }
7968 /* received an interrupt, but no rcd */
7969 err_detail = "dataless";
7970 } else {
7971 /* received an interrupt, but are not using that context */
7972 err_detail = "out of range";
7973 }
7974 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
17fb4f29 7975 err_detail, source);
77241056
MM
7976}
7977
7978/*
7979 * RX block receive urgent interrupt. Source is < 160.
7980 */
7981static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7982{
7983 struct hfi1_ctxtdata *rcd;
7984 char *err_detail;
7985
7986 if (likely(source < dd->num_rcv_contexts)) {
7987 rcd = dd->rcd[source];
7988 if (rcd) {
7989 /* only pay attention to user urgent interrupts */
7990 if (source >= dd->first_user_ctxt)
7991 handle_user_interrupt(rcd);
7992 return; /* OK */
7993 }
7994 /* received an interrupt, but no rcd */
7995 err_detail = "dataless";
7996 } else {
7997 /* received an interrupt, but are not using that context */
7998 err_detail = "out of range";
7999 }
8000 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
17fb4f29 8001 err_detail, source);
77241056
MM
8002}
8003
8004/*
8005 * Reserved range interrupt. Should not be called in normal operation.
8006 */
8007static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8008{
8009 char name[64];
8010
8011 dd_dev_err(dd, "unexpected %s interrupt\n",
17fb4f29 8012 is_reserved_name(name, sizeof(name), source));
77241056
MM
8013}
8014
8015static const struct is_table is_table[] = {
4d114fdd
JJ
8016/*
8017 * start end
8018 * name func interrupt func
8019 */
77241056
MM
8020{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8021 is_misc_err_name, is_misc_err_int },
8022{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8023 is_sdma_eng_err_name, is_sdma_eng_err_int },
8024{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8025 is_sendctxt_err_name, is_sendctxt_err_int },
8026{ IS_SDMA_START, IS_SDMA_END,
8027 is_sdma_eng_name, is_sdma_eng_int },
8028{ IS_VARIOUS_START, IS_VARIOUS_END,
8029 is_various_name, is_various_int },
8030{ IS_DC_START, IS_DC_END,
8031 is_dc_name, is_dc_int },
8032{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8033 is_rcv_avail_name, is_rcv_avail_int },
8034{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8035 is_rcv_urgent_name, is_rcv_urgent_int },
8036{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8037 is_send_credit_name, is_send_credit_int},
8038{ IS_RESERVED_START, IS_RESERVED_END,
8039 is_reserved_name, is_reserved_int},
8040};
8041
8042/*
8043 * Interrupt source interrupt - called when the given source has an interrupt.
8044 * Source is a bit index into an array of 64-bit integers.
8045 */
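/*
 * Dispatch sketch (illustrative): a source that falls in
 * [IS_SDMA_START, IS_SDMA_END) matches the IS_SDMA entry above and is
 * handed to is_sdma_eng_int() as source - IS_SDMA_START, i.e. relative
 * to the start of its range.
 */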
8046static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8047{
8048 const struct is_table *entry;
8049
8050 /* avoids a double compare by walking the table in-order */
8051 for (entry = &is_table[0]; entry->is_name; entry++) {
8052 if (source < entry->end) {
8053 trace_hfi1_interrupt(dd, entry, source);
8054 entry->is_int(dd, source - entry->start);
8055 return;
8056 }
8057 }
8058 /* fell off the end */
8059 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8060}
8061
8062/*
8063 * General interrupt handler. This is able to correctly handle
8064 * all interrupts in case INTx is used.
8065 */
8066static irqreturn_t general_interrupt(int irq, void *data)
8067{
8068 struct hfi1_devdata *dd = data;
8069 u64 regs[CCE_NUM_INT_CSRS];
8070 u32 bit;
8071 int i;
8072
8073 this_cpu_inc(*dd->int_counter);
8074
8075 /* phase 1: scan and clear all handled interrupts */
8076 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8077 if (dd->gi_mask[i] == 0) {
8078 regs[i] = 0; /* used later */
8079 continue;
8080 }
8081 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8082 dd->gi_mask[i];
8083 /* only clear if anything is set */
8084 if (regs[i])
8085 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8086 }
8087
8088 /* phase 2: call the appropriate handler */
8089 for_each_set_bit(bit, (unsigned long *)&regs[0],
17fb4f29 8090 CCE_NUM_INT_CSRS * 64) {
8091 is_interrupt(dd, bit);
8092 }
8093
8094 return IRQ_HANDLED;
8095}
8096
8097static irqreturn_t sdma_interrupt(int irq, void *data)
8098{
8099 struct sdma_engine *sde = data;
8100 struct hfi1_devdata *dd = sde->dd;
8101 u64 status;
8102
8103#ifdef CONFIG_SDMA_VERBOSITY
8104 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8105 slashstrip(__FILE__), __LINE__, __func__);
8106 sdma_dumpstate(sde);
8107#endif
8108
8109 this_cpu_inc(*dd->int_counter);
8110
8111 /* This read_csr is really bad in the hot path */
8112 status = read_csr(dd,
8113 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8114 & sde->imask;
8115 if (likely(status)) {
8116 /* clear the interrupt(s) */
8117 write_csr(dd,
8118 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8119 status);
8120
8121 /* handle the interrupt(s) */
8122 sdma_engine_interrupt(sde, status);
8123 } else
8124 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
17fb4f29 8125 sde->this_idx);
8126
8127 return IRQ_HANDLED;
8128}
8129
8130/*
8131 * Clear the receive interrupt. Use a read of the interrupt clear CSR
 8132 * to ensure that the write completed. This does NOT guarantee that
8133 * queued DMA writes to memory from the chip are pushed.
8134 */
8135static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8136{
8137 struct hfi1_devdata *dd = rcd->dd;
8138 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8139
8140 mmiowb(); /* make sure everything before is written */
8141 write_csr(dd, addr, rcd->imask);
8142 /* force the above write on the chip and get a value back */
8143 (void)read_csr(dd, addr);
8144}
8145
8146/* force the receive interrupt */
fb9036dd 8147void force_recv_intr(struct hfi1_ctxtdata *rcd)
8148{
8149 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8150}
8151
8152/*
8153 * Return non-zero if a packet is present.
8154 *
8155 * This routine is called when rechecking for packets after the RcvAvail
8156 * interrupt has been cleared down. First, do a quick check of memory for
8157 * a packet present. If not found, use an expensive CSR read of the context
8158 * tail to determine the actual tail. The CSR read is necessary because there
8159 * is no method to push pending DMAs to memory other than an interrupt and we
8160 * are trying to determine if we need to force an interrupt.
8161 */
8162static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8163{
8164 u32 tail;
8165 int present;
8166
f4f30031 8167 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
ecd42f8d 8168 present = (rcd->seq_cnt ==
f4f30031 8169 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8170 else /* is RDMA rtail */
8171 present = (rcd->head != get_rcvhdrtail(rcd));
8172
8173 if (present)
8174 return 1;
f4f30031 8175
 8176 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8177 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8178 return rcd->head != tail;
8179}
8180
8181/*
8182 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8183 * This routine will try to handle packets immediately (latency), but if
 8184 * it finds too many, it will invoke the thread handler (bandwidth). The
16733b88 8185 * chip receive interrupt is *not* cleared down until this or the thread (if
8186 * invoked) is finished. The intent is to avoid extra interrupts while we
8187 * are processing packets anyway.
8188 */
8189static irqreturn_t receive_context_interrupt(int irq, void *data)
8190{
8191 struct hfi1_ctxtdata *rcd = data;
8192 struct hfi1_devdata *dd = rcd->dd;
8193 int disposition;
8194 int present;
8195
8196 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8197 this_cpu_inc(*dd->int_counter);
affa48de 8198 aspm_ctx_disable(rcd);
77241056 8199
8200 /* receive interrupt remains blocked while processing packets */
8201 disposition = rcd->do_interrupt(rcd, 0);
77241056 8202
8203 /*
8204 * Too many packets were seen while processing packets in this
8205 * IRQ handler. Invoke the handler thread. The receive interrupt
8206 * remains blocked.
8207 */
8208 if (disposition == RCV_PKT_LIMIT)
8209 return IRQ_WAKE_THREAD;
8210
8211 /*
8212 * The packet processor detected no more packets. Clear the receive
 8213 * interrupt and recheck for a packet that may have arrived
8214 * after the previous check and interrupt clear. If a packet arrived,
8215 * force another interrupt.
8216 */
8217 clear_recv_intr(rcd);
8218 present = check_packet_present(rcd);
8219 if (present)
8220 force_recv_intr(rcd);
8221
8222 return IRQ_HANDLED;
8223}
8224
8225/*
8226 * Receive packet thread handler. This expects to be invoked with the
8227 * receive interrupt still blocked.
8228 */
8229static irqreturn_t receive_context_thread(int irq, void *data)
8230{
8231 struct hfi1_ctxtdata *rcd = data;
8232 int present;
8233
8234 /* receive interrupt is still blocked from the IRQ handler */
8235 (void)rcd->do_interrupt(rcd, 1);
8236
8237 /*
8238 * The packet processor will only return if it detected no more
8239 * packets. Hold IRQs here so we can safely clear the interrupt and
8240 * recheck for a packet that may have arrived after the previous
8241 * check and the interrupt clear. If a packet arrived, force another
8242 * interrupt.
8243 */
8244 local_irq_disable();
8245 clear_recv_intr(rcd);
8246 present = check_packet_present(rcd);
8247 if (present)
8248 force_recv_intr(rcd);
8249 local_irq_enable();
8250
8251 return IRQ_HANDLED;
8252}
8253
8254/* ========================================================================= */
8255
8256u32 read_physical_state(struct hfi1_devdata *dd)
8257{
8258 u64 reg;
8259
8260 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8261 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8262 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8263}
8264
fb9036dd 8265u32 read_logical_state(struct hfi1_devdata *dd)
8266{
8267 u64 reg;
8268
8269 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8270 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8271 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8272}
8273
8274static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8275{
8276 u64 reg;
8277
8278 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8279 /* clear current state, set new state */
8280 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8281 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8282 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8283}
8284
8285/*
8286 * Use the 8051 to read a LCB CSR.
8287 */
8288static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8289{
8290 u32 regno;
8291 int ret;
8292
8293 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8294 if (acquire_lcb_access(dd, 0) == 0) {
8295 *data = read_csr(dd, addr);
8296 release_lcb_access(dd, 0);
8297 return 0;
8298 }
8299 return -EBUSY;
8300 }
8301
8302 /* register is an index of LCB registers: (offset - base) / 8 */
8303 regno = (addr - DC_LCB_CFG_RUN) >> 3;
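	/* e.g. (illustrative): addr == DC_LCB_CFG_RUN + 0x18 yields regno 3 */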
8304 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8305 if (ret != HCMD_SUCCESS)
8306 return -EBUSY;
8307 return 0;
8308}
8309
8310/*
8311 * Read an LCB CSR. Access may not be in host control, so check.
8312 * Return 0 on success, -EBUSY on failure.
8313 */
8314int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8315{
8316 struct hfi1_pportdata *ppd = dd->pport;
8317
8318 /* if up, go through the 8051 for the value */
8319 if (ppd->host_link_state & HLS_UP)
8320 return read_lcb_via_8051(dd, addr, data);
8321 /* if going up or down, no access */
8322 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8323 return -EBUSY;
8324 /* otherwise, host has access */
8325 *data = read_csr(dd, addr);
8326 return 0;
8327}
8328
8329/*
8330 * Use the 8051 to write a LCB CSR.
8331 */
8332static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8333{
8334 u32 regno;
8335 int ret;
77241056 8336
8337 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8338 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8339 if (acquire_lcb_access(dd, 0) == 0) {
8340 write_csr(dd, addr, data);
8341 release_lcb_access(dd, 0);
8342 return 0;
8343 }
8344 return -EBUSY;
77241056 8345 }
8346
8347 /* register is an index of LCB registers: (offset - base) / 8 */
8348 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8349 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8350 if (ret != HCMD_SUCCESS)
8351 return -EBUSY;
8352 return 0;
8353}
8354
8355/*
8356 * Write an LCB CSR. Access may not be in host control, so check.
8357 * Return 0 on success, -EBUSY on failure.
8358 */
8359int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8360{
8361 struct hfi1_pportdata *ppd = dd->pport;
8362
8363 /* if up, go through the 8051 for the value */
8364 if (ppd->host_link_state & HLS_UP)
8365 return write_lcb_via_8051(dd, addr, data);
8366 /* if going up or down, no access */
8367 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8368 return -EBUSY;
8369 /* otherwise, host has access */
8370 write_csr(dd, addr, data);
8371 return 0;
8372}
8373
8374/*
8375 * Returns:
8376 * < 0 = Linux error, not able to get access
8377 * > 0 = 8051 command RETURN_CODE
8378 */
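/*
 * Typical caller pattern (illustrative sketch): treat anything other than
 * HCMD_SUCCESS as failure, e.g.
 *
 *	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
 *	if (ret != HCMD_SUCCESS)
 *		return -EBUSY;
 */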
8379static int do_8051_command(
8380 struct hfi1_devdata *dd,
8381 u32 type,
8382 u64 in_data,
8383 u64 *out_data)
8384{
8385 u64 reg, completed;
8386 int return_code;
8387 unsigned long flags;
8388 unsigned long timeout;
8389
8390 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8391
8392 /*
8393 * Alternative to holding the lock for a long time:
8394 * - keep busy wait - have other users bounce off
8395 */
8396 spin_lock_irqsave(&dd->dc8051_lock, flags);
8397
8398 /* We can't send any commands to the 8051 if it's in reset */
8399 if (dd->dc_shutdown) {
8400 return_code = -ENODEV;
8401 goto fail;
8402 }
8403
8404 /*
8405 * If an 8051 host command timed out previously, then the 8051 is
8406 * stuck.
8407 *
8408 * On first timeout, attempt to reset and restart the entire DC
8409 * block (including 8051). (Is this too big of a hammer?)
8410 *
8411 * If the 8051 times out a second time, the reset did not bring it
8412 * back to healthy life. In that case, fail any subsequent commands.
8413 */
8414 if (dd->dc8051_timed_out) {
8415 if (dd->dc8051_timed_out > 1) {
8416 dd_dev_err(dd,
8417 "Previous 8051 host command timed out, skipping command %u\n",
8418 type);
8419 return_code = -ENXIO;
8420 goto fail;
8421 }
8422 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8423 dc_shutdown(dd);
8424 dc_start(dd);
8425 spin_lock_irqsave(&dd->dc8051_lock, flags);
8426 }
8427
8428 /*
8429 * If there is no timeout, then the 8051 command interface is
8430 * waiting for a command.
8431 */
8432
8433 /*
 8434 * When writing an LCB CSR, out_data contains the full value
 8435 * to be written, while in_data contains the relative LCB
 8436 * address in 7:0. Do the work here, rather than in the caller,
 8437 * of distributing the write data to where it needs to go:
8438 *
8439 * Write data
8440 * 39:00 -> in_data[47:8]
8441 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8442 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8443 */
8444 if (type == HCMD_WRITE_LCB_CSR) {
8445 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8446 reg = ((((*out_data) >> 40) & 0xff) <<
8447 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8448 | ((((*out_data) >> 48) & 0xffff) <<
8449 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8450 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8451 }
8452
8453 /*
8454 * Do two writes: the first to stabilize the type and req_data, the
8455 * second to activate.
8456 */
8457 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8458 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8459 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8460 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8461 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8462 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8463 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8464
8465 /* wait for completion, alternate: interrupt */
8466 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8467 while (1) {
8468 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8469 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8470 if (completed)
8471 break;
8472 if (time_after(jiffies, timeout)) {
8473 dd->dc8051_timed_out++;
8474 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8475 if (out_data)
8476 *out_data = 0;
8477 return_code = -ETIMEDOUT;
8478 goto fail;
8479 }
8480 udelay(2);
8481 }
8482
8483 if (out_data) {
8484 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8485 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8486 if (type == HCMD_READ_LCB_CSR) {
8487 /* top 16 bits are in a different register */
8488 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8489 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8490 << (48
8491 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8492 }
8493 }
8494 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8495 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8496 dd->dc8051_timed_out = 0;
8497 /*
8498 * Clear command for next user.
8499 */
8500 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8501
8502fail:
8503 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8504
8505 return return_code;
8506}
8507
8508static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8509{
8510 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8511}
8512
8513int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8514 u8 lane_id, u32 config_data)
8515{
8516 u64 data;
8517 int ret;
8518
8519 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8520 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8521 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8522 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8523 if (ret != HCMD_SUCCESS) {
8524 dd_dev_err(dd,
8525 "load 8051 config: field id %d, lane %d, err %d\n",
8526 (int)field_id, (int)lane_id, ret);
8527 }
8528 return ret;
8529}
8530
8531/*
8532 * Read the 8051 firmware "registers". Use the RAM directly. Always
8533 * set the result, even on error.
8534 * Return 0 on success, -errno on failure
8535 */
8536int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8537 u32 *result)
8538{
8539 u64 big_data;
8540 u32 addr;
8541 int ret;
8542
8543 /* address start depends on the lane_id */
8544 if (lane_id < 4)
8545 addr = (4 * NUM_GENERAL_FIELDS)
8546 + (lane_id * 4 * NUM_LANE_FIELDS);
8547 else
8548 addr = 0;
8549 addr += field_id * 4;
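	/*
	 * Illustrative layout (inferred from the math above, an assumption):
	 * lane_id >= 4 (e.g. GENERAL_CONFIG) addresses the general-field
	 * area at offset 0, while lanes 0-3 address per-lane blocks that
	 * follow it.
	 */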
8550
8551 /* read is in 8-byte chunks, hardware will truncate the address down */
8552 ret = read_8051_data(dd, addr, 8, &big_data);
8553
8554 if (ret == 0) {
8555 /* extract the 4 bytes we want */
8556 if (addr & 0x4)
8557 *result = (u32)(big_data >> 32);
8558 else
8559 *result = (u32)big_data;
8560 } else {
8561 *result = 0;
8562 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
17fb4f29 8563 __func__, lane_id, field_id);
8564 }
8565
8566 return ret;
8567}
8568
8569static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8570 u8 continuous)
8571{
8572 u32 frame;
8573
8574 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8575 | power_management << POWER_MANAGEMENT_SHIFT;
8576 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8577 GENERAL_CONFIG, frame);
8578}
8579
8580static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8581 u16 vl15buf, u8 crc_sizes)
8582{
8583 u32 frame;
8584
8585 frame = (u32)vau << VAU_SHIFT
8586 | (u32)z << Z_SHIFT
8587 | (u32)vcu << VCU_SHIFT
8588 | (u32)vl15buf << VL15BUF_SHIFT
8589 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8590 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8591 GENERAL_CONFIG, frame);
8592}
8593
8594static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8595 u8 *flag_bits, u16 *link_widths)
8596{
8597 u32 frame;
8598
8599 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
17fb4f29 8600 &frame);
8601 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8602 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8603 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8604}
8605
8606static int write_vc_local_link_width(struct hfi1_devdata *dd,
8607 u8 misc_bits,
8608 u8 flag_bits,
8609 u16 link_widths)
8610{
8611 u32 frame;
8612
8613 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8614 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8615 | (u32)link_widths << LINK_WIDTH_SHIFT;
8616 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8617 frame);
8618}
8619
8620static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8621 u8 device_rev)
8622{
8623 u32 frame;
8624
8625 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8626 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8627 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8628}
8629
8630static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8631 u8 *device_rev)
8632{
8633 u32 frame;
8634
8635 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8636 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8637 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8638 & REMOTE_DEVICE_REV_MASK;
8639}
8640
8641void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8642{
8643 u32 frame;
8644
8645 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8646 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8647 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8648}
8649
8650static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8651 u8 *continuous)
8652{
8653 u32 frame;
8654
8655 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8656 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8657 & POWER_MANAGEMENT_MASK;
8658 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8659 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8660}
8661
8662static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8663 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8664{
8665 u32 frame;
8666
8667 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8668 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8669 *z = (frame >> Z_SHIFT) & Z_MASK;
8670 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8671 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8672 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8673}
8674
8675static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8676 u8 *remote_tx_rate,
8677 u16 *link_widths)
8678{
8679 u32 frame;
8680
8681 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
17fb4f29 8682 &frame);
8683 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8684 & REMOTE_TX_RATE_MASK;
8685 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8686}
8687
8688static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8689{
8690 u32 frame;
8691
8692 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8693 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8694}
8695
8696static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8697{
8698 u32 frame;
8699
8700 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8701 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8702}
8703
8704static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8705{
8706 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8707}
8708
8709static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8710{
8711 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8712}
8713
8714void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8715{
8716 u32 frame;
8717 int ret;
8718
8719 *link_quality = 0;
8720 if (dd->pport->host_link_state & HLS_UP) {
8721 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
17fb4f29 8722 &frame);
8723 if (ret == 0)
8724 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8725 & LINK_QUALITY_MASK;
8726 }
8727}
8728
8729static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8730{
8731 u32 frame;
8732
8733 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8734 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8735}
8736
8737static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8738{
8739 u32 frame;
8740
8741 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8742 *ldr = (frame & 0xff);
8743}
8744
8745static int read_tx_settings(struct hfi1_devdata *dd,
8746 u8 *enable_lane_tx,
8747 u8 *tx_polarity_inversion,
8748 u8 *rx_polarity_inversion,
8749 u8 *max_rate)
8750{
8751 u32 frame;
8752 int ret;
8753
8754 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8755 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8756 & ENABLE_LANE_TX_MASK;
8757 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8758 & TX_POLARITY_INVERSION_MASK;
8759 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8760 & RX_POLARITY_INVERSION_MASK;
8761 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8762 return ret;
8763}
8764
8765static int write_tx_settings(struct hfi1_devdata *dd,
8766 u8 enable_lane_tx,
8767 u8 tx_polarity_inversion,
8768 u8 rx_polarity_inversion,
8769 u8 max_rate)
8770{
8771 u32 frame;
8772
8773 /* no need to mask, all variable sizes match field widths */
8774 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8775 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8776 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8777 | max_rate << MAX_RATE_SHIFT;
8778 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8779}
8780
8781static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8782{
8783 u32 frame, version, prod_id;
8784 int ret, lane;
8785
8786 /* 4 lanes */
8787 for (lane = 0; lane < 4; lane++) {
8788 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8789 if (ret) {
8790 dd_dev_err(dd,
8791 "Unable to read lane %d firmware details\n",
8792 lane);
8793 continue;
8794 }
8795 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8796 & SPICO_ROM_VERSION_MASK;
8797 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8798 & SPICO_ROM_PROD_ID_MASK;
8799 dd_dev_info(dd,
8800 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8801 lane, version, prod_id);
8802 }
8803}
8804
8805/*
8806 * Read an idle LCB message.
8807 *
8808 * Returns 0 on success, -EINVAL on error
8809 */
8810static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8811{
8812 int ret;
8813
17fb4f29 8814 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
8815 if (ret != HCMD_SUCCESS) {
8816 dd_dev_err(dd, "read idle message: type %d, err %d\n",
17fb4f29 8817 (u32)type, ret);
8818 return -EINVAL;
8819 }
8820 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8821 /* return only the payload as we already know the type */
8822 *data_out >>= IDLE_PAYLOAD_SHIFT;
8823 return 0;
8824}
8825
8826/*
8827 * Read an idle SMA message. To be done in response to a notification from
8828 * the 8051.
8829 *
8830 * Returns 0 on success, -EINVAL on error
8831 */
8832static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8833{
8834 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8835 data);
8836}
8837
8838/*
8839 * Send an idle LCB message.
8840 *
8841 * Returns 0 on success, -EINVAL on error
8842 */
8843static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8844{
8845 int ret;
8846
8847 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8848 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8849 if (ret != HCMD_SUCCESS) {
8850 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
17fb4f29 8851 data, ret);
8852 return -EINVAL;
8853 }
8854 return 0;
8855}
8856
8857/*
8858 * Send an idle SMA message.
8859 *
8860 * Returns 0 on success, -EINVAL on error
8861 */
8862int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8863{
8864 u64 data;
8865
8866 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8867 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8868 return send_idle_message(dd, data);
8869}
8870
8871/*
8872 * Initialize the LCB then do a quick link up. This may or may not be
8873 * in loopback.
8874 *
8875 * return 0 on success, -errno on error
8876 */
8877static int do_quick_linkup(struct hfi1_devdata *dd)
8878{
8879 u64 reg;
8880 unsigned long timeout;
8881 int ret;
8882
8883 lcb_shutdown(dd, 0);
8884
8885 if (loopback) {
8886 /* LCB_CFG_LOOPBACK.VAL = 2 */
8887 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8888 write_csr(dd, DC_LCB_CFG_LOOPBACK,
17fb4f29 8889 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8890 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8891 }
8892
8893 /* start the LCBs */
8894 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8895 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8896
8897 /* simulator only loopback steps */
8898 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8899 /* LCB_CFG_RUN.EN = 1 */
8900 write_csr(dd, DC_LCB_CFG_RUN,
17fb4f29 8901 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8902
8903 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8904 timeout = jiffies + msecs_to_jiffies(10);
8905 while (1) {
17fb4f29 8906 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8907 if (reg)
8908 break;
8909 if (time_after(jiffies, timeout)) {
8910 dd_dev_err(dd,
17fb4f29 8911 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8912 return -ETIMEDOUT;
8913 }
8914 udelay(2);
8915 }
8916
8917 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
17fb4f29 8918 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8919 }
8920
8921 if (!loopback) {
8922 /*
8923 * When doing quick linkup and not in loopback, both
8924 * sides must be done with LCB set-up before either
8925 * starts the quick linkup. Put a delay here so that
8926 * both sides can be started and have a chance to be
8927 * done with LCB set up before resuming.
8928 */
8929 dd_dev_err(dd,
17fb4f29 8930 "Pausing for peer to be finished with LCB set up\n");
77241056 8931 msleep(5000);
17fb4f29 8932 dd_dev_err(dd, "Continuing with quick linkup\n");
8933 }
8934
8935 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8936 set_8051_lcb_access(dd);
8937
8938 /*
8939 * State "quick" LinkUp request sets the physical link state to
8940 * LinkUp without a verify capability sequence.
8941 * This state is in simulator v37 and later.
8942 */
8943 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8944 if (ret != HCMD_SUCCESS) {
8945 dd_dev_err(dd,
8946 "%s: set physical link state to quick LinkUp failed with return %d\n",
8947 __func__, ret);
8948
8949 set_host_lcb_access(dd);
8950 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8951
8952 if (ret >= 0)
8953 ret = -EINVAL;
8954 return ret;
8955 }
8956
8957 return 0; /* success */
8958}
8959
8960/*
8961 * Set the SerDes to internal loopback mode.
8962 * Returns 0 on success, -errno on error.
8963 */
8964static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8965{
8966 int ret;
8967
8968 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8969 if (ret == HCMD_SUCCESS)
8970 return 0;
8971 dd_dev_err(dd,
8972 "Set physical link state to SerDes Loopback failed with return %d\n",
8973 ret);
8974 if (ret >= 0)
8975 ret = -EINVAL;
8976 return ret;
8977}
8978
8979/*
8980 * Do all special steps to set up loopback.
8981 */
8982static int init_loopback(struct hfi1_devdata *dd)
8983{
8984 dd_dev_info(dd, "Entering loopback mode\n");
8985
8986 /* all loopbacks should disable self GUID check */
8987 write_csr(dd, DC_DC8051_CFG_MODE,
17fb4f29 8988 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8989
8990 /*
8991 * The simulator has only one loopback option - LCB. Switch
8992 * to that option, which includes quick link up.
8993 *
8994 * Accept all valid loopback values.
8995 */
8996 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
8997 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
8998 loopback == LOOPBACK_CABLE)) {
8999 loopback = LOOPBACK_LCB;
9000 quick_linkup = 1;
9001 return 0;
9002 }
9003
9004 /* handle serdes loopback */
9005 if (loopback == LOOPBACK_SERDES) {
 9006 /* internal serdes loopback needs quick linkup on RTL */
9007 if (dd->icode == ICODE_RTL_SILICON)
9008 quick_linkup = 1;
9009 return set_serdes_loopback_mode(dd);
9010 }
9011
9012 /* LCB loopback - handled at poll time */
9013 if (loopback == LOOPBACK_LCB) {
9014 quick_linkup = 1; /* LCB is always quick linkup */
9015
9016 /* not supported in emulation due to emulation RTL changes */
9017 if (dd->icode == ICODE_FPGA_EMULATION) {
9018 dd_dev_err(dd,
17fb4f29 9019 "LCB loopback not supported in emulation\n");
9020 return -EINVAL;
9021 }
9022 return 0;
9023 }
9024
9025 /* external cable loopback requires no extra steps */
9026 if (loopback == LOOPBACK_CABLE)
9027 return 0;
9028
9029 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9030 return -EINVAL;
9031}
9032
9033/*
9034 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9035 * used in the Verify Capability link width attribute.
9036 */
9037static u16 opa_to_vc_link_widths(u16 opa_widths)
9038{
9039 int i;
9040 u16 result = 0;
9041
9042 static const struct link_bits {
9043 u16 from;
9044 u16 to;
9045 } opa_link_xlate[] = {
9046 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9047 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9048 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9049 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9050 };
9051
9052 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9053 if (opa_widths & opa_link_xlate[i].from)
9054 result |= opa_link_xlate[i].to;
9055 }
9056 return result;
9057}
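/*
 * Example (illustrative): an OPA mask of OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X
 * translates to the VC link width bits 0b1001.
 */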
9058
9059/*
9060 * Set link attributes before moving to polling.
9061 */
9062static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9063{
9064 struct hfi1_devdata *dd = ppd->dd;
9065 u8 enable_lane_tx;
9066 u8 tx_polarity_inversion;
9067 u8 rx_polarity_inversion;
9068 int ret;
9069
9070 /* reset our fabric serdes to clear any lingering problems */
9071 fabric_serdes_reset(dd);
9072
9073 /* set the local tx rate - need to read-modify-write */
9074 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
17fb4f29 9075 &rx_polarity_inversion, &ppd->local_tx_rate);
9076 if (ret)
9077 goto set_local_link_attributes_fail;
9078
9079 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9080 /* set the tx rate to the fastest enabled */
9081 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9082 ppd->local_tx_rate = 1;
9083 else
9084 ppd->local_tx_rate = 0;
9085 } else {
9086 /* set the tx rate to all enabled */
9087 ppd->local_tx_rate = 0;
9088 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9089 ppd->local_tx_rate |= 2;
9090 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9091 ppd->local_tx_rate |= 1;
9092 }
9093
9094 enable_lane_tx = 0xF; /* enable all four lanes */
77241056 9095 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
17fb4f29 9096 rx_polarity_inversion, ppd->local_tx_rate);
9097 if (ret != HCMD_SUCCESS)
9098 goto set_local_link_attributes_fail;
9099
9100 /*
9101 * DC supports continuous updates.
9102 */
9103 ret = write_vc_local_phy(dd,
9104 0 /* no power management */,
9105 1 /* continuous updates */);
9106 if (ret != HCMD_SUCCESS)
9107 goto set_local_link_attributes_fail;
9108
9109 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9110 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9111 ppd->port_crc_mode_enabled);
9112 if (ret != HCMD_SUCCESS)
9113 goto set_local_link_attributes_fail;
9114
9115 ret = write_vc_local_link_width(dd, 0, 0,
9116 opa_to_vc_link_widths(
9117 ppd->link_width_enabled));
9118 if (ret != HCMD_SUCCESS)
9119 goto set_local_link_attributes_fail;
9120
9121 /* let peer know who we are */
9122 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9123 if (ret == HCMD_SUCCESS)
9124 return 0;
9125
9126set_local_link_attributes_fail:
9127 dd_dev_err(dd,
9128 "Failed to set local link attributes, return 0x%x\n",
9129 ret);
9130 return ret;
9131}
9132
9133/*
9134 * Call this to start the link.
9135 * Do not do anything if the link is disabled.
9136 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9137 */
9138int start_link(struct hfi1_pportdata *ppd)
9139{
9140 if (!ppd->link_enabled) {
9141 dd_dev_info(ppd->dd,
9142 "%s: stopping link start because link is disabled\n",
9143 __func__);
9144 return 0;
9145 }
9146 if (!ppd->driver_link_ready) {
9147 dd_dev_info(ppd->dd,
9148 "%s: stopping link start because driver is not ready\n",
9149 __func__);
9150 return 0;
9151 }
9152
623bba2d 9153 return set_link_state(ppd, HLS_DN_POLL);
9154}
9155
9156static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9157{
9158 struct hfi1_devdata *dd = ppd->dd;
9159 u64 mask;
9160 unsigned long timeout;
9161
9162 /*
9163 * Check for QSFP interrupt for t_init (SFF 8679)
9164 */
9165 timeout = jiffies + msecs_to_jiffies(2000);
9166 while (1) {
9167 mask = read_csr(dd, dd->hfi1_id ?
9168 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9169 if (!(mask & QSFP_HFI0_INT_N)) {
9170 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9171 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9172 break;
9173 }
9174 if (time_after(jiffies, timeout)) {
9175 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9176 __func__);
9177 break;
9178 }
9179 udelay(2);
9180 }
9181}
9182
9183static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9184{
9185 struct hfi1_devdata *dd = ppd->dd;
9186 u64 mask;
9187
9188 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9189 if (enable)
9190 mask |= (u64)QSFP_HFI0_INT_N;
9191 else
9192 mask &= ~(u64)QSFP_HFI0_INT_N;
9193 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9194}
9195
9196void reset_qsfp(struct hfi1_pportdata *ppd)
9197{
9198 struct hfi1_devdata *dd = ppd->dd;
9199 u64 mask, qsfp_mask;
9200
9201 /* Disable INT_N from triggering QSFP interrupts */
9202 set_qsfp_int_n(ppd, 0);
9203
9204 /* Reset the QSFP */
77241056 9205 mask = (u64)QSFP_HFI0_RESET_N;
8ebd4cf1 9206 qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
77241056 9207 qsfp_mask |= mask;
17fb4f29 9208 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
9209
9210 qsfp_mask = read_csr(dd,
17fb4f29 9211 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9212 qsfp_mask &= ~mask;
9213 write_csr(dd,
17fb4f29 9214 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9215
9216 udelay(10);
9217
9218 qsfp_mask |= mask;
9219 write_csr(dd,
17fb4f29 9220 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9221
9222 wait_for_qsfp_init(ppd);
9223
9224 /*
9225 * Allow INT_N to trigger the QSFP interrupt to watch
9226 * for alarms and warnings
9227 */
9228 set_qsfp_int_n(ppd, 1);
9229}
9230
9231static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9232 u8 *qsfp_interrupt_status)
9233{
9234 struct hfi1_devdata *dd = ppd->dd;
9235
9236 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9237 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9238 dd_dev_info(dd, "%s: QSFP cable on fire\n",
9239 __func__);
9240
9241 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9242 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9243 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9244 __func__);
9245
9246 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9247 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9248 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9249 __func__);
9250
9251 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9252 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9253 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9254 __func__);
9255
9256 /* Byte 2 is vendor specific */
9257
9258 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9259 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9260 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9261 __func__);
9262
9263 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9264 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9265 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9266 __func__);
9267
9268 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9269 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9270 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9271 __func__);
9272
9273 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9274 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9275 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9276 __func__);
9277
9278 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9279 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9280 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9281 __func__);
9282
9283 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9284 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9285 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9286 __func__);
9287
9288 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9289 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9290 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9291 __func__);
9292
9293 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9294 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9295 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9296 __func__);
9297
9298 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9299 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9300 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9301 __func__);
9302
9303 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9304 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9305 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9306 __func__);
9307
9308 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9309 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9310 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9311 __func__);
9312
9313 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9314 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9315 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9316 __func__);
9317
9318 /* Bytes 9-10 and 11-12 are reserved */
9319 /* Bytes 13-15 are vendor specific */
9320
9321 return 0;
9322}
9323
623bba2d 9324/* This routine will only be scheduled if the QSFP module present signal is asserted */
8ebd4cf1 9325void qsfp_event(struct work_struct *work)
9326{
9327 struct qsfp_data *qd;
9328 struct hfi1_pportdata *ppd;
9329 struct hfi1_devdata *dd;
9330
9331 qd = container_of(work, struct qsfp_data, qsfp_work);
9332 ppd = qd->ppd;
9333 dd = ppd->dd;
9334
9335 /* Sanity check */
9336 if (!qsfp_mod_present(ppd))
9337 return;
9338
9339 /*
 9340 * Turn the DC back on after the cable has been
9341 * re-inserted. Up until now, the DC has been in
9342 * reset to save power.
9343 */
9344 dc_start(dd);
9345
9346 if (qd->cache_refresh_required) {
8ebd4cf1 9347 set_qsfp_int_n(ppd, 0);
77241056 9348
9349 wait_for_qsfp_init(ppd);
9350
9351 /*
9352 * Allow INT_N to trigger the QSFP interrupt to watch
9353 * for alarms and warnings
77241056 9354 */
9355 set_qsfp_int_n(ppd, 1);
9356
9357 tune_serdes(ppd);
9358
9359 start_link(ppd);
9360 }
9361
9362 if (qd->check_interrupt_flags) {
9363 u8 qsfp_interrupt_status[16] = {0,};
9364
9365 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9366 &qsfp_interrupt_status[0], 16) != 16) {
77241056 9367 dd_dev_info(dd,
9368 "%s: Failed to read status of QSFP module\n",
9369 __func__);
9370 } else {
9371 unsigned long flags;
77241056 9372
9373 handle_qsfp_error_conditions(
9374 ppd, qsfp_interrupt_status);
9375 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9376 ppd->qsfp_info.check_interrupt_flags = 0;
9377 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
17fb4f29 9378 flags);
9379 }
9380 }
9381}
9382
8ebd4cf1 9383static void init_qsfp_int(struct hfi1_devdata *dd)
77241056 9384{
9385 struct hfi1_pportdata *ppd = dd->pport;
9386 u64 qsfp_mask, cce_int_mask;
9387 const int qsfp1_int_smask = QSFP1_INT % 64;
9388 const int qsfp2_int_smask = QSFP2_INT % 64;
77241056 9389
9390 /*
9391 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9392 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9393 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9394 * the index of the appropriate CSR in the CCEIntMask CSR array
9395 */
9396 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9397 (8 * (QSFP1_INT / 64)));
9398 if (dd->hfi1_id) {
9399 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9400 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9401 cce_int_mask);
9402 } else {
9403 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9404 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9405 cce_int_mask);
9406 }
9407
9408 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9409 /* Clear current status to avoid spurious interrupts */
9410 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9411 qsfp_mask);
9412 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9413 qsfp_mask);
9414
9415 set_qsfp_int_n(ppd, 0);
9416
9417 /* Handle active low nature of INT_N and MODPRST_N pins */
9418 if (qsfp_mod_present(ppd))
9419 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9420 write_csr(dd,
9421 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9422 qsfp_mask);
9423}
9424
9425/*
9426 * Do a one-time initialize of the LCB block.
9427 */
9428static void init_lcb(struct hfi1_devdata *dd)
9429{
9430 /* simulator does not correctly handle LCB cclk loopback, skip */
9431 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9432 return;
9433
9434 /* the DC has been reset earlier in the driver load */
9435
9436 /* set LCB for cclk loopback on the port */
9437 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9438 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9439 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9440 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9441 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9442 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9443 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9444}
9445
9446int bringup_serdes(struct hfi1_pportdata *ppd)
9447{
9448 struct hfi1_devdata *dd = ppd->dd;
9449 u64 guid;
9450 int ret;
9451
9452 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9453 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9454
9455 guid = ppd->guid;
9456 if (!guid) {
9457 if (dd->base_guid)
9458 guid = dd->base_guid + ppd->port - 1;
9459 ppd->guid = guid;
9460 }
9461
9462 /* Set linkinit_reason on power up per OPA spec */
9463 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9464
9465 /* one-time init of the LCB */
9466 init_lcb(dd);
9467
9468 if (loopback) {
9469 ret = init_loopback(dd);
9470 if (ret < 0)
9471 return ret;
9472 }
9473
9474 /* tune the SERDES to a ballpark setting for
9475 * optimal signal and bit error rate
9476 * Needs to be done before starting the link
9477 */
9478 tune_serdes(ppd);
9479
9480 return start_link(ppd);
9481}
9482
9483void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9484{
9485 struct hfi1_devdata *dd = ppd->dd;
9486
9487 /*
 9488 * Shut down the link and keep it down. First clear the flag that
 9489 * says the driver is willing to bring the link up (driver_link_ready).
9490 * Then make sure the link is not automatically restarted
9491 * (link_enabled). Cancel any pending restart. And finally
9492 * go offline.
9493 */
9494 ppd->driver_link_ready = 0;
9495 ppd->link_enabled = 0;
9496
9497 ppd->offline_disabled_reason =
9498 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
77241056 9499 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
17fb4f29 9500 OPA_LINKDOWN_REASON_SMA_DISABLED);
9501 set_link_state(ppd, HLS_DN_OFFLINE);
9502
9503 /* disable the port */
9504 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9505}
9506
9507static inline int init_cpu_counters(struct hfi1_devdata *dd)
9508{
9509 struct hfi1_pportdata *ppd;
9510 int i;
9511
9512 ppd = (struct hfi1_pportdata *)(dd + 1);
9513 for (i = 0; i < dd->num_pports; i++, ppd++) {
9514 ppd->ibport_data.rvp.rc_acks = NULL;
9515 ppd->ibport_data.rvp.rc_qacks = NULL;
9516 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9517 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9518 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9519 if (!ppd->ibport_data.rvp.rc_acks ||
9520 !ppd->ibport_data.rvp.rc_delayed_comp ||
9521 !ppd->ibport_data.rvp.rc_qacks)
9522 return -ENOMEM;
9523 }
9524
9525 return 0;
9526}
9527
9528static const char * const pt_names[] = {
9529 "expected",
9530 "eager",
9531 "invalid"
9532};
9533
9534static const char *pt_name(u32 type)
9535{
9536 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9537}
9538
9539/*
9540 * index is the index into the receive array
9541 */
9542void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9543 u32 type, unsigned long pa, u16 order)
9544{
9545 u64 reg;
9546 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9547 (dd->kregbase + RCV_ARRAY));
9548
9549 if (!(dd->flags & HFI1_PRESENT))
9550 goto done;
9551
9552 if (type == PT_INVALID) {
9553 pa = 0;
9554 } else if (type > PT_INVALID) {
9555 dd_dev_err(dd,
9556 "unexpected receive array type %u for index %u, not handled\n",
9557 type, index);
9558 goto done;
9559 }
9560
9561 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9562 pt_name(type), index, pa, (unsigned long)order);
9563
9564#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9565 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9566 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9567 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9568 << RCV_ARRAY_RT_ADDR_SHIFT;
9569 writeq(reg, base + (index * 8));
9570
9571 if (type == PT_EAGER)
9572 /*
9573 * Eager entries are written one-by-one so we have to push them
9574 * after we write the entry.
9575 */
9576 flush_wc();
9577done:
9578 return;
9579}
9580
9581void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9582{
9583 struct hfi1_devdata *dd = rcd->dd;
9584 u32 i;
9585
9586 /* this could be optimized */
9587 for (i = rcd->eager_base; i < rcd->eager_base +
9588 rcd->egrbufs.alloced; i++)
9589 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9590
9591 for (i = rcd->expected_base;
9592 i < rcd->expected_base + rcd->expected_count; i++)
9593 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9594}
9595
9596int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9597 struct hfi1_ctxt_info *kinfo)
9598{
9599 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9600 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9601 return 0;
9602}
9603
9604struct hfi1_message_header *hfi1_get_msgheader(
9605 struct hfi1_devdata *dd, __le32 *rhf_addr)
9606{
9607 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9608
9609 return (struct hfi1_message_header *)
9610 (rhf_addr - dd->rhf_offset + offset);
9611}
9612
9613static const char * const ib_cfg_name_strings[] = {
9614 "HFI1_IB_CFG_LIDLMC",
9615 "HFI1_IB_CFG_LWID_DG_ENB",
9616 "HFI1_IB_CFG_LWID_ENB",
9617 "HFI1_IB_CFG_LWID",
9618 "HFI1_IB_CFG_SPD_ENB",
9619 "HFI1_IB_CFG_SPD",
9620 "HFI1_IB_CFG_RXPOL_ENB",
9621 "HFI1_IB_CFG_LREV_ENB",
9622 "HFI1_IB_CFG_LINKLATENCY",
9623 "HFI1_IB_CFG_HRTBT",
9624 "HFI1_IB_CFG_OP_VLS",
9625 "HFI1_IB_CFG_VL_HIGH_CAP",
9626 "HFI1_IB_CFG_VL_LOW_CAP",
9627 "HFI1_IB_CFG_OVERRUN_THRESH",
9628 "HFI1_IB_CFG_PHYERR_THRESH",
9629 "HFI1_IB_CFG_LINKDEFAULT",
9630 "HFI1_IB_CFG_PKEYS",
9631 "HFI1_IB_CFG_MTU",
9632 "HFI1_IB_CFG_LSTATE",
9633 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9634 "HFI1_IB_CFG_PMA_TICKS",
9635 "HFI1_IB_CFG_PORT"
9636};
9637
9638static const char *ib_cfg_name(int which)
9639{
9640 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9641 return "invalid";
9642 return ib_cfg_name_strings[which];
9643}
9644
9645int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9646{
9647 struct hfi1_devdata *dd = ppd->dd;
9648 int val = 0;
9649
9650 switch (which) {
9651 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9652 val = ppd->link_width_enabled;
9653 break;
9654 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9655 val = ppd->link_width_active;
9656 break;
9657 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9658 val = ppd->link_speed_enabled;
9659 break;
9660 case HFI1_IB_CFG_SPD: /* current Link speed */
9661 val = ppd->link_speed_active;
9662 break;
9663
9664 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9665 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9666 case HFI1_IB_CFG_LINKLATENCY:
9667 goto unimplemented;
9668
9669 case HFI1_IB_CFG_OP_VLS:
9670 val = ppd->vls_operational;
9671 break;
9672 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9673 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9674 break;
9675 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9676 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9677 break;
9678 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9679 val = ppd->overrun_threshold;
9680 break;
9681 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9682 val = ppd->phy_error_threshold;
9683 break;
9684 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9685 val = dd->link_default;
9686 break;
9687
9688 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9689 case HFI1_IB_CFG_PMA_TICKS:
9690 default:
9691unimplemented:
9692 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9693 dd_dev_info(
9694 dd,
9695 "%s: which %s: not implemented\n",
9696 __func__,
9697 ib_cfg_name(which));
9698 break;
9699 }
9700
9701 return val;
9702}
9703
9704/*
9705 * The largest MAD packet size.
9706 */
9707#define MAX_MAD_PACKET 2048
9708
9709/*
9710 * Return the maximum header bytes that can go on the _wire_
9711 * for this device. This count includes the ICRC which is
9712 * not part of the packet held in memory but it is appended
9713 * by the HW.
9714 * This is dependent on the device's receive header entry size.
9715 * HFI allows this to be set per-receive context, but the
9716 * driver presently enforces a global value.
9717 */
9718u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9719{
9720 /*
9721 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9722 * the Receive Header Entry Size minus the PBC (or RHF) size
9723 * plus one DW for the ICRC appended by HW.
9724 *
9725 * dd->rcd[0].rcvhdrqentsize is in DW.
 9726 * We use rcd[0] as all contexts will have the same value. Also,
9727 * the first kernel context would have been allocated by now so
9728 * we are guaranteed a valid value.
9729 */
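	/* e.g. (illustrative): an entry size of 32 DW gives (32 - 2 + 1) << 2 = 124 bytes */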
9730 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9731}
9732
9733/*
9734 * Set Send Length
9735 * @ppd - per port data
9736 *
9737 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9738 * registers compare against LRH.PktLen, so use the max bytes included
9739 * in the LRH.
9740 *
9741 * This routine changes all VL values except VL15, which it maintains at
9742 * the same value.
9743 */
9744static void set_send_length(struct hfi1_pportdata *ppd)
9745{
9746 struct hfi1_devdata *dd = ppd->dd;
9747 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9748 u32 maxvlmtu = dd->vld[15].mtu;
9749 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9750 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9751 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9752 int i;
44306f15 9753 u32 thres;
9754
9755 for (i = 0; i < ppd->vls_supported; i++) {
9756 if (dd->vld[i].mtu > maxvlmtu)
9757 maxvlmtu = dd->vld[i].mtu;
9758 if (i <= 3)
9759 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9760 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9761 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9762 else
9763 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9764 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9765 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9766 }
9767 write_csr(dd, SEND_LEN_CHECK0, len1);
9768 write_csr(dd, SEND_LEN_CHECK1, len2);
9769 /* adjust kernel credit return thresholds based on new MTUs */
9770 /* all kernel receive contexts have the same hdrqentsize */
9771 for (i = 0; i < ppd->vls_supported; i++) {
9772 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9773 sc_mtu_to_threshold(dd->vld[i].sc,
9774 dd->vld[i].mtu,
17fb4f29 9775 dd->rcd[0]->rcvhdrqentsize));
9776 sc_set_cr_threshold(dd->vld[i].sc, thres);
9777 }
9778 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9779 sc_mtu_to_threshold(dd->vld[15].sc,
9780 dd->vld[15].mtu,
9781 dd->rcd[0]->rcvhdrqentsize));
9782 sc_set_cr_threshold(dd->vld[15].sc, thres);
9783
9784 /* Adjust maximum MTU for the port in DC */
9785 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9786 (ilog2(maxvlmtu >> 8) + 1);
9787 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9788 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9789 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9790 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9791 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9792}
9793
9794static void set_lidlmc(struct hfi1_pportdata *ppd)
9795{
9796 int i;
9797 u64 sreg = 0;
9798 struct hfi1_devdata *dd = ppd->dd;
9799 u32 mask = ~((1U << ppd->lmc) - 1);
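	/* e.g. (illustrative): lmc == 2 gives mask 0xfffffffc, wildcarding the low two LID bits */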
9800 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9801
9802 if (dd->hfi1_snoop.mode_flag)
9803 dd_dev_info(dd, "Set lid/lmc while snooping");
9804
9805 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9806 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9807 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
8638b77f 9808 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
77241056
MM
9809 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9810 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9811 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9812
9813 /*
9814 * Iterate over all the send contexts and set their SLID check
9815 */
9816 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9817 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9818 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9819 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9820
9821 for (i = 0; i < dd->chip_send_contexts; i++) {
9822 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9823 i, (u32)sreg);
9824 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9825 }
9826
9827 /* Now we have to do the same thing for the sdma engines */
9828 sdma_update_lmc(dd, mask, ppd->lid);
9829}
9830
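/*
 * Illustrative sketch (not part of the driver): how the LMC mask built in
 * set_lidlmc() above makes a port answer to 2^lmc consecutive LIDs. The
 * LID and LMC values are made-up examples.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t lid = 0x20, lmc = 2;
        uint32_t mask = ~((1U << lmc) - 1);     /* ignore the low lmc bits */
        uint32_t probe;

        for (probe = 0x1e; probe <= 0x25; probe++)
                printf("DLID 0x%02x %s\n", probe,
                       (probe & mask) == (lid & mask) ? "accepted" : "rejected");
        return 0;       /* 0x20..0x23 are accepted */
}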
9831static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9832{
9833 unsigned long timeout;
9834 u32 curr_state;
9835
9836 timeout = jiffies + msecs_to_jiffies(msecs);
9837 while (1) {
9838 curr_state = read_physical_state(dd);
9839 if (curr_state == state)
9840 break;
9841 if (time_after(jiffies, timeout)) {
9842 dd_dev_err(dd,
17fb4f29
JJ
9843 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9844 state, curr_state);
77241056
MM
9845 return -ETIMEDOUT;
9846 }
9847 usleep_range(1950, 2050); /* sleep 2ms-ish */
9848 }
9849
9850 return 0;
9851}
9852
9853/*
9854 * Helper for set_link_state(). Do not call except from that routine.
 9855 * Expects ppd->hls_lock to be held.
9856 *
9857 * @rem_reason value to be sent to the neighbor
9858 *
9859 * LinkDownReasons only set if transition succeeds.
9860 */
9861static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9862{
9863 struct hfi1_devdata *dd = ppd->dd;
9864 u32 pstate, previous_state;
9865 u32 last_local_state;
9866 u32 last_remote_state;
9867 int ret;
9868 int do_transition;
9869 int do_wait;
9870
9871 previous_state = ppd->host_link_state;
9872 ppd->host_link_state = HLS_GOING_OFFLINE;
9873 pstate = read_physical_state(dd);
9874 if (pstate == PLS_OFFLINE) {
9875 do_transition = 0; /* in right state */
9876 do_wait = 0; /* ...no need to wait */
9877 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9878 do_transition = 0; /* in an offline transient state */
9879 do_wait = 1; /* ...wait for it to settle */
9880 } else {
9881 do_transition = 1; /* need to move to offline */
9882 do_wait = 1; /* ...will need to wait */
9883 }
9884
9885 if (do_transition) {
9886 ret = set_physical_link_state(dd,
bf640096 9887 (rem_reason << 8) | PLS_OFFLINE);
77241056
MM
9888
9889 if (ret != HCMD_SUCCESS) {
9890 dd_dev_err(dd,
17fb4f29
JJ
9891 "Failed to transition to Offline link state, return %d\n",
9892 ret);
77241056
MM
9893 return -EINVAL;
9894 }
a9c05e35
BM
9895 if (ppd->offline_disabled_reason ==
9896 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
77241056 9897 ppd->offline_disabled_reason =
a9c05e35 9898 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
77241056
MM
9899 }
9900
9901 if (do_wait) {
9902 /* it can take a while for the link to go down */
dc060245 9903 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
77241056
MM
9904 if (ret < 0)
9905 return ret;
9906 }
9907
9908 /* make sure the logical state is also down */
9909 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9910
9911 /*
9912 * Now in charge of LCB - must be after the physical state is
9913 * offline.quiet and before host_link_state is changed.
9914 */
9915 set_host_lcb_access(dd);
9916 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9917 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9918
8ebd4cf1
EH
9919 if (ppd->port_type == PORT_TYPE_QSFP &&
9920 ppd->qsfp_info.limiting_active &&
9921 qsfp_mod_present(ppd)) {
765a6fac
DL
9922 int ret;
9923
9924 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
9925 if (ret == 0) {
9926 set_qsfp_tx(ppd, 0);
9927 release_chip_resource(dd, qsfp_resource(dd));
9928 } else {
9929 /* not fatal, but should warn */
9930 dd_dev_err(dd,
9931 "Unable to acquire lock to turn off QSFP TX\n");
9932 }
8ebd4cf1
EH
9933 }
9934
77241056
MM
9935 /*
9936 * The LNI has a mandatory wait time after the physical state
9937 * moves to Offline.Quiet. The wait time may be different
9938 * depending on how the link went down. The 8051 firmware
9939 * will observe the needed wait time and only move to ready
9940 * when that is completed. The largest of the quiet timeouts
05087f3b
DL
9941 * is 6s, so wait that long and then at least 0.5s more for
9942 * other transitions, and another 0.5s for a buffer.
77241056 9943 */
05087f3b 9944 ret = wait_fm_ready(dd, 7000);
77241056
MM
9945 if (ret) {
9946 dd_dev_err(dd,
17fb4f29 9947 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
77241056
MM
9948 /* state is really offline, so make it so */
9949 ppd->host_link_state = HLS_DN_OFFLINE;
9950 return ret;
9951 }
9952
9953 /*
9954 * The state is now offline and the 8051 is ready to accept host
9955 * requests.
9956 * - change our state
9957 * - notify others if we were previously in a linkup state
9958 */
9959 ppd->host_link_state = HLS_DN_OFFLINE;
9960 if (previous_state & HLS_UP) {
9961 /* went down while link was up */
9962 handle_linkup_change(dd, 0);
9963 } else if (previous_state
9964 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9965 /* went down while attempting link up */
9966 /* byte 1 of last_*_state is the failure reason */
9967 read_last_local_state(dd, &last_local_state);
9968 read_last_remote_state(dd, &last_remote_state);
9969 dd_dev_err(dd,
17fb4f29
JJ
9970 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9971 last_local_state, last_remote_state);
77241056
MM
9972 }
9973
9974 /* the active link width (downgrade) is 0 on link down */
9975 ppd->link_width_active = 0;
9976 ppd->link_width_downgrade_tx_active = 0;
9977 ppd->link_width_downgrade_rx_active = 0;
9978 ppd->current_egress_rate = 0;
9979 return 0;
9980}
9981
9982/* return the link state name */
9983static const char *link_state_name(u32 state)
9984{
9985 const char *name;
9986 int n = ilog2(state);
9987 static const char * const names[] = {
9988 [__HLS_UP_INIT_BP] = "INIT",
9989 [__HLS_UP_ARMED_BP] = "ARMED",
9990 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9991 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9992 [__HLS_DN_POLL_BP] = "POLL",
9993 [__HLS_DN_DISABLE_BP] = "DISABLE",
9994 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9995 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9996 [__HLS_GOING_UP_BP] = "GOING_UP",
9997 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9998 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9999 };
10000
10001 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10002 return name ? name : "unknown";
10003}
10004
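/*
 * Illustrative sketch (not part of the driver): the one-hot-bit-to-index
 * lookup used by link_state_name() above, with __builtin_ctz() standing in
 * for the kernel's ilog2(). The state bits and names here are made-up
 * stand-ins for the HLS_* masks.
 */
#include <stdio.h>

static const char * const names[] = { "INIT", "ARMED", "ACTIVE", "DOWNDEF" };

static const char *name_of(unsigned int state)
{
        unsigned int n = __builtin_ctz(state);  /* bit position of a one-hot value */

        return n < sizeof(names) / sizeof(names[0]) ? names[n] : "unknown";
}

int main(void)
{
        printf("%s %s\n", name_of(1u << 2), name_of(1u << 7)); /* ACTIVE unknown */
        return 0;
}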
10005/* return the link state reason name */
10006static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10007{
10008 if (state == HLS_UP_INIT) {
10009 switch (ppd->linkinit_reason) {
10010 case OPA_LINKINIT_REASON_LINKUP:
10011 return "(LINKUP)";
10012 case OPA_LINKINIT_REASON_FLAPPING:
10013 return "(FLAPPING)";
10014 case OPA_LINKINIT_OUTSIDE_POLICY:
10015 return "(OUTSIDE_POLICY)";
10016 case OPA_LINKINIT_QUARANTINED:
10017 return "(QUARANTINED)";
10018 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10019 return "(INSUFIC_CAPABILITY)";
10020 default:
10021 break;
10022 }
10023 }
10024 return "";
10025}
10026
10027/*
10028 * driver_physical_state - convert the driver's notion of a port's
10029 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10030 * Return -1 (converted to a u32) to indicate error.
10031 */
10032u32 driver_physical_state(struct hfi1_pportdata *ppd)
10033{
10034 switch (ppd->host_link_state) {
10035 case HLS_UP_INIT:
10036 case HLS_UP_ARMED:
10037 case HLS_UP_ACTIVE:
10038 return IB_PORTPHYSSTATE_LINKUP;
10039 case HLS_DN_POLL:
10040 return IB_PORTPHYSSTATE_POLLING;
10041 case HLS_DN_DISABLE:
10042 return IB_PORTPHYSSTATE_DISABLED;
10043 case HLS_DN_OFFLINE:
10044 return OPA_PORTPHYSSTATE_OFFLINE;
10045 case HLS_VERIFY_CAP:
10046 return IB_PORTPHYSSTATE_POLLING;
10047 case HLS_GOING_UP:
10048 return IB_PORTPHYSSTATE_POLLING;
10049 case HLS_GOING_OFFLINE:
10050 return OPA_PORTPHYSSTATE_OFFLINE;
10051 case HLS_LINK_COOLDOWN:
10052 return OPA_PORTPHYSSTATE_OFFLINE;
10053 case HLS_DN_DOWNDEF:
10054 default:
10055 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10056 ppd->host_link_state);
10057 return -1;
10058 }
10059}
10060
10061/*
10062 * driver_logical_state - convert the driver's notion of a port's
10063 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10064 * (converted to a u32) to indicate error.
10065 */
10066u32 driver_logical_state(struct hfi1_pportdata *ppd)
10067{
10068 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
10069 return IB_PORT_DOWN;
10070
10071 switch (ppd->host_link_state & HLS_UP) {
10072 case HLS_UP_INIT:
10073 return IB_PORT_INIT;
10074 case HLS_UP_ARMED:
10075 return IB_PORT_ARMED;
10076 case HLS_UP_ACTIVE:
10077 return IB_PORT_ACTIVE;
10078 default:
10079 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10080 ppd->host_link_state);
10081 return -1;
10082 }
10083}
10084
10085void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10086 u8 neigh_reason, u8 rem_reason)
10087{
10088 if (ppd->local_link_down_reason.latest == 0 &&
10089 ppd->neigh_link_down_reason.latest == 0) {
10090 ppd->local_link_down_reason.latest = lcl_reason;
10091 ppd->neigh_link_down_reason.latest = neigh_reason;
10092 ppd->remote_link_down_reason = rem_reason;
10093 }
10094}
10095
10096/*
10097 * Change the physical and/or logical link state.
10098 *
10099 * Do not call this routine while inside an interrupt. It contains
10100 * calls to routines that can take multiple seconds to finish.
10101 *
10102 * Returns 0 on success, -errno on failure.
10103 */
10104int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10105{
10106 struct hfi1_devdata *dd = ppd->dd;
10107 struct ib_event event = {.device = NULL};
10108 int ret1, ret = 0;
10109 int was_up, is_down;
10110 int orig_new_state, poll_bounce;
10111
10112 mutex_lock(&ppd->hls_lock);
10113
10114 orig_new_state = state;
10115 if (state == HLS_DN_DOWNDEF)
10116 state = dd->link_default;
10117
10118 /* interpret poll -> poll as a link bounce */
d0d236ea
JJ
10119 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10120 state == HLS_DN_POLL;
77241056
MM
10121
10122 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
17fb4f29
JJ
10123 link_state_name(ppd->host_link_state),
10124 link_state_name(orig_new_state),
10125 poll_bounce ? "(bounce) " : "",
10126 link_state_reason_name(ppd, state));
77241056
MM
10127
10128 was_up = !!(ppd->host_link_state & HLS_UP);
10129
10130 /*
10131 * If we're going to a (HLS_*) link state that implies the logical
10132 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10133 * reset is_sm_config_started to 0.
10134 */
10135 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10136 ppd->is_sm_config_started = 0;
10137
10138 /*
10139 * Do nothing if the states match. Let a poll to poll link bounce
10140 * go through.
10141 */
10142 if (ppd->host_link_state == state && !poll_bounce)
10143 goto done;
10144
10145 switch (state) {
10146 case HLS_UP_INIT:
d0d236ea
JJ
10147 if (ppd->host_link_state == HLS_DN_POLL &&
10148 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
77241056
MM
10149 /*
10150 * Quick link up jumps from polling to here.
10151 *
10152 * Whether in normal or loopback mode, the
10153 * simulator jumps from polling to link up.
10154 * Accept that here.
10155 */
17fb4f29 10156 /* OK */
77241056
MM
10157 } else if (ppd->host_link_state != HLS_GOING_UP) {
10158 goto unexpected;
10159 }
10160
10161 ppd->host_link_state = HLS_UP_INIT;
10162 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10163 if (ret) {
10164 /* logical state didn't change, stay at going_up */
10165 ppd->host_link_state = HLS_GOING_UP;
10166 dd_dev_err(dd,
17fb4f29
JJ
10167 "%s: logical state did not change to INIT\n",
10168 __func__);
77241056
MM
10169 } else {
10170 /* clear old transient LINKINIT_REASON code */
10171 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10172 ppd->linkinit_reason =
10173 OPA_LINKINIT_REASON_LINKUP;
10174
10175 /* enable the port */
10176 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10177
10178 handle_linkup_change(dd, 1);
10179 }
10180 break;
10181 case HLS_UP_ARMED:
10182 if (ppd->host_link_state != HLS_UP_INIT)
10183 goto unexpected;
10184
10185 ppd->host_link_state = HLS_UP_ARMED;
10186 set_logical_state(dd, LSTATE_ARMED);
10187 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10188 if (ret) {
10189 /* logical state didn't change, stay at init */
10190 ppd->host_link_state = HLS_UP_INIT;
10191 dd_dev_err(dd,
17fb4f29
JJ
10192 "%s: logical state did not change to ARMED\n",
10193 __func__);
77241056
MM
10194 }
10195 /*
10196 * The simulator does not currently implement SMA messages,
10197 * so neighbor_normal is not set. Set it here when we first
10198 * move to Armed.
10199 */
10200 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10201 ppd->neighbor_normal = 1;
10202 break;
10203 case HLS_UP_ACTIVE:
10204 if (ppd->host_link_state != HLS_UP_ARMED)
10205 goto unexpected;
10206
10207 ppd->host_link_state = HLS_UP_ACTIVE;
10208 set_logical_state(dd, LSTATE_ACTIVE);
10209 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10210 if (ret) {
10211 /* logical state didn't change, stay at armed */
10212 ppd->host_link_state = HLS_UP_ARMED;
10213 dd_dev_err(dd,
17fb4f29
JJ
10214 "%s: logical state did not change to ACTIVE\n",
10215 __func__);
77241056 10216 } else {
77241056
MM
10217 /* tell all engines to go running */
10218 sdma_all_running(dd);
10219
 10220 /* Signal the IB layer that the port has gone active */
ec3f2c12 10221 event.device = &dd->verbs_dev.rdi.ibdev;
77241056
MM
10222 event.element.port_num = ppd->port;
10223 event.event = IB_EVENT_PORT_ACTIVE;
10224 }
10225 break;
10226 case HLS_DN_POLL:
10227 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10228 ppd->host_link_state == HLS_DN_OFFLINE) &&
10229 dd->dc_shutdown)
10230 dc_start(dd);
10231 /* Hand LED control to the DC */
10232 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10233
10234 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10235 u8 tmp = ppd->link_enabled;
10236
10237 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10238 if (ret) {
10239 ppd->link_enabled = tmp;
10240 break;
10241 }
10242 ppd->remote_link_down_reason = 0;
10243
10244 if (ppd->driver_link_ready)
10245 ppd->link_enabled = 1;
10246 }
10247
fb9036dd 10248 set_all_slowpath(ppd->dd);
77241056
MM
10249 ret = set_local_link_attributes(ppd);
10250 if (ret)
10251 break;
10252
10253 ppd->port_error_action = 0;
10254 ppd->host_link_state = HLS_DN_POLL;
10255
10256 if (quick_linkup) {
10257 /* quick linkup does not go into polling */
10258 ret = do_quick_linkup(dd);
10259 } else {
10260 ret1 = set_physical_link_state(dd, PLS_POLLING);
10261 if (ret1 != HCMD_SUCCESS) {
10262 dd_dev_err(dd,
17fb4f29
JJ
10263 "Failed to transition to Polling link state, return 0x%x\n",
10264 ret1);
77241056
MM
10265 ret = -EINVAL;
10266 }
10267 }
a9c05e35
BM
10268 ppd->offline_disabled_reason =
10269 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
77241056
MM
10270 /*
10271 * If an error occurred above, go back to offline. The
10272 * caller may reschedule another attempt.
10273 */
10274 if (ret)
10275 goto_offline(ppd, 0);
10276 break;
10277 case HLS_DN_DISABLE:
10278 /* link is disabled */
10279 ppd->link_enabled = 0;
10280
10281 /* allow any state to transition to disabled */
10282
10283 /* must transition to offline first */
10284 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10285 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10286 if (ret)
10287 break;
10288 ppd->remote_link_down_reason = 0;
10289 }
10290
10291 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10292 if (ret1 != HCMD_SUCCESS) {
10293 dd_dev_err(dd,
17fb4f29
JJ
10294 "Failed to transition to Disabled link state, return 0x%x\n",
10295 ret1);
77241056
MM
10296 ret = -EINVAL;
10297 break;
10298 }
10299 ppd->host_link_state = HLS_DN_DISABLE;
10300 dc_shutdown(dd);
10301 break;
10302 case HLS_DN_OFFLINE:
10303 if (ppd->host_link_state == HLS_DN_DISABLE)
10304 dc_start(dd);
10305
10306 /* allow any state to transition to offline */
10307 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10308 if (!ret)
10309 ppd->remote_link_down_reason = 0;
10310 break;
10311 case HLS_VERIFY_CAP:
10312 if (ppd->host_link_state != HLS_DN_POLL)
10313 goto unexpected;
10314 ppd->host_link_state = HLS_VERIFY_CAP;
10315 break;
10316 case HLS_GOING_UP:
10317 if (ppd->host_link_state != HLS_VERIFY_CAP)
10318 goto unexpected;
10319
10320 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10321 if (ret1 != HCMD_SUCCESS) {
10322 dd_dev_err(dd,
17fb4f29
JJ
10323 "Failed to transition to link up state, return 0x%x\n",
10324 ret1);
77241056
MM
10325 ret = -EINVAL;
10326 break;
10327 }
10328 ppd->host_link_state = HLS_GOING_UP;
10329 break;
10330
10331 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10332 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10333 default:
10334 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
17fb4f29 10335 __func__, state);
77241056
MM
10336 ret = -EINVAL;
10337 break;
10338 }
10339
10340 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10341 HLS_DN_DISABLE | HLS_DN_OFFLINE));
10342
10343 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10344 ppd->neigh_link_down_reason.sma == 0) {
10345 ppd->local_link_down_reason.sma =
10346 ppd->local_link_down_reason.latest;
10347 ppd->neigh_link_down_reason.sma =
10348 ppd->neigh_link_down_reason.latest;
10349 }
10350
10351 goto done;
10352
10353unexpected:
10354 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
17fb4f29
JJ
10355 __func__, link_state_name(ppd->host_link_state),
10356 link_state_name(state));
77241056
MM
10357 ret = -EINVAL;
10358
10359done:
10360 mutex_unlock(&ppd->hls_lock);
10361
10362 if (event.device)
10363 ib_dispatch_event(&event);
10364
10365 return ret;
10366}
10367
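/*
 * Illustrative sketch (not part of the driver): the normal bring-up chain
 * that set_link_state() above enforces with its per-case checks, reduced to
 * a tiny table. Quick-linkup, the simulator shortcut, and the down/offline
 * paths are deliberately left out.
 */
#include <stdio.h>
#include <string.h>

static const char * const chain[] = {
        "OFFLINE", "POLL", "VERIFY_CAP", "GOING_UP", "INIT", "ARMED", "ACTIVE"
};

/* a step is allowed only from the immediately preceding state in the chain */
static int allowed(const char *from, const char *to)
{
        unsigned int i;

        for (i = 1; i < sizeof(chain) / sizeof(chain[0]); i++)
                if (!strcmp(chain[i], to))
                        return !strcmp(chain[i - 1], from);
        return 0;
}

int main(void)
{
        printf("%d %d\n", allowed("ARMED", "ACTIVE"), allowed("INIT", "ACTIVE")); /* 1 0 */
        return 0;
}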
10368int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10369{
10370 u64 reg;
10371 int ret = 0;
10372
10373 switch (which) {
10374 case HFI1_IB_CFG_LIDLMC:
10375 set_lidlmc(ppd);
10376 break;
10377 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10378 /*
10379 * The VL Arbitrator high limit is sent in units of 4k
10380 * bytes, while HFI stores it in units of 64 bytes.
10381 */
8638b77f 10382 val *= 4096 / 64;
77241056
MM
10383 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10384 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10385 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10386 break;
10387 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10388 /* HFI only supports POLL as the default link down state */
10389 if (val != HLS_DN_POLL)
10390 ret = -EINVAL;
10391 break;
10392 case HFI1_IB_CFG_OP_VLS:
10393 if (ppd->vls_operational != val) {
10394 ppd->vls_operational = val;
10395 if (!ppd->port)
10396 ret = -EINVAL;
77241056
MM
10397 }
10398 break;
10399 /*
10400 * For link width, link width downgrade, and speed enable, always AND
10401 * the setting with what is actually supported. This has two benefits.
10402 * First, enabled can't have unsupported values, no matter what the
10403 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10404 * "fill in with your supported value" have all the bits in the
10405 * field set, so simply ANDing with supported has the desired result.
10406 */
10407 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10408 ppd->link_width_enabled = val & ppd->link_width_supported;
10409 break;
10410 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10411 ppd->link_width_downgrade_enabled =
10412 val & ppd->link_width_downgrade_supported;
10413 break;
10414 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10415 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10416 break;
10417 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10418 /*
 10419 * HFI does not follow IB specs; save this value
 10420 * so we can report it if asked.
10421 */
10422 ppd->overrun_threshold = val;
10423 break;
10424 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10425 /*
 10426 * HFI does not follow IB specs; save this value
 10427 * so we can report it if asked.
10428 */
10429 ppd->phy_error_threshold = val;
10430 break;
10431
10432 case HFI1_IB_CFG_MTU:
10433 set_send_length(ppd);
10434 break;
10435
10436 case HFI1_IB_CFG_PKEYS:
10437 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10438 set_partition_keys(ppd);
10439 break;
10440
10441 default:
10442 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10443 dd_dev_info(ppd->dd,
17fb4f29
JJ
10444 "%s: which %s, val 0x%x: not implemented\n",
10445 __func__, ib_cfg_name(which), val);
77241056
MM
10446 break;
10447 }
10448 return ret;
10449}
10450
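/*
 * Illustrative sketch (not part of the driver): the two small tricks used in
 * hfi1_set_ib_cfg() above - converting the VL arbitration high limit from
 * 4 KB units to the 64-byte units the HFI stores, and ANDing an enable
 * request with the supported mask so an all-ones wildcard simply means
 * "everything supported". The mask values are made-up examples.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t limit_4k = 3;
        uint32_t limit_64b = limit_4k * (4096 / 64);    /* 3 * 64 = 192 */

        uint16_t supported = 0x000e;                    /* assumed supported widths */
        uint16_t wildcard = 0xffff;                     /* "fill in with supported" */

        printf("limit in 64B units: %u\n", limit_64b);
        printf("enabled: 0x%04x\n", (unsigned)(wildcard & supported)); /* 0x000e */
        return 0;
}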
10451/* begin functions related to vl arbitration table caching */
10452static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10453{
10454 int i;
10455
10456 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10457 VL_ARB_LOW_PRIO_TABLE_SIZE);
10458 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10459 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10460
10461 /*
10462 * Note that we always return values directly from the
10463 * 'vl_arb_cache' (and do no CSR reads) in response to a
10464 * 'Get(VLArbTable)'. This is obviously correct after a
10465 * 'Set(VLArbTable)', since the cache will then be up to
10466 * date. But it's also correct prior to any 'Set(VLArbTable)'
10467 * since then both the cache, and the relevant h/w registers
10468 * will be zeroed.
10469 */
10470
10471 for (i = 0; i < MAX_PRIO_TABLE; i++)
10472 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10473}
10474
10475/*
10476 * vl_arb_lock_cache
10477 *
10478 * All other vl_arb_* functions should be called only after locking
10479 * the cache.
10480 */
10481static inline struct vl_arb_cache *
10482vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10483{
10484 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10485 return NULL;
10486 spin_lock(&ppd->vl_arb_cache[idx].lock);
10487 return &ppd->vl_arb_cache[idx];
10488}
10489
10490static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10491{
10492 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10493}
10494
10495static void vl_arb_get_cache(struct vl_arb_cache *cache,
10496 struct ib_vl_weight_elem *vl)
10497{
10498 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10499}
10500
10501static void vl_arb_set_cache(struct vl_arb_cache *cache,
10502 struct ib_vl_weight_elem *vl)
10503{
10504 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10505}
10506
10507static int vl_arb_match_cache(struct vl_arb_cache *cache,
10508 struct ib_vl_weight_elem *vl)
10509{
10510 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10511}
f4d507cd 10512
77241056
MM
10513/* end functions related to vl arbitration table caching */
10514
10515static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10516 u32 size, struct ib_vl_weight_elem *vl)
10517{
10518 struct hfi1_devdata *dd = ppd->dd;
10519 u64 reg;
10520 unsigned int i, is_up = 0;
10521 int drain, ret = 0;
10522
10523 mutex_lock(&ppd->hls_lock);
10524
10525 if (ppd->host_link_state & HLS_UP)
10526 is_up = 1;
10527
10528 drain = !is_ax(dd) && is_up;
10529
10530 if (drain)
10531 /*
10532 * Before adjusting VL arbitration weights, empty per-VL
10533 * FIFOs, otherwise a packet whose VL weight is being
10534 * set to 0 could get stuck in a FIFO with no chance to
10535 * egress.
10536 */
10537 ret = stop_drain_data_vls(dd);
10538
10539 if (ret) {
10540 dd_dev_err(
10541 dd,
10542 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10543 __func__);
10544 goto err;
10545 }
10546
10547 for (i = 0; i < size; i++, vl++) {
10548 /*
10549 * NOTE: The low priority shift and mask are used here, but
10550 * they are the same for both the low and high registers.
10551 */
10552 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10553 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10554 | (((u64)vl->weight
10555 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10556 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10557 write_csr(dd, target + (i * 8), reg);
10558 }
10559 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10560
10561 if (drain)
10562 open_fill_data_vls(dd); /* reopen all VLs */
10563
10564err:
10565 mutex_unlock(&ppd->hls_lock);
10566
10567 return ret;
10568}
10569
10570/*
10571 * Read one credit merge VL register.
10572 */
10573static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10574 struct vl_limit *vll)
10575{
10576 u64 reg = read_csr(dd, csr);
10577
10578 vll->dedicated = cpu_to_be16(
10579 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10580 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10581 vll->shared = cpu_to_be16(
10582 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10583 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10584}
10585
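/*
 * Illustrative sketch (not part of the driver): the shift-and-mask field
 * extraction pattern read_one_cm_vl() above applies to a credit CSR. The
 * field positions and the register value are made-up; the real SHIFT/MASK
 * constants come from the chip register definitions.
 */
#include <stdio.h>
#include <stdint.h>

#define DEDICATED_SHIFT 0
#define DEDICATED_MASK  0xffffULL
#define SHARED_SHIFT    16
#define SHARED_MASK     0xffffULL

int main(void)
{
        uint64_t reg = 0x200050ULL;     /* hypothetical CSR value */
        unsigned dedicated = (reg >> DEDICATED_SHIFT) & DEDICATED_MASK;
        unsigned shared = (reg >> SHARED_SHIFT) & SHARED_MASK;

        printf("dedicated %u shared %u\n", dedicated, shared);  /* 80 32 */
        return 0;
}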
10586/*
10587 * Read the current credit merge limits.
10588 */
10589static int get_buffer_control(struct hfi1_devdata *dd,
10590 struct buffer_control *bc, u16 *overall_limit)
10591{
10592 u64 reg;
10593 int i;
10594
10595 /* not all entries are filled in */
10596 memset(bc, 0, sizeof(*bc));
10597
10598 /* OPA and HFI have a 1-1 mapping */
10599 for (i = 0; i < TXE_NUM_DATA_VL; i++)
8638b77f 10600 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
77241056
MM
10601
10602 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10603 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10604
10605 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10606 bc->overall_shared_limit = cpu_to_be16(
10607 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10608 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10609 if (overall_limit)
10610 *overall_limit = (reg
10611 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10612 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10613 return sizeof(struct buffer_control);
10614}
10615
10616static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10617{
10618 u64 reg;
10619 int i;
10620
10621 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10622 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10623 for (i = 0; i < sizeof(u64); i++) {
10624 u8 byte = *(((u8 *)&reg) + i);
10625
10626 dp->vlnt[2 * i] = byte & 0xf;
10627 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10628 }
10629
10630 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10631 for (i = 0; i < sizeof(u64); i++) {
10632 u8 byte = *(((u8 *)&reg) + i);
10633
10634 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10635 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10636 }
10637 return sizeof(struct sc2vlnt);
10638}
10639
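/*
 * Illustrative sketch (not part of the driver): unpacking sixteen 4-bit
 * SC-to-VLnt entries from one 64-bit register, the same byte-at-a-time walk
 * get_sc2vlnt() above performs. The register value is a made-up example
 * that maps SC i to VL (i % 8).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t reg = 0x7654321076543210ULL;
        uint8_t vlnt[16];
        int i;

        for (i = 0; i < 8; i++) {
                uint8_t byte = (reg >> (8 * i)) & 0xff;

                vlnt[2 * i] = byte & 0xf;
                vlnt[2 * i + 1] = (byte & 0xf0) >> 4;
        }
        for (i = 0; i < 16; i++)
                printf("SC%d -> VL%d\n", i, vlnt[i]);
        return 0;
}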
10640static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10641 struct ib_vl_weight_elem *vl)
10642{
10643 unsigned int i;
10644
10645 for (i = 0; i < nelems; i++, vl++) {
10646 vl->vl = 0xf;
10647 vl->weight = 0;
10648 }
10649}
10650
10651static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10652{
10653 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
17fb4f29
JJ
10654 DC_SC_VL_VAL(15_0,
10655 0, dp->vlnt[0] & 0xf,
10656 1, dp->vlnt[1] & 0xf,
10657 2, dp->vlnt[2] & 0xf,
10658 3, dp->vlnt[3] & 0xf,
10659 4, dp->vlnt[4] & 0xf,
10660 5, dp->vlnt[5] & 0xf,
10661 6, dp->vlnt[6] & 0xf,
10662 7, dp->vlnt[7] & 0xf,
10663 8, dp->vlnt[8] & 0xf,
10664 9, dp->vlnt[9] & 0xf,
10665 10, dp->vlnt[10] & 0xf,
10666 11, dp->vlnt[11] & 0xf,
10667 12, dp->vlnt[12] & 0xf,
10668 13, dp->vlnt[13] & 0xf,
10669 14, dp->vlnt[14] & 0xf,
10670 15, dp->vlnt[15] & 0xf));
77241056 10671 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
17fb4f29
JJ
10672 DC_SC_VL_VAL(31_16,
10673 16, dp->vlnt[16] & 0xf,
10674 17, dp->vlnt[17] & 0xf,
10675 18, dp->vlnt[18] & 0xf,
10676 19, dp->vlnt[19] & 0xf,
10677 20, dp->vlnt[20] & 0xf,
10678 21, dp->vlnt[21] & 0xf,
10679 22, dp->vlnt[22] & 0xf,
10680 23, dp->vlnt[23] & 0xf,
10681 24, dp->vlnt[24] & 0xf,
10682 25, dp->vlnt[25] & 0xf,
10683 26, dp->vlnt[26] & 0xf,
10684 27, dp->vlnt[27] & 0xf,
10685 28, dp->vlnt[28] & 0xf,
10686 29, dp->vlnt[29] & 0xf,
10687 30, dp->vlnt[30] & 0xf,
10688 31, dp->vlnt[31] & 0xf));
77241056
MM
10689}
10690
10691static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10692 u16 limit)
10693{
10694 if (limit != 0)
10695 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
17fb4f29 10696 what, (int)limit, idx);
77241056
MM
10697}
10698
 10699/* change only the shared limit portion of SendCmGlobalCredit */
10700static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10701{
10702 u64 reg;
10703
10704 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10705 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10706 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10707 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10708}
10709
 10710/* change only the total credit limit portion of SendCmGlobalCredit */
10711static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10712{
10713 u64 reg;
10714
10715 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10716 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10717 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10718 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10719}
10720
10721/* set the given per-VL shared limit */
10722static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10723{
10724 u64 reg;
10725 u32 addr;
10726
10727 if (vl < TXE_NUM_DATA_VL)
10728 addr = SEND_CM_CREDIT_VL + (8 * vl);
10729 else
10730 addr = SEND_CM_CREDIT_VL15;
10731
10732 reg = read_csr(dd, addr);
10733 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10734 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10735 write_csr(dd, addr, reg);
10736}
10737
10738/* set the given per-VL dedicated limit */
10739static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10740{
10741 u64 reg;
10742 u32 addr;
10743
10744 if (vl < TXE_NUM_DATA_VL)
10745 addr = SEND_CM_CREDIT_VL + (8 * vl);
10746 else
10747 addr = SEND_CM_CREDIT_VL15;
10748
10749 reg = read_csr(dd, addr);
10750 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10751 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10752 write_csr(dd, addr, reg);
10753}
10754
10755/* spin until the given per-VL status mask bits clear */
10756static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10757 const char *which)
10758{
10759 unsigned long timeout;
10760 u64 reg;
10761
10762 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10763 while (1) {
10764 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10765
10766 if (reg == 0)
10767 return; /* success */
10768 if (time_after(jiffies, timeout))
10769 break; /* timed out */
10770 udelay(1);
10771 }
10772
10773 dd_dev_err(dd,
17fb4f29
JJ
10774 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10775 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
77241056
MM
10776 /*
10777 * If this occurs, it is likely there was a credit loss on the link.
10778 * The only recovery from that is a link bounce.
10779 */
10780 dd_dev_err(dd,
17fb4f29 10781 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
77241056
MM
10782}
10783
10784/*
10785 * The number of credits on the VLs may be changed while everything
10786 * is "live", but the following algorithm must be followed due to
10787 * how the hardware is actually implemented. In particular,
10788 * Return_Credit_Status[] is the only correct status check.
10789 *
10790 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10791 * set Global_Shared_Credit_Limit = 0
10792 * use_all_vl = 1
10793 * mask0 = all VLs that are changing either dedicated or shared limits
10794 * set Shared_Limit[mask0] = 0
10795 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10796 * if (changing any dedicated limit)
10797 * mask1 = all VLs that are lowering dedicated limits
10798 * lower Dedicated_Limit[mask1]
10799 * spin until Return_Credit_Status[mask1] == 0
10800 * raise Dedicated_Limits
10801 * raise Shared_Limits
10802 * raise Global_Shared_Credit_Limit
10803 *
10804 * lower = if the new limit is lower, set the limit to the new value
10805 * raise = if the new limit is higher than the current value (may be changed
10806 * earlier in the algorithm), set the new limit to the new value
10807 */
8a4d3444
MM
10808int set_buffer_control(struct hfi1_pportdata *ppd,
10809 struct buffer_control *new_bc)
77241056 10810{
8a4d3444 10811 struct hfi1_devdata *dd = ppd->dd;
77241056
MM
10812 u64 changing_mask, ld_mask, stat_mask;
10813 int change_count;
10814 int i, use_all_mask;
10815 int this_shared_changing;
8a4d3444 10816 int vl_count = 0, ret;
77241056
MM
10817 /*
10818 * A0: add the variable any_shared_limit_changing below and in the
10819 * algorithm above. If removing A0 support, it can be removed.
10820 */
10821 int any_shared_limit_changing;
10822 struct buffer_control cur_bc;
10823 u8 changing[OPA_MAX_VLS];
10824 u8 lowering_dedicated[OPA_MAX_VLS];
10825 u16 cur_total;
10826 u32 new_total = 0;
10827 const u64 all_mask =
10828 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10829 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10830 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10831 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10832 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10833 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10834 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10835 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10836 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10837
10838#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10839#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10840
77241056
MM
10841 /* find the new total credits, do sanity check on unused VLs */
10842 for (i = 0; i < OPA_MAX_VLS; i++) {
10843 if (valid_vl(i)) {
10844 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10845 continue;
10846 }
10847 nonzero_msg(dd, i, "dedicated",
17fb4f29 10848 be16_to_cpu(new_bc->vl[i].dedicated));
77241056 10849 nonzero_msg(dd, i, "shared",
17fb4f29 10850 be16_to_cpu(new_bc->vl[i].shared));
77241056
MM
10851 new_bc->vl[i].dedicated = 0;
10852 new_bc->vl[i].shared = 0;
10853 }
10854 new_total += be16_to_cpu(new_bc->overall_shared_limit);
bff14bb6 10855
77241056
MM
10856 /* fetch the current values */
10857 get_buffer_control(dd, &cur_bc, &cur_total);
10858
10859 /*
10860 * Create the masks we will use.
10861 */
10862 memset(changing, 0, sizeof(changing));
10863 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
4d114fdd
JJ
10864 /*
10865 * NOTE: Assumes that the individual VL bits are adjacent and in
10866 * increasing order
10867 */
77241056
MM
10868 stat_mask =
10869 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10870 changing_mask = 0;
10871 ld_mask = 0;
10872 change_count = 0;
10873 any_shared_limit_changing = 0;
10874 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10875 if (!valid_vl(i))
10876 continue;
10877 this_shared_changing = new_bc->vl[i].shared
10878 != cur_bc.vl[i].shared;
10879 if (this_shared_changing)
10880 any_shared_limit_changing = 1;
d0d236ea
JJ
10881 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10882 this_shared_changing) {
77241056
MM
10883 changing[i] = 1;
10884 changing_mask |= stat_mask;
10885 change_count++;
10886 }
10887 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10888 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10889 lowering_dedicated[i] = 1;
10890 ld_mask |= stat_mask;
10891 }
10892 }
10893
10894 /* bracket the credit change with a total adjustment */
10895 if (new_total > cur_total)
10896 set_global_limit(dd, new_total);
10897
10898 /*
10899 * Start the credit change algorithm.
10900 */
10901 use_all_mask = 0;
10902 if ((be16_to_cpu(new_bc->overall_shared_limit) <
995deafa
MM
10903 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10904 (is_ax(dd) && any_shared_limit_changing)) {
77241056
MM
10905 set_global_shared(dd, 0);
10906 cur_bc.overall_shared_limit = 0;
10907 use_all_mask = 1;
10908 }
10909
10910 for (i = 0; i < NUM_USABLE_VLS; i++) {
10911 if (!valid_vl(i))
10912 continue;
10913
10914 if (changing[i]) {
10915 set_vl_shared(dd, i, 0);
10916 cur_bc.vl[i].shared = 0;
10917 }
10918 }
10919
10920 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
17fb4f29 10921 "shared");
77241056
MM
10922
10923 if (change_count > 0) {
10924 for (i = 0; i < NUM_USABLE_VLS; i++) {
10925 if (!valid_vl(i))
10926 continue;
10927
10928 if (lowering_dedicated[i]) {
10929 set_vl_dedicated(dd, i,
17fb4f29
JJ
10930 be16_to_cpu(new_bc->
10931 vl[i].dedicated));
77241056
MM
10932 cur_bc.vl[i].dedicated =
10933 new_bc->vl[i].dedicated;
10934 }
10935 }
10936
10937 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10938
10939 /* now raise all dedicated that are going up */
10940 for (i = 0; i < NUM_USABLE_VLS; i++) {
10941 if (!valid_vl(i))
10942 continue;
10943
10944 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10945 be16_to_cpu(cur_bc.vl[i].dedicated))
10946 set_vl_dedicated(dd, i,
17fb4f29
JJ
10947 be16_to_cpu(new_bc->
10948 vl[i].dedicated));
77241056
MM
10949 }
10950 }
10951
10952 /* next raise all shared that are going up */
10953 for (i = 0; i < NUM_USABLE_VLS; i++) {
10954 if (!valid_vl(i))
10955 continue;
10956
10957 if (be16_to_cpu(new_bc->vl[i].shared) >
10958 be16_to_cpu(cur_bc.vl[i].shared))
10959 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10960 }
10961
10962 /* finally raise the global shared */
10963 if (be16_to_cpu(new_bc->overall_shared_limit) >
17fb4f29 10964 be16_to_cpu(cur_bc.overall_shared_limit))
77241056 10965 set_global_shared(dd,
17fb4f29 10966 be16_to_cpu(new_bc->overall_shared_limit));
77241056
MM
10967
10968 /* bracket the credit change with a total adjustment */
10969 if (new_total < cur_total)
10970 set_global_limit(dd, new_total);
8a4d3444
MM
10971
10972 /*
10973 * Determine the actual number of operational VLS using the number of
10974 * dedicated and shared credits for each VL.
10975 */
10976 if (change_count > 0) {
10977 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10978 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
10979 be16_to_cpu(new_bc->vl[i].shared) > 0)
10980 vl_count++;
10981 ppd->actual_vls_operational = vl_count;
10982 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
10983 ppd->actual_vls_operational :
10984 ppd->vls_operational,
10985 NULL);
10986 if (ret == 0)
10987 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
10988 ppd->actual_vls_operational :
10989 ppd->vls_operational, NULL);
10990 if (ret)
10991 return ret;
10992 }
77241056
MM
10993 return 0;
10994}
10995
10996/*
10997 * Read the given fabric manager table. Return the size of the
10998 * table (in bytes) on success, and a negative error code on
10999 * failure.
11000 */
11001int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11002
11003{
11004 int size;
11005 struct vl_arb_cache *vlc;
11006
11007 switch (which) {
11008 case FM_TBL_VL_HIGH_ARB:
11009 size = 256;
11010 /*
11011 * OPA specifies 128 elements (of 2 bytes each), though
11012 * HFI supports only 16 elements in h/w.
11013 */
11014 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11015 vl_arb_get_cache(vlc, t);
11016 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11017 break;
11018 case FM_TBL_VL_LOW_ARB:
11019 size = 256;
11020 /*
11021 * OPA specifies 128 elements (of 2 bytes each), though
11022 * HFI supports only 16 elements in h/w.
11023 */
11024 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11025 vl_arb_get_cache(vlc, t);
11026 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11027 break;
11028 case FM_TBL_BUFFER_CONTROL:
11029 size = get_buffer_control(ppd->dd, t, NULL);
11030 break;
11031 case FM_TBL_SC2VLNT:
11032 size = get_sc2vlnt(ppd->dd, t);
11033 break;
11034 case FM_TBL_VL_PREEMPT_ELEMS:
11035 size = 256;
11036 /* OPA specifies 128 elements, of 2 bytes each */
11037 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11038 break;
11039 case FM_TBL_VL_PREEMPT_MATRIX:
11040 size = 256;
11041 /*
11042 * OPA specifies that this is the same size as the VL
11043 * arbitration tables (i.e., 256 bytes).
11044 */
11045 break;
11046 default:
11047 return -EINVAL;
11048 }
11049 return size;
11050}
11051
11052/*
11053 * Write the given fabric manager table.
11054 */
11055int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11056{
11057 int ret = 0;
11058 struct vl_arb_cache *vlc;
11059
11060 switch (which) {
11061 case FM_TBL_VL_HIGH_ARB:
11062 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11063 if (vl_arb_match_cache(vlc, t)) {
11064 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11065 break;
11066 }
11067 vl_arb_set_cache(vlc, t);
11068 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11069 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11070 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11071 break;
11072 case FM_TBL_VL_LOW_ARB:
11073 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11074 if (vl_arb_match_cache(vlc, t)) {
11075 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11076 break;
11077 }
11078 vl_arb_set_cache(vlc, t);
11079 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11080 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11081 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11082 break;
11083 case FM_TBL_BUFFER_CONTROL:
8a4d3444 11084 ret = set_buffer_control(ppd, t);
77241056
MM
11085 break;
11086 case FM_TBL_SC2VLNT:
11087 set_sc2vlnt(ppd->dd, t);
11088 break;
11089 default:
11090 ret = -EINVAL;
11091 }
11092 return ret;
11093}
11094
11095/*
11096 * Disable all data VLs.
11097 *
11098 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11099 */
11100static int disable_data_vls(struct hfi1_devdata *dd)
11101{
995deafa 11102 if (is_ax(dd))
77241056
MM
11103 return 1;
11104
11105 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11106
11107 return 0;
11108}
11109
11110/*
11111 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11112 * Just re-enables all data VLs (the "fill" part happens
11113 * automatically - the name was chosen for symmetry with
11114 * stop_drain_data_vls()).
11115 *
11116 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11117 */
11118int open_fill_data_vls(struct hfi1_devdata *dd)
11119{
995deafa 11120 if (is_ax(dd))
77241056
MM
11121 return 1;
11122
11123 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11124
11125 return 0;
11126}
11127
11128/*
 11130 * drain_data_vls() - assumes that disable_data_vls() has been called;
 11131 * waits for the occupancy (of per-VL FIFOs) of all contexts and SDMA
 11132 * engines to drop to 0.
11132 */
11133static void drain_data_vls(struct hfi1_devdata *dd)
11134{
11135 sc_wait(dd);
11136 sdma_wait(dd);
11137 pause_for_credit_return(dd);
11138}
11139
11140/*
11141 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11142 *
11143 * Use open_fill_data_vls() to resume using data VLs. This pair is
11144 * meant to be used like this:
11145 *
11146 * stop_drain_data_vls(dd);
11147 * // do things with per-VL resources
11148 * open_fill_data_vls(dd);
11149 */
11150int stop_drain_data_vls(struct hfi1_devdata *dd)
11151{
11152 int ret;
11153
11154 ret = disable_data_vls(dd);
11155 if (ret == 0)
11156 drain_data_vls(dd);
11157
11158 return ret;
11159}
11160
11161/*
11162 * Convert a nanosecond time to a cclock count. No matter how slow
11163 * the cclock, a non-zero ns will always have a non-zero result.
11164 */
11165u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11166{
11167 u32 cclocks;
11168
11169 if (dd->icode == ICODE_FPGA_EMULATION)
11170 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11171 else /* simulation pretends to be ASIC */
11172 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11173 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11174 cclocks = 1;
11175 return cclocks;
11176}
11177
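/*
 * Illustrative sketch (not part of the driver): the rounding behaviour of
 * ns_to_cclock() above. The 30000 ps and 805 ps periods are only assumed
 * example values; the real ones are the FPGA_CCLOCK_PS and ASIC_CCLOCK_PS
 * constants for the part in use.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t ns_to_cclock(uint32_t ns, uint32_t cclock_ps)
{
        uint32_t cclocks = (ns * 1000) / cclock_ps;

        if (ns && !cclocks)     /* a non-zero ns must give at least one cclock */
                cclocks = 1;
        return cclocks;
}

int main(void)
{
        /* 1 ns on a slow clock clamps up to 1; 100 ns on a fast clock gives 124 */
        printf("%u %u\n", ns_to_cclock(1, 30000), ns_to_cclock(100, 805));
        return 0;
}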
11178/*
 11179 * Convert a cclock count to nanoseconds. No matter how slow
11180 * the cclock, a non-zero cclocks will always have a non-zero result.
11181 */
11182u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11183{
11184 u32 ns;
11185
11186 if (dd->icode == ICODE_FPGA_EMULATION)
11187 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11188 else /* simulation pretends to be ASIC */
11189 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11190 if (cclocks && !ns)
11191 ns = 1;
11192 return ns;
11193}
11194
11195/*
11196 * Dynamically adjust the receive interrupt timeout for a context based on
11197 * incoming packet rate.
11198 *
11199 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11200 */
11201static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11202{
11203 struct hfi1_devdata *dd = rcd->dd;
11204 u32 timeout = rcd->rcvavail_timeout;
11205
11206 /*
11207 * This algorithm doubles or halves the timeout depending on whether
 11208 * the number of packets received in this interrupt was less than or
 11209 * greater than or equal to the interrupt count.
11210 *
11211 * The calculations below do not allow a steady state to be achieved.
 11212 * Only at the endpoints is it possible to have an unchanging
11213 * timeout.
11214 */
11215 if (npkts < rcv_intr_count) {
11216 /*
11217 * Not enough packets arrived before the timeout, adjust
11218 * timeout downward.
11219 */
11220 if (timeout < 2) /* already at minimum? */
11221 return;
11222 timeout >>= 1;
11223 } else {
11224 /*
11225 * More than enough packets arrived before the timeout, adjust
11226 * timeout upward.
11227 */
11228 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11229 return;
11230 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11231 }
11232
11233 rcd->rcvavail_timeout = timeout;
4d114fdd
JJ
11234 /*
11235 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11236 * been verified to be in range
11237 */
77241056 11238 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
17fb4f29
JJ
11239 (u64)timeout <<
11240 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
77241056
MM
11241}
11242
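/*
 * Illustrative sketch (not part of the driver): the double-or-halve
 * adjustment adjust_rcv_timeout() above applies. The interrupt count, CSR
 * maximum, and packet counts are made-up example values.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t adjust(uint32_t timeout, uint32_t npkts,
                       uint32_t intr_count, uint32_t max_timeout)
{
        if (npkts < intr_count) {               /* too few packets: react faster */
                if (timeout < 2)
                        return timeout;
                return timeout >> 1;
        }
        if (timeout >= max_timeout)             /* plenty of packets: back off */
                return timeout;
        return (timeout << 1) > max_timeout ? max_timeout : timeout << 1;
}

int main(void)
{
        uint32_t t = 64;

        t = adjust(t, 100, 16, 1024);   /* busy interrupt: 64 -> 128 */
        t = adjust(t, 4, 16, 1024);     /* quiet interrupt: 128 -> 64 */
        printf("timeout %u\n", t);
        return 0;
}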
11243void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11244 u32 intr_adjust, u32 npkts)
11245{
11246 struct hfi1_devdata *dd = rcd->dd;
11247 u64 reg;
11248 u32 ctxt = rcd->ctxt;
11249
11250 /*
11251 * Need to write timeout register before updating RcvHdrHead to ensure
11252 * that a new value is used when the HW decides to restart counting.
11253 */
11254 if (intr_adjust)
11255 adjust_rcv_timeout(rcd, npkts);
11256 if (updegr) {
11257 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11258 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11259 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11260 }
11261 mmiowb();
11262 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11263 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11264 << RCV_HDR_HEAD_HEAD_SHIFT);
11265 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11266 mmiowb();
11267}
11268
11269u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11270{
11271 u32 head, tail;
11272
11273 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11274 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11275
11276 if (rcd->rcvhdrtail_kvaddr)
11277 tail = get_rcvhdrtail(rcd);
11278 else
11279 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11280
11281 return head == tail;
11282}
11283
11284/*
11285 * Context Control and Receive Array encoding for buffer size:
11286 * 0x0 invalid
11287 * 0x1 4 KB
11288 * 0x2 8 KB
11289 * 0x3 16 KB
11290 * 0x4 32 KB
11291 * 0x5 64 KB
11292 * 0x6 128 KB
11293 * 0x7 256 KB
11294 * 0x8 512 KB (Receive Array only)
11295 * 0x9 1 MB (Receive Array only)
11296 * 0xa 2 MB (Receive Array only)
11297 *
11298 * 0xB-0xF - reserved (Receive Array only)
11299 *
11300 *
11301 * This routine assumes that the value has already been sanity checked.
11302 */
11303static u32 encoded_size(u32 size)
11304{
11305 switch (size) {
8638b77f
JJ
11306 case 4 * 1024: return 0x1;
11307 case 8 * 1024: return 0x2;
11308 case 16 * 1024: return 0x3;
11309 case 32 * 1024: return 0x4;
11310 case 64 * 1024: return 0x5;
11311 case 128 * 1024: return 0x6;
11312 case 256 * 1024: return 0x7;
11313 case 512 * 1024: return 0x8;
11314 case 1 * 1024 * 1024: return 0x9;
11315 case 2 * 1024 * 1024: return 0xa;
77241056
MM
11316 }
11317 return 0x1; /* if invalid, go with the minimum size */
11318}
11319
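/*
 * Illustrative sketch (not part of the driver): for the valid power-of-two
 * sizes, the table in encoded_size() above is just "log2 of the size, minus
 * 11" (4 KB -> 0x1 ... 2 MB -> 0xa), shown here with a builtin in place of
 * the kernel's ilog2().
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t encode(uint32_t size)
{
        return 31 - __builtin_clz(size) - 11;   /* ilog2(size) - 11 */
}

int main(void)
{
        printf("0x%x 0x%x 0x%x\n",
               encode(4 * 1024), encode(64 * 1024), encode(2 * 1024 * 1024));
        return 0;       /* prints 0x1 0x5 0xa */
}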
11320void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11321{
11322 struct hfi1_ctxtdata *rcd;
11323 u64 rcvctrl, reg;
11324 int did_enable = 0;
11325
11326 rcd = dd->rcd[ctxt];
11327 if (!rcd)
11328 return;
11329
11330 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11331
11332 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11333 /* if the context already enabled, don't do the extra steps */
d0d236ea
JJ
11334 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11335 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
77241056
MM
11336 /* reset the tail and hdr addresses, and sequence count */
11337 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11338 rcd->rcvhdrq_phys);
11339 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11340 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11341 rcd->rcvhdrqtailaddr_phys);
11342 rcd->seq_cnt = 1;
11343
11344 /* reset the cached receive header queue head value */
11345 rcd->head = 0;
11346
11347 /*
11348 * Zero the receive header queue so we don't get false
11349 * positives when checking the sequence number. The
11350 * sequence numbers could land exactly on the same spot.
11351 * E.g. a rcd restart before the receive header wrapped.
11352 */
11353 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11354
11355 /* starting timeout */
11356 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11357
11358 /* enable the context */
11359 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11360
11361 /* clean the egr buffer size first */
11362 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11363 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11364 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11365 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11366
11367 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11368 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11369 did_enable = 1;
11370
11371 /* zero RcvEgrIndexHead */
11372 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11373
11374 /* set eager count and base index */
11375 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11376 & RCV_EGR_CTRL_EGR_CNT_MASK)
11377 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11378 (((rcd->eager_base >> RCV_SHIFT)
11379 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11380 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11381 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11382
11383 /*
11384 * Set TID (expected) count and base index.
11385 * rcd->expected_count is set to individual RcvArray entries,
11386 * not pairs, and the CSR takes a pair-count in groups of
11387 * four, so divide by 8.
11388 */
11389 reg = (((rcd->expected_count >> RCV_SHIFT)
11390 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11391 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11392 (((rcd->expected_base >> RCV_SHIFT)
11393 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11394 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11395 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
82c2611d
NV
11396 if (ctxt == HFI1_CTRL_CTXT)
11397 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
77241056
MM
11398 }
11399 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11400 write_csr(dd, RCV_VL15, 0);
46b010d3
MB
11401 /*
 11402 * When the receive context is being disabled, turn on tail
 11403 * update with a dummy tail address and then disable
 11404 * the receive context.
11405 */
11406 if (dd->rcvhdrtail_dummy_physaddr) {
11407 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11408 dd->rcvhdrtail_dummy_physaddr);
566c157c 11409 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
46b010d3
MB
11410 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11411 }
11412
77241056
MM
11413 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11414 }
11415 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11416 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11417 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11418 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11419 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11420 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
566c157c
MH
11421 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11422 /* See comment on RcvCtxtCtrl.TailUpd above */
11423 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11424 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11425 }
77241056
MM
11426 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11427 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11428 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11429 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11430 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
4d114fdd
JJ
11431 /*
11432 * In one-packet-per-eager mode, the size comes from
11433 * the RcvArray entry.
11434 */
77241056
MM
11435 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11436 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11437 }
11438 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11439 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11440 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11441 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11442 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11443 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11444 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11445 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11446 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11447 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11448 rcd->rcvctrl = rcvctrl;
11449 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11450 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11451
11452 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
d0d236ea
JJ
11453 if (did_enable &&
11454 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
77241056
MM
11455 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11456 if (reg != 0) {
11457 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
17fb4f29 11458 ctxt, reg);
77241056
MM
11459 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11460 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11461 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11462 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11463 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11464 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
17fb4f29 11465 ctxt, reg, reg == 0 ? "not" : "still");
77241056
MM
11466 }
11467 }
11468
11469 if (did_enable) {
11470 /*
11471 * The interrupt timeout and count must be set after
11472 * the context is enabled to take effect.
11473 */
11474 /* set interrupt timeout */
11475 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
17fb4f29 11476 (u64)rcd->rcvavail_timeout <<
77241056
MM
11477 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11478
11479 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11480 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11481 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11482 }
11483
11484 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11485 /*
11486 * If the context has been disabled and the Tail Update has
46b010d3
MB
 11487 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
 11488 * so it doesn't contain an invalid address.
77241056 11489 */
46b010d3
MB
11490 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11491 dd->rcvhdrtail_dummy_physaddr);
77241056
MM
11492}
11493
582e05c3 11494u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
77241056
MM
11495{
11496 int ret;
11497 u64 val = 0;
11498
11499 if (namep) {
11500 ret = dd->cntrnameslen;
77241056
MM
11501 *namep = dd->cntrnames;
11502 } else {
11503 const struct cntr_entry *entry;
11504 int i, j;
11505
11506 ret = (dd->ndevcntrs) * sizeof(u64);
77241056
MM
11507
11508 /* Get the start of the block of counters */
11509 *cntrp = dd->cntrs;
11510
11511 /*
11512 * Now go and fill in each counter in the block.
11513 */
11514 for (i = 0; i < DEV_CNTR_LAST; i++) {
11515 entry = &dev_cntrs[i];
11516 hfi1_cdbg(CNTR, "reading %s", entry->name);
11517 if (entry->flags & CNTR_DISABLED) {
11518 /* Nothing */
11519 hfi1_cdbg(CNTR, "\tDisabled\n");
11520 } else {
11521 if (entry->flags & CNTR_VL) {
11522 hfi1_cdbg(CNTR, "\tPer VL\n");
11523 for (j = 0; j < C_VL_COUNT; j++) {
11524 val = entry->rw_cntr(entry,
11525 dd, j,
11526 CNTR_MODE_R,
11527 0);
11528 hfi1_cdbg(
11529 CNTR,
11530 "\t\tRead 0x%llx for %d\n",
11531 val, j);
11532 dd->cntrs[entry->offset + j] =
11533 val;
11534 }
a699c6c2
VM
11535 } else if (entry->flags & CNTR_SDMA) {
11536 hfi1_cdbg(CNTR,
11537 "\t Per SDMA Engine\n");
11538 for (j = 0; j < dd->chip_sdma_engines;
11539 j++) {
11540 val =
11541 entry->rw_cntr(entry, dd, j,
11542 CNTR_MODE_R, 0);
11543 hfi1_cdbg(CNTR,
11544 "\t\tRead 0x%llx for %d\n",
11545 val, j);
11546 dd->cntrs[entry->offset + j] =
11547 val;
11548 }
77241056
MM
11549 } else {
11550 val = entry->rw_cntr(entry, dd,
11551 CNTR_INVALID_VL,
11552 CNTR_MODE_R, 0);
11553 dd->cntrs[entry->offset] = val;
11554 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11555 }
11556 }
11557 }
11558 }
11559 return ret;
11560}
11561
11562/*
11563 * Used by sysfs to create files for hfi stats to read
11564 */
582e05c3 11565u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
77241056
MM
11566{
11567 int ret;
11568 u64 val = 0;
11569
11570 if (namep) {
582e05c3
DL
11571 ret = ppd->dd->portcntrnameslen;
11572 *namep = ppd->dd->portcntrnames;
77241056
MM
11573 } else {
11574 const struct cntr_entry *entry;
77241056
MM
11575 int i, j;
11576
582e05c3 11577 ret = ppd->dd->nportcntrs * sizeof(u64);
77241056
MM
11578 *cntrp = ppd->cntrs;
11579
11580 for (i = 0; i < PORT_CNTR_LAST; i++) {
11581 entry = &port_cntrs[i];
11582 hfi1_cdbg(CNTR, "reading %s", entry->name);
11583 if (entry->flags & CNTR_DISABLED) {
11584 /* Nothing */
11585 hfi1_cdbg(CNTR, "\tDisabled\n");
11586 continue;
11587 }
11588
11589 if (entry->flags & CNTR_VL) {
11590 hfi1_cdbg(CNTR, "\tPer VL");
11591 for (j = 0; j < C_VL_COUNT; j++) {
11592 val = entry->rw_cntr(entry, ppd, j,
11593 CNTR_MODE_R,
11594 0);
11595 hfi1_cdbg(
11596 CNTR,
11597 "\t\tRead 0x%llx for %d",
11598 val, j);
11599 ppd->cntrs[entry->offset + j] = val;
11600 }
11601 } else {
11602 val = entry->rw_cntr(entry, ppd,
11603 CNTR_INVALID_VL,
11604 CNTR_MODE_R,
11605 0);
11606 ppd->cntrs[entry->offset] = val;
11607 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11608 }
11609 }
11610 }
11611 return ret;
11612}
11613
11614static void free_cntrs(struct hfi1_devdata *dd)
11615{
11616 struct hfi1_pportdata *ppd;
11617 int i;
11618
11619 if (dd->synth_stats_timer.data)
11620 del_timer_sync(&dd->synth_stats_timer);
11621 dd->synth_stats_timer.data = 0;
11622 ppd = (struct hfi1_pportdata *)(dd + 1);
11623 for (i = 0; i < dd->num_pports; i++, ppd++) {
11624 kfree(ppd->cntrs);
11625 kfree(ppd->scntrs);
4eb06882
DD
11626 free_percpu(ppd->ibport_data.rvp.rc_acks);
11627 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11628 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
77241056
MM
11629 ppd->cntrs = NULL;
11630 ppd->scntrs = NULL;
4eb06882
DD
11631 ppd->ibport_data.rvp.rc_acks = NULL;
11632 ppd->ibport_data.rvp.rc_qacks = NULL;
11633 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
77241056
MM
11634 }
11635 kfree(dd->portcntrnames);
11636 dd->portcntrnames = NULL;
11637 kfree(dd->cntrs);
11638 dd->cntrs = NULL;
11639 kfree(dd->scntrs);
11640 dd->scntrs = NULL;
11641 kfree(dd->cntrnames);
11642 dd->cntrnames = NULL;
11643}
11644
11645#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11646#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11647
11648static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11649 u64 *psval, void *context, int vl)
11650{
11651 u64 val;
11652 u64 sval = *psval;
11653
11654 if (entry->flags & CNTR_DISABLED) {
11655 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11656 return 0;
11657 }
11658
11659 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11660
11661 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11662
 11663 /* If it's a synthetic counter, there is more work we need to do */
11664 if (entry->flags & CNTR_SYNTH) {
11665 if (sval == CNTR_MAX) {
 11666 /* No need to read, already saturated */
11667 return CNTR_MAX;
11668 }
11669
11670 if (entry->flags & CNTR_32BIT) {
11671 /* 32bit counters can wrap multiple times */
11672 u64 upper = sval >> 32;
11673 u64 lower = (sval << 32) >> 32;
11674
11675 if (lower > val) { /* hw wrapped */
11676 if (upper == CNTR_32BIT_MAX)
11677 val = CNTR_MAX;
11678 else
11679 upper++;
11680 }
11681
11682 if (val != CNTR_MAX)
11683 val = (upper << 32) | val;
11684
11685 } else {
 11686 /* If we rolled, we are saturated */
11687 if ((val < sval) || (val > CNTR_MAX))
11688 val = CNTR_MAX;
11689 }
11690 }
11691
11692 *psval = val;
11693
11694 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11695
11696 return val;
11697}
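
/*
 * Illustrative sketch (not part of the driver): the 32-bit extension
 * performed above, isolated with hypothetical values.  "sval" is the
 * previously saved 64-bit synthetic value and "hw" is the current
 * 32-bit hardware reading.
 */
static u64 example_extend_32bit(u64 sval, u64 hw)
{
	u64 upper = sval >> 32;
	u64 lower = (sval << 32) >> 32;

	if (lower > hw) {		/* hardware wrapped since the last read */
		if (upper == CNTR_32BIT_MAX)
			return CNTR_MAX;	/* saturate, as above */
		upper++;
	}
	return (upper << 32) | hw;
}

/*
 * Example: sval = 0x2fffffff0, hw = 0x10.  lower (0xfffffff0) > hw, so
 * the hardware wrapped; upper becomes 3 and the new synthetic value is
 * 0x300000010.
 */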
11698
11699static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11700 struct cntr_entry *entry,
11701 u64 *psval, void *context, int vl, u64 data)
11702{
11703 u64 val;
11704
11705 if (entry->flags & CNTR_DISABLED) {
11706 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11707 return 0;
11708 }
11709
11710 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11711
11712 if (entry->flags & CNTR_SYNTH) {
11713 *psval = data;
11714 if (entry->flags & CNTR_32BIT) {
11715 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11716 (data << 32) >> 32);
11717 val = data; /* return the full 64bit value */
11718 } else {
11719 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11720 data);
11721 }
11722 } else {
11723 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11724 }
11725
11726 *psval = val;
11727
11728 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11729
11730 return val;
11731}
11732
11733u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11734{
11735 struct cntr_entry *entry;
11736 u64 *sval;
11737
11738 entry = &dev_cntrs[index];
11739 sval = dd->scntrs + entry->offset;
11740
11741 if (vl != CNTR_INVALID_VL)
11742 sval += vl;
11743
11744 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11745}
11746
11747u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11748{
11749 struct cntr_entry *entry;
11750 u64 *sval;
11751
11752 entry = &dev_cntrs[index];
11753 sval = dd->scntrs + entry->offset;
11754
11755 if (vl != CNTR_INVALID_VL)
11756 sval += vl;
11757
11758 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11759}
11760
11761u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11762{
11763 struct cntr_entry *entry;
11764 u64 *sval;
11765
11766 entry = &port_cntrs[index];
11767 sval = ppd->scntrs + entry->offset;
11768
11769 if (vl != CNTR_INVALID_VL)
11770 sval += vl;
11771
11772 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11773 (index <= C_RCV_HDR_OVF_LAST)) {
11774 /* We do not want to bother for disabled contexts */
11775 return 0;
11776 }
11777
11778 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11779}
11780
11781u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11782{
11783 struct cntr_entry *entry;
11784 u64 *sval;
11785
11786 entry = &port_cntrs[index];
11787 sval = ppd->scntrs + entry->offset;
11788
11789 if (vl != CNTR_INVALID_VL)
11790 sval += vl;
11791
11792 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11793 (index <= C_RCV_HDR_OVF_LAST)) {
11794 /* We do not want to bother for disabled contexts */
11795 return 0;
11796 }
11797
11798 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11799}
11800
11801static void update_synth_timer(unsigned long opaque)
11802{
11803 u64 cur_tx;
11804 u64 cur_rx;
11805 u64 total_flits;
11806 u8 update = 0;
11807 int i, j, vl;
11808 struct hfi1_pportdata *ppd;
11809 struct cntr_entry *entry;
11810
11811 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11812
11813 /*
 11814 * Rather than keep beating on the CSRs, pick a minimal set that we can
 11815 * check to watch for a potential rollover. We can do this by looking at
 11816 * the number of flits sent/received. If the total number of flits
 11817 * exceeds 32 bits, then we have to iterate over all the counters and update.
11818 */
11819 entry = &dev_cntrs[C_DC_RCV_FLITS];
11820 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11821
11822 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11823 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11824
11825 hfi1_cdbg(
11826 CNTR,
11827 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11828 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11829
11830 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11831 /*
 11832 * It may not be strictly necessary to update, but it won't hurt and
 11833 * it simplifies the logic here.
11834 */
11835 update = 1;
11836 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11837 dd->unit);
11838 } else {
11839 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11840 hfi1_cdbg(CNTR,
11841 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11842 total_flits, (u64)CNTR_32BIT_MAX);
11843 if (total_flits >= CNTR_32BIT_MAX) {
11844 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11845 dd->unit);
11846 update = 1;
11847 }
11848 }
11849
11850 if (update) {
11851 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11852 for (i = 0; i < DEV_CNTR_LAST; i++) {
11853 entry = &dev_cntrs[i];
11854 if (entry->flags & CNTR_VL) {
11855 for (vl = 0; vl < C_VL_COUNT; vl++)
11856 read_dev_cntr(dd, i, vl);
11857 } else {
11858 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11859 }
11860 }
11861 ppd = (struct hfi1_pportdata *)(dd + 1);
11862 for (i = 0; i < dd->num_pports; i++, ppd++) {
11863 for (j = 0; j < PORT_CNTR_LAST; j++) {
11864 entry = &port_cntrs[j];
11865 if (entry->flags & CNTR_VL) {
11866 for (vl = 0; vl < C_VL_COUNT; vl++)
11867 read_port_cntr(ppd, j, vl);
11868 } else {
11869 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11870 }
11871 }
11872 }
11873
11874 /*
11875 * We want the value in the register. The goal is to keep track
 11876 * of the number of "ticks", not the counter value. In other
 11877 * words, if the register rolls we want to notice it and go ahead
11878 * and force an update.
11879 */
11880 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11881 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11882 CNTR_MODE_R, 0);
11883
11884 entry = &dev_cntrs[C_DC_RCV_FLITS];
11885 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11886 CNTR_MODE_R, 0);
11887
11888 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11889 dd->unit, dd->last_tx, dd->last_rx);
11890
11891 } else {
11892 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11893 }
11894
 11895 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11896}
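
/*
 * Illustrative sketch (not part of the driver): the tripwire decision
 * made in update_synth_timer(), with hypothetical numbers.  A full
 * counter sweep is only forced when a flit CSR went backwards (rolled)
 * or when the combined tx+rx delta since the last pass no longer fits
 * in 32 bits.
 */
static int example_need_full_update(u64 cur_tx, u64 cur_rx,
				    u64 last_tx, u64 last_rx)
{
	u64 total_flits;

	if (cur_tx < last_tx || cur_rx < last_rx)
		return 1;		/* a tripwire counter rolled */

	total_flits = (cur_tx - last_tx) + (cur_rx - last_rx);
	return total_flits >= CNTR_32BIT_MAX;
}

/*
 * Example: last tx/rx = 0x1000/0x2000 and current tx/rx =
 * 0x80001000/0x80002000 give a combined delta of 0x100000000, which
 * exceeds CNTR_32BIT_MAX, so every device and port counter is re-read.
 */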
11897
 11898 #define C_MAX_NAME 13 /* 12 chars + one for '\0' */
11899static int init_cntrs(struct hfi1_devdata *dd)
11900{
c024c554 11901 int i, rcv_ctxts, j;
77241056
MM
11902 size_t sz;
11903 char *p;
11904 char name[C_MAX_NAME];
11905 struct hfi1_pportdata *ppd;
11d2b114
SS
11906 const char *bit_type_32 = ",32";
11907 const int bit_type_32_sz = strlen(bit_type_32);
77241056
MM
11908
11909 /* set up the stats timer; the add_timer is done at the end */
24523a94
MFW
11910 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11911 (unsigned long)dd);
77241056
MM
11912
11913 /***********************/
11914 /* per device counters */
11915 /***********************/
11916
 11917 /* size names and determine how many we have */
11918 dd->ndevcntrs = 0;
11919 sz = 0;
77241056
MM
11920
11921 for (i = 0; i < DEV_CNTR_LAST; i++) {
77241056
MM
11922 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11923 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11924 continue;
11925 }
11926
11927 if (dev_cntrs[i].flags & CNTR_VL) {
c024c554 11928 dev_cntrs[i].offset = dd->ndevcntrs;
77241056 11929 for (j = 0; j < C_VL_COUNT; j++) {
77241056 11930 snprintf(name, C_MAX_NAME, "%s%d",
17fb4f29 11931 dev_cntrs[i].name, vl_from_idx(j));
77241056 11932 sz += strlen(name);
11d2b114
SS
11933 /* Add ",32" for 32-bit counters */
11934 if (dev_cntrs[i].flags & CNTR_32BIT)
11935 sz += bit_type_32_sz;
77241056 11936 sz++;
77241056 11937 dd->ndevcntrs++;
77241056 11938 }
a699c6c2 11939 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
c024c554 11940 dev_cntrs[i].offset = dd->ndevcntrs;
a699c6c2 11941 for (j = 0; j < dd->chip_sdma_engines; j++) {
a699c6c2
VM
11942 snprintf(name, C_MAX_NAME, "%s%d",
11943 dev_cntrs[i].name, j);
77241056 11944 sz += strlen(name);
11d2b114
SS
11945 /* Add ",32" for 32-bit counters */
11946 if (dev_cntrs[i].flags & CNTR_32BIT)
11947 sz += bit_type_32_sz;
77241056 11948 sz++;
77241056 11949 dd->ndevcntrs++;
77241056
MM
11950 }
11951 } else {
11d2b114 11952 /* +1 for newline. */
77241056 11953 sz += strlen(dev_cntrs[i].name) + 1;
11d2b114
SS
11954 /* Add ",32" for 32-bit counters */
11955 if (dev_cntrs[i].flags & CNTR_32BIT)
11956 sz += bit_type_32_sz;
c024c554 11957 dev_cntrs[i].offset = dd->ndevcntrs;
77241056 11958 dd->ndevcntrs++;
77241056
MM
11959 }
11960 }
11961
11962 /* allocate space for the counter values */
c024c554 11963 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
77241056
MM
11964 if (!dd->cntrs)
11965 goto bail;
11966
c024c554 11967 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
77241056
MM
11968 if (!dd->scntrs)
11969 goto bail;
11970
77241056
MM
11971 /* allocate space for the counter names */
11972 dd->cntrnameslen = sz;
11973 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11974 if (!dd->cntrnames)
11975 goto bail;
11976
11977 /* fill in the names */
c024c554 11978 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
77241056
MM
11979 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11980 /* Nothing */
11d2b114
SS
11981 } else if (dev_cntrs[i].flags & CNTR_VL) {
11982 for (j = 0; j < C_VL_COUNT; j++) {
11d2b114
SS
11983 snprintf(name, C_MAX_NAME, "%s%d",
11984 dev_cntrs[i].name,
11985 vl_from_idx(j));
11986 memcpy(p, name, strlen(name));
11987 p += strlen(name);
11988
11989 /* Counter is 32 bits */
11990 if (dev_cntrs[i].flags & CNTR_32BIT) {
11991 memcpy(p, bit_type_32, bit_type_32_sz);
11992 p += bit_type_32_sz;
77241056 11993 }
11d2b114
SS
11994
11995 *p++ = '\n';
11996 }
11997 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11998 for (j = 0; j < dd->chip_sdma_engines; j++) {
11d2b114
SS
11999 snprintf(name, C_MAX_NAME, "%s%d",
12000 dev_cntrs[i].name, j);
12001 memcpy(p, name, strlen(name));
12002 p += strlen(name);
12003
12004 /* Counter is 32 bits */
12005 if (dev_cntrs[i].flags & CNTR_32BIT) {
12006 memcpy(p, bit_type_32, bit_type_32_sz);
12007 p += bit_type_32_sz;
a699c6c2 12008 }
11d2b114 12009
77241056
MM
12010 *p++ = '\n';
12011 }
11d2b114
SS
12012 } else {
12013 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12014 p += strlen(dev_cntrs[i].name);
12015
12016 /* Counter is 32 bits */
12017 if (dev_cntrs[i].flags & CNTR_32BIT) {
12018 memcpy(p, bit_type_32, bit_type_32_sz);
12019 p += bit_type_32_sz;
12020 }
12021
12022 *p++ = '\n';
77241056
MM
12023 }
12024 }
12025
12026 /*********************/
12027 /* per port counters */
12028 /*********************/
12029
12030 /*
12031 * Go through the counters for the overflows and disable the ones we
12032 * don't need. This varies based on platform so we need to do it
12033 * dynamically here.
12034 */
12035 rcv_ctxts = dd->num_rcv_contexts;
12036 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12037 i <= C_RCV_HDR_OVF_LAST; i++) {
12038 port_cntrs[i].flags |= CNTR_DISABLED;
12039 }
12040
 12041 /* size port counter names and determine how many we have */
12042 sz = 0;
12043 dd->nportcntrs = 0;
12044 for (i = 0; i < PORT_CNTR_LAST; i++) {
77241056
MM
12045 if (port_cntrs[i].flags & CNTR_DISABLED) {
12046 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12047 continue;
12048 }
12049
12050 if (port_cntrs[i].flags & CNTR_VL) {
77241056
MM
12051 port_cntrs[i].offset = dd->nportcntrs;
12052 for (j = 0; j < C_VL_COUNT; j++) {
77241056 12053 snprintf(name, C_MAX_NAME, "%s%d",
17fb4f29 12054 port_cntrs[i].name, vl_from_idx(j));
77241056 12055 sz += strlen(name);
11d2b114
SS
12056 /* Add ",32" for 32-bit counters */
12057 if (port_cntrs[i].flags & CNTR_32BIT)
12058 sz += bit_type_32_sz;
77241056 12059 sz++;
77241056
MM
12060 dd->nportcntrs++;
12061 }
12062 } else {
11d2b114 12063 /* +1 for newline */
77241056 12064 sz += strlen(port_cntrs[i].name) + 1;
11d2b114
SS
12065 /* Add ",32" for 32-bit counters */
12066 if (port_cntrs[i].flags & CNTR_32BIT)
12067 sz += bit_type_32_sz;
77241056
MM
12068 port_cntrs[i].offset = dd->nportcntrs;
12069 dd->nportcntrs++;
77241056
MM
12070 }
12071 }
12072
12073 /* allocate space for the counter names */
12074 dd->portcntrnameslen = sz;
12075 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12076 if (!dd->portcntrnames)
12077 goto bail;
12078
12079 /* fill in port cntr names */
12080 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12081 if (port_cntrs[i].flags & CNTR_DISABLED)
12082 continue;
12083
12084 if (port_cntrs[i].flags & CNTR_VL) {
12085 for (j = 0; j < C_VL_COUNT; j++) {
77241056 12086 snprintf(name, C_MAX_NAME, "%s%d",
17fb4f29 12087 port_cntrs[i].name, vl_from_idx(j));
77241056
MM
12088 memcpy(p, name, strlen(name));
12089 p += strlen(name);
11d2b114
SS
12090
12091 /* Counter is 32 bits */
12092 if (port_cntrs[i].flags & CNTR_32BIT) {
12093 memcpy(p, bit_type_32, bit_type_32_sz);
12094 p += bit_type_32_sz;
12095 }
12096
77241056
MM
12097 *p++ = '\n';
12098 }
12099 } else {
12100 memcpy(p, port_cntrs[i].name,
12101 strlen(port_cntrs[i].name));
12102 p += strlen(port_cntrs[i].name);
11d2b114
SS
12103
12104 /* Counter is 32 bits */
12105 if (port_cntrs[i].flags & CNTR_32BIT) {
12106 memcpy(p, bit_type_32, bit_type_32_sz);
12107 p += bit_type_32_sz;
12108 }
12109
77241056
MM
12110 *p++ = '\n';
12111 }
12112 }
12113
12114 /* allocate per port storage for counter values */
12115 ppd = (struct hfi1_pportdata *)(dd + 1);
12116 for (i = 0; i < dd->num_pports; i++, ppd++) {
12117 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12118 if (!ppd->cntrs)
12119 goto bail;
12120
12121 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12122 if (!ppd->scntrs)
12123 goto bail;
12124 }
12125
12126 /* CPU counters need to be allocated and zeroed */
12127 if (init_cpu_counters(dd))
12128 goto bail;
12129
12130 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12131 return 0;
12132bail:
12133 free_cntrs(dd);
12134 return -ENOMEM;
12135}
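
/*
 * Illustrative sketch (hypothetical counter name): how one entry of the
 * name blob built by init_cntrs() is laid out.  Per-VL and per-SDMA
 * counters get a numeric suffix, 32-bit counters get the ",32" tag, and
 * every entry is newline terminated; the values returned later by
 * hfi1_read_cntrs() follow the same order, one u64 per name.
 */
static int example_format_cntr_name(char *buf, size_t len,
				    const char *base, int idx, int is_32bit)
{
	/* e.g. base "RxFoo", idx 3, 32-bit -> "RxFoo3,32\n" */
	return snprintf(buf, len, "%s%d%s\n", base, idx, is_32bit ? ",32" : "");
}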
12136
77241056
MM
12137static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12138{
12139 switch (chip_lstate) {
12140 default:
12141 dd_dev_err(dd,
17fb4f29
JJ
12142 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12143 chip_lstate);
77241056
MM
12144 /* fall through */
12145 case LSTATE_DOWN:
12146 return IB_PORT_DOWN;
12147 case LSTATE_INIT:
12148 return IB_PORT_INIT;
12149 case LSTATE_ARMED:
12150 return IB_PORT_ARMED;
12151 case LSTATE_ACTIVE:
12152 return IB_PORT_ACTIVE;
12153 }
12154}
12155
12156u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12157{
12158 /* look at the HFI meta-states only */
12159 switch (chip_pstate & 0xf0) {
12160 default:
12161 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
17fb4f29 12162 chip_pstate);
77241056
MM
12163 /* fall through */
12164 case PLS_DISABLED:
12165 return IB_PORTPHYSSTATE_DISABLED;
12166 case PLS_OFFLINE:
12167 return OPA_PORTPHYSSTATE_OFFLINE;
12168 case PLS_POLLING:
12169 return IB_PORTPHYSSTATE_POLLING;
12170 case PLS_CONFIGPHY:
12171 return IB_PORTPHYSSTATE_TRAINING;
12172 case PLS_LINKUP:
12173 return IB_PORTPHYSSTATE_LINKUP;
12174 case PLS_PHYTEST:
12175 return IB_PORTPHYSSTATE_PHY_TEST;
12176 }
12177}
12178
12179/* return the OPA port logical state name */
12180const char *opa_lstate_name(u32 lstate)
12181{
12182 static const char * const port_logical_names[] = {
12183 "PORT_NOP",
12184 "PORT_DOWN",
12185 "PORT_INIT",
12186 "PORT_ARMED",
12187 "PORT_ACTIVE",
12188 "PORT_ACTIVE_DEFER",
12189 };
12190 if (lstate < ARRAY_SIZE(port_logical_names))
12191 return port_logical_names[lstate];
12192 return "unknown";
12193}
12194
12195/* return the OPA port physical state name */
12196const char *opa_pstate_name(u32 pstate)
12197{
12198 static const char * const port_physical_names[] = {
12199 "PHYS_NOP",
12200 "reserved1",
12201 "PHYS_POLL",
12202 "PHYS_DISABLED",
12203 "PHYS_TRAINING",
12204 "PHYS_LINKUP",
12205 "PHYS_LINK_ERR_RECOVER",
12206 "PHYS_PHY_TEST",
12207 "reserved8",
12208 "PHYS_OFFLINE",
12209 "PHYS_GANGED",
12210 "PHYS_TEST",
12211 };
12212 if (pstate < ARRAY_SIZE(port_physical_names))
12213 return port_physical_names[pstate];
12214 return "unknown";
12215}
12216
12217/*
12218 * Read the hardware link state and set the driver's cached value of it.
12219 * Return the (new) current value.
12220 */
12221u32 get_logical_state(struct hfi1_pportdata *ppd)
12222{
12223 u32 new_state;
12224
12225 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12226 if (new_state != ppd->lstate) {
12227 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
17fb4f29 12228 opa_lstate_name(new_state), new_state);
77241056
MM
12229 ppd->lstate = new_state;
12230 }
12231 /*
12232 * Set port status flags in the page mapped into userspace
12233 * memory. Do it here to ensure a reliable state - this is
12234 * the only function called by all state handling code.
 12235 * Always set the flags because the cache value
12236 * might have been changed explicitly outside of this
12237 * function.
12238 */
12239 if (ppd->statusp) {
12240 switch (ppd->lstate) {
12241 case IB_PORT_DOWN:
12242 case IB_PORT_INIT:
12243 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12244 HFI1_STATUS_IB_READY);
12245 break;
12246 case IB_PORT_ARMED:
12247 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12248 break;
12249 case IB_PORT_ACTIVE:
12250 *ppd->statusp |= HFI1_STATUS_IB_READY;
12251 break;
12252 }
12253 }
12254 return ppd->lstate;
12255}
12256
12257/**
12258 * wait_logical_linkstate - wait for an IB link state change to occur
12259 * @ppd: port device
12260 * @state: the state to wait for
12261 * @msecs: the number of milliseconds to wait
12262 *
12263 * Wait up to msecs milliseconds for IB link state change to occur.
12264 * For now, take the easy polling route.
12265 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12266 */
12267static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12268 int msecs)
12269{
12270 unsigned long timeout;
12271
12272 timeout = jiffies + msecs_to_jiffies(msecs);
12273 while (1) {
12274 if (get_logical_state(ppd) == state)
12275 return 0;
12276 if (time_after(jiffies, timeout))
12277 break;
12278 msleep(20);
12279 }
12280 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12281
12282 return -ETIMEDOUT;
12283}
12284
12285u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12286{
77241056
MM
12287 u32 pstate;
12288 u32 ib_pstate;
12289
12290 pstate = read_physical_state(ppd->dd);
12291 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
f45c8dc8 12292 if (ppd->last_pstate != ib_pstate) {
77241056 12293 dd_dev_info(ppd->dd,
17fb4f29
JJ
12294 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12295 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12296 pstate);
f45c8dc8 12297 ppd->last_pstate = ib_pstate;
77241056
MM
12298 }
12299 return ib_pstate;
12300}
12301
12302/*
12303 * Read/modify/write ASIC_QSFP register bits as selected by mask
12304 * data: 0 or 1 in the positions depending on what needs to be written
12305 * dir: 0 for read, 1 for write
12306 * mask: select by setting
12307 * I2CCLK (bit 0)
12308 * I2CDATA (bit 1)
12309 */
12310u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12311 u32 mask)
12312{
12313 u64 qsfp_oe, target_oe;
12314
12315 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12316 if (mask) {
12317 /* We are writing register bits, so lock access */
12318 dir &= mask;
12319 data &= mask;
12320
12321 qsfp_oe = read_csr(dd, target_oe);
12322 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12323 write_csr(dd, target_oe, qsfp_oe);
12324 }
12325 /* We are exclusively reading bits here, but it is unlikely
12326 * we'll get valid data when we set the direction of the pin
 12327 * in the same call, so a read should call this function again
 12328 * to get valid data.
12329 */
12330 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12331}
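
/*
 * Illustrative usage sketch (not part of the driver): hfi1_gpio_mod()
 * as written above only updates the output-enable bits selected by
 * "mask" (the "data" argument is masked but otherwise unused here) and
 * then samples the QSFP input CSR.  Bit 0 is I2CCLK and bit 1 is
 * I2CDATA, per the comment above; the EX_* names are made up for the
 * example.
 */
#define EX_I2CCLK	0x1	/* bit 0 */
#define EX_I2CDATA	0x2	/* bit 1 */

/* make the I2C clock pin an output, leaving the data pin untouched */
static u64 example_clk_output(struct hfi1_devdata *dd, u32 target)
{
	return hfi1_gpio_mod(dd, target, 0, EX_I2CCLK, EX_I2CCLK);
}

/* write nothing (mask == 0) and just sample the I2C data input bit */
static int example_sample_sda(struct hfi1_devdata *dd, u32 target)
{
	return !!(hfi1_gpio_mod(dd, target, 0, 0, 0) & EX_I2CDATA);
}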
12332
12333#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12334(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12335
12336#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12337(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12338
12339int hfi1_init_ctxt(struct send_context *sc)
12340{
d125a6c6 12341 if (sc) {
77241056
MM
12342 struct hfi1_devdata *dd = sc->dd;
12343 u64 reg;
12344 u8 set = (sc->type == SC_USER ?
12345 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12346 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12347 reg = read_kctxt_csr(dd, sc->hw_context,
12348 SEND_CTXT_CHECK_ENABLE);
12349 if (set)
12350 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12351 else
12352 SET_STATIC_RATE_CONTROL_SMASK(reg);
12353 write_kctxt_csr(dd, sc->hw_context,
12354 SEND_CTXT_CHECK_ENABLE, reg);
12355 }
12356 return 0;
12357}
12358
12359int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12360{
12361 int ret = 0;
12362 u64 reg;
12363
12364 if (dd->icode != ICODE_RTL_SILICON) {
12365 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12366 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12367 __func__);
12368 return -EINVAL;
12369 }
12370 reg = read_csr(dd, ASIC_STS_THERM);
12371 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12372 ASIC_STS_THERM_CURR_TEMP_MASK);
12373 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12374 ASIC_STS_THERM_LO_TEMP_MASK);
12375 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12376 ASIC_STS_THERM_HI_TEMP_MASK);
12377 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12378 ASIC_STS_THERM_CRIT_TEMP_MASK);
12379 /* triggers is a 3-bit value - 1 bit per trigger. */
12380 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12381
12382 return ret;
12383}
12384
12385/* ========================================================================= */
12386
12387/*
12388 * Enable/disable chip from delivering interrupts.
12389 */
12390void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12391{
12392 int i;
12393
12394 /*
12395 * In HFI, the mask needs to be 1 to allow interrupts.
12396 */
12397 if (enable) {
77241056
MM
12398 /* enable all interrupts */
12399 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
8638b77f 12400 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
77241056 12401
8ebd4cf1 12402 init_qsfp_int(dd);
77241056
MM
12403 } else {
12404 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
8638b77f 12405 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
77241056
MM
12406 }
12407}
12408
12409/*
12410 * Clear all interrupt sources on the chip.
12411 */
12412static void clear_all_interrupts(struct hfi1_devdata *dd)
12413{
12414 int i;
12415
12416 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
8638b77f 12417 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
77241056
MM
12418
12419 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12420 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12421 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12422 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12423 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12424 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12425 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12426 for (i = 0; i < dd->chip_send_contexts; i++)
12427 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12428 for (i = 0; i < dd->chip_sdma_engines; i++)
12429 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12430
12431 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12432 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12433 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12434}
12435
12436/* Move to pcie.c? */
12437static void disable_intx(struct pci_dev *pdev)
12438{
12439 pci_intx(pdev, 0);
12440}
12441
12442static void clean_up_interrupts(struct hfi1_devdata *dd)
12443{
12444 int i;
12445
12446 /* remove irqs - must happen before disabling/turning off */
12447 if (dd->num_msix_entries) {
12448 /* MSI-X */
12449 struct hfi1_msix_entry *me = dd->msix_entries;
12450
12451 for (i = 0; i < dd->num_msix_entries; i++, me++) {
d125a6c6 12452 if (!me->arg) /* => no irq, no affinity */
957558c9
MH
12453 continue;
12454 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
77241056
MM
12455 free_irq(me->msix.vector, me->arg);
12456 }
12457 } else {
12458 /* INTx */
12459 if (dd->requested_intx_irq) {
12460 free_irq(dd->pcidev->irq, dd);
12461 dd->requested_intx_irq = 0;
12462 }
12463 }
12464
12465 /* turn off interrupts */
12466 if (dd->num_msix_entries) {
12467 /* MSI-X */
6e5b6131 12468 pci_disable_msix(dd->pcidev);
77241056
MM
12469 } else {
12470 /* INTx */
12471 disable_intx(dd->pcidev);
12472 }
12473
12474 /* clean structures */
77241056
MM
12475 kfree(dd->msix_entries);
12476 dd->msix_entries = NULL;
12477 dd->num_msix_entries = 0;
12478}
12479
12480/*
12481 * Remap the interrupt source from the general handler to the given MSI-X
12482 * interrupt.
12483 */
12484static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12485{
12486 u64 reg;
12487 int m, n;
12488
12489 /* clear from the handled mask of the general interrupt */
12490 m = isrc / 64;
12491 n = isrc % 64;
12492 dd->gi_mask[m] &= ~((u64)1 << n);
12493
12494 /* direct the chip source to the given MSI-X interrupt */
12495 m = isrc / 8;
12496 n = isrc % 8;
8638b77f
JJ
12497 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12498 reg &= ~((u64)0xff << (8 * n));
12499 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12500 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
77241056
MM
12501}
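
/*
 * Worked example (hypothetical source number): remapping chip interrupt
 * source 137 to MSI-X vector 5 with the arithmetic above.
 *
 *   gi_mask:     m = 137 / 64 = 2,  n = 137 % 64 = 9
 *                -> clear bit 9 of dd->gi_mask[2]
 *   CCE_INT_MAP: m = 137 / 8 = 17,  n = 137 % 8 = 1
 *                -> byte 1 of the CSR at CCE_INT_MAP + 8 * 17 is set to 5
 *
 * Each 64-bit map CSR therefore holds eight 8-bit MSI-X vector numbers,
 * one per chip interrupt source.
 */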
12502
12503static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12504 int engine, int msix_intr)
12505{
12506 /*
 12507 * SDMA engine interrupt sources are grouped by type, rather than
12508 * engine. Per-engine interrupts are as follows:
12509 * SDMA
12510 * SDMAProgress
12511 * SDMAIdle
12512 */
8638b77f 12513 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
17fb4f29 12514 msix_intr);
8638b77f 12515 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
17fb4f29 12516 msix_intr);
8638b77f 12517 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
17fb4f29 12518 msix_intr);
77241056
MM
12519}
12520
77241056
MM
12521static int request_intx_irq(struct hfi1_devdata *dd)
12522{
12523 int ret;
12524
9805071e
JJ
12525 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12526 dd->unit);
77241056 12527 ret = request_irq(dd->pcidev->irq, general_interrupt,
17fb4f29 12528 IRQF_SHARED, dd->intx_name, dd);
77241056
MM
12529 if (ret)
12530 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
17fb4f29 12531 ret);
77241056
MM
12532 else
12533 dd->requested_intx_irq = 1;
12534 return ret;
12535}
12536
12537static int request_msix_irqs(struct hfi1_devdata *dd)
12538{
77241056
MM
12539 int first_general, last_general;
12540 int first_sdma, last_sdma;
12541 int first_rx, last_rx;
957558c9 12542 int i, ret = 0;
77241056
MM
12543
12544 /* calculate the ranges we are going to use */
12545 first_general = 0;
f3ff8189
JJ
12546 last_general = first_general + 1;
12547 first_sdma = last_general;
12548 last_sdma = first_sdma + dd->num_sdma;
12549 first_rx = last_sdma;
77241056
MM
12550 last_rx = first_rx + dd->n_krcv_queues;
12551
77241056
MM
12552 /*
12553 * Sanity check - the code expects all SDMA chip source
12554 * interrupts to be in the same CSR, starting at bit 0. Verify
12555 * that this is true by checking the bit location of the start.
12556 */
12557 BUILD_BUG_ON(IS_SDMA_START % 64);
12558
12559 for (i = 0; i < dd->num_msix_entries; i++) {
12560 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12561 const char *err_info;
12562 irq_handler_t handler;
f4f30031 12563 irq_handler_t thread = NULL;
77241056
MM
12564 void *arg;
12565 int idx;
12566 struct hfi1_ctxtdata *rcd = NULL;
12567 struct sdma_engine *sde = NULL;
12568
12569 /* obtain the arguments to request_irq */
12570 if (first_general <= i && i < last_general) {
12571 idx = i - first_general;
12572 handler = general_interrupt;
12573 arg = dd;
12574 snprintf(me->name, sizeof(me->name),
9805071e 12575 DRIVER_NAME "_%d", dd->unit);
77241056 12576 err_info = "general";
957558c9 12577 me->type = IRQ_GENERAL;
77241056
MM
12578 } else if (first_sdma <= i && i < last_sdma) {
12579 idx = i - first_sdma;
12580 sde = &dd->per_sdma[idx];
12581 handler = sdma_interrupt;
12582 arg = sde;
12583 snprintf(me->name, sizeof(me->name),
9805071e 12584 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
77241056
MM
12585 err_info = "sdma";
12586 remap_sdma_interrupts(dd, idx, i);
957558c9 12587 me->type = IRQ_SDMA;
77241056
MM
12588 } else if (first_rx <= i && i < last_rx) {
12589 idx = i - first_rx;
12590 rcd = dd->rcd[idx];
12591 /* no interrupt if no rcd */
12592 if (!rcd)
12593 continue;
12594 /*
12595 * Set the interrupt register and mask for this
12596 * context's interrupt.
12597 */
8638b77f 12598 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
77241056 12599 rcd->imask = ((u64)1) <<
8638b77f 12600 ((IS_RCVAVAIL_START + idx) % 64);
77241056 12601 handler = receive_context_interrupt;
f4f30031 12602 thread = receive_context_thread;
77241056
MM
12603 arg = rcd;
12604 snprintf(me->name, sizeof(me->name),
9805071e 12605 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
77241056 12606 err_info = "receive context";
66c0933b 12607 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
957558c9 12608 me->type = IRQ_RCVCTXT;
77241056
MM
12609 } else {
12610 /* not in our expected range - complain, then
4d114fdd
JJ
12611 * ignore it
12612 */
77241056 12613 dd_dev_err(dd,
17fb4f29 12614 "Unexpected extra MSI-X interrupt %d\n", i);
77241056
MM
12615 continue;
12616 }
12617 /* no argument, no interrupt */
d125a6c6 12618 if (!arg)
77241056
MM
12619 continue;
12620 /* make sure the name is terminated */
8638b77f 12621 me->name[sizeof(me->name) - 1] = 0;
77241056 12622
f4f30031 12623 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
17fb4f29 12624 me->name, arg);
77241056
MM
12625 if (ret) {
12626 dd_dev_err(dd,
17fb4f29
JJ
12627 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12628 err_info, me->msix.vector, idx, ret);
77241056
MM
12629 return ret;
12630 }
12631 /*
12632 * assign arg after request_irq call, so it will be
12633 * cleaned up
12634 */
12635 me->arg = arg;
12636
957558c9
MH
12637 ret = hfi1_get_irq_affinity(dd, me);
12638 if (ret)
12639 dd_dev_err(dd,
12640 "unable to pin IRQ %d\n", ret);
77241056
MM
12641 }
12642
77241056 12643 return ret;
77241056
MM
12644}
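
/*
 * Illustrative layout (hypothetical sizes): with dd->num_sdma = 4 and
 * dd->n_krcv_queues = 3, the ranges computed above give
 *
 *   vector 0       general ("slow path") interrupt
 *   vectors 1-4    SDMA engines 0-3
 *   vectors 5-7    kernel receive contexts 0-2
 *
 * matching the total of 1 + num_sdma + n_krcv_queues requested in
 * set_up_interrupts() below.
 */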
12645
12646/*
12647 * Set the general handler to accept all interrupts, remap all
12648 * chip interrupts back to MSI-X 0.
12649 */
12650static void reset_interrupts(struct hfi1_devdata *dd)
12651{
12652 int i;
12653
12654 /* all interrupts handled by the general handler */
12655 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12656 dd->gi_mask[i] = ~(u64)0;
12657
12658 /* all chip interrupts map to MSI-X 0 */
12659 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
8638b77f 12660 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
77241056
MM
12661}
12662
12663static int set_up_interrupts(struct hfi1_devdata *dd)
12664{
12665 struct hfi1_msix_entry *entries;
12666 u32 total, request;
12667 int i, ret;
12668 int single_interrupt = 0; /* we expect to have all the interrupts */
12669
12670 /*
12671 * Interrupt count:
12672 * 1 general, "slow path" interrupt (includes the SDMA engines
12673 * slow source, SDMACleanupDone)
12674 * N interrupts - one per used SDMA engine
 12675 * M interrupts - one per kernel receive context
12676 */
12677 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12678
12679 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12680 if (!entries) {
77241056
MM
12681 ret = -ENOMEM;
12682 goto fail;
12683 }
12684 /* 1-1 MSI-X entry assignment */
12685 for (i = 0; i < total; i++)
12686 entries[i].msix.entry = i;
12687
12688 /* ask for MSI-X interrupts */
12689 request = total;
12690 request_msix(dd, &request, entries);
12691
12692 if (request == 0) {
12693 /* using INTx */
12694 /* dd->num_msix_entries already zero */
12695 kfree(entries);
12696 single_interrupt = 1;
12697 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12698 } else {
12699 /* using MSI-X */
12700 dd->num_msix_entries = request;
12701 dd->msix_entries = entries;
12702
12703 if (request != total) {
12704 /* using MSI-X, with reduced interrupts */
12705 dd_dev_err(
12706 dd,
12707 "cannot handle reduced interrupt case, want %u, got %u\n",
12708 total, request);
12709 ret = -EINVAL;
12710 goto fail;
12711 }
12712 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12713 }
12714
12715 /* mask all interrupts */
12716 set_intr_state(dd, 0);
12717 /* clear all pending interrupts */
12718 clear_all_interrupts(dd);
12719
12720 /* reset general handler mask, chip MSI-X mappings */
12721 reset_interrupts(dd);
12722
12723 if (single_interrupt)
12724 ret = request_intx_irq(dd);
12725 else
12726 ret = request_msix_irqs(dd);
12727 if (ret)
12728 goto fail;
12729
12730 return 0;
12731
12732fail:
12733 clean_up_interrupts(dd);
12734 return ret;
12735}
12736
12737/*
12738 * Set up context values in dd. Sets:
12739 *
12740 * num_rcv_contexts - number of contexts being used
12741 * n_krcv_queues - number of kernel contexts
12742 * first_user_ctxt - first non-kernel context in array of contexts
12743 * freectxts - number of free user contexts
12744 * num_send_contexts - number of PIO send contexts being used
12745 */
12746static int set_up_context_variables(struct hfi1_devdata *dd)
12747{
12748 int num_kernel_contexts;
77241056
MM
12749 int total_contexts;
12750 int ret;
12751 unsigned ngroups;
8f000f7f
DL
12752 int qos_rmt_count;
12753 int user_rmt_reduced;
77241056
MM
12754
12755 /*
33a9eb52
DL
12756 * Kernel receive contexts:
12757 * - min of 2 or 1 context/numa (excluding control context)
82c2611d 12758 * - Context 0 - control context (VL15/multicast/error)
33a9eb52
DL
12759 * - Context 1 - first kernel context
12760 * - Context 2 - second kernel context
12761 * ...
77241056
MM
12762 */
12763 if (n_krcvqs)
82c2611d 12764 /*
33a9eb52
DL
12765 * n_krcvqs is the sum of module parameter kernel receive
12766 * contexts, krcvqs[]. It does not include the control
12767 * context, so add that.
82c2611d 12768 */
33a9eb52 12769 num_kernel_contexts = n_krcvqs + 1;
77241056 12770 else
0edf80ea 12771 num_kernel_contexts = num_online_nodes() + 1;
77241056
MM
12772 num_kernel_contexts =
12773 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12774 /*
12775 * Every kernel receive context needs an ACK send context.
 12776 * One send context is allocated for each VL{0-7} and VL15.
12777 */
12778 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12779 dd_dev_err(dd,
12780 "Reducing # kernel rcv contexts to: %d, from %d\n",
12781 (int)(dd->chip_send_contexts - num_vls - 1),
12782 (int)num_kernel_contexts);
12783 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12784 }
12785 /*
0852d241
JJ
12786 * User contexts:
12787 * - default to 1 user context per real (non-HT) CPU core if
12788 * num_user_contexts is negative
77241056 12789 */
2ce6bf22 12790 if (num_user_contexts < 0)
0852d241
JJ
12791 num_user_contexts =
12792 cpumask_weight(&dd->affinity->real_cpu_mask);
77241056
MM
12793
12794 total_contexts = num_kernel_contexts + num_user_contexts;
12795
12796 /*
12797 * Adjust the counts given a global max.
12798 */
12799 if (total_contexts > dd->chip_rcv_contexts) {
12800 dd_dev_err(dd,
12801 "Reducing # user receive contexts to: %d, from %d\n",
12802 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12803 (int)num_user_contexts);
12804 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12805 /* recalculate */
12806 total_contexts = num_kernel_contexts + num_user_contexts;
12807 }
12808
8f000f7f
DL
12809 /* each user context requires an entry in the RMT */
12810 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12811 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12812 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12813 dd_dev_err(dd,
12814 "RMT size is reducing the number of user receive contexts from %d to %d\n",
12815 (int)num_user_contexts,
12816 user_rmt_reduced);
12817 /* recalculate */
12818 num_user_contexts = user_rmt_reduced;
12819 total_contexts = num_kernel_contexts + num_user_contexts;
12820 }
12821
77241056
MM
12822 /* the first N are kernel contexts, the rest are user contexts */
12823 dd->num_rcv_contexts = total_contexts;
12824 dd->n_krcv_queues = num_kernel_contexts;
12825 dd->first_user_ctxt = num_kernel_contexts;
affa48de 12826 dd->num_user_contexts = num_user_contexts;
77241056
MM
12827 dd->freectxts = num_user_contexts;
12828 dd_dev_info(dd,
17fb4f29
JJ
12829 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12830 (int)dd->chip_rcv_contexts,
12831 (int)dd->num_rcv_contexts,
12832 (int)dd->n_krcv_queues,
12833 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
77241056
MM
12834
12835 /*
12836 * Receive array allocation:
12837 * All RcvArray entries are divided into groups of 8. This
12838 * is required by the hardware and will speed up writes to
12839 * consecutive entries by using write-combining of the entire
12840 * cacheline.
12841 *
 12842 * The number of groups is evenly divided among all contexts;
 12843 * any leftover groups will be given to the first N user
 12844 * contexts.
12845 */
12846 dd->rcv_entries.group_size = RCV_INCREMENT;
12847 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12848 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12849 dd->rcv_entries.nctxt_extra = ngroups -
12850 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12851 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12852 dd->rcv_entries.ngroups,
12853 dd->rcv_entries.nctxt_extra);
12854 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12855 MAX_EAGER_ENTRIES * 2) {
12856 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12857 dd->rcv_entries.group_size;
12858 dd_dev_info(dd,
17fb4f29
JJ
12859 "RcvArray group count too high, change to %u\n",
12860 dd->rcv_entries.ngroups);
77241056
MM
12861 dd->rcv_entries.nctxt_extra = 0;
12862 }
12863 /*
12864 * PIO send contexts
12865 */
12866 ret = init_sc_pools_and_sizes(dd);
12867 if (ret >= 0) { /* success */
12868 dd->num_send_contexts = ret;
12869 dd_dev_info(
12870 dd,
44306f15 12871 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
77241056
MM
12872 dd->chip_send_contexts,
12873 dd->num_send_contexts,
12874 dd->sc_sizes[SC_KERNEL].count,
12875 dd->sc_sizes[SC_ACK].count,
44306f15
JX
12876 dd->sc_sizes[SC_USER].count,
12877 dd->sc_sizes[SC_VL15].count);
77241056
MM
12878 ret = 0; /* success */
12879 }
12880
12881 return ret;
12882}
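
/*
 * Worked example (hypothetical sizes) for the RcvArray split above:
 * with chip_rcv_array_count = 2048 and group_size = 8 there are 256
 * groups; with num_rcv_contexts = 40 each context gets 256 / 40 = 6
 * groups and the remaining 256 - 40 * 6 = 16 groups (nctxt_extra) go
 * to the first 16 user contexts.
 */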
12883
12884/*
12885 * Set the device/port partition key table. The MAD code
12886 * will ensure that, at least, the partial management
12887 * partition key is present in the table.
12888 */
12889static void set_partition_keys(struct hfi1_pportdata *ppd)
12890{
12891 struct hfi1_devdata *dd = ppd->dd;
12892 u64 reg = 0;
12893 int i;
12894
12895 dd_dev_info(dd, "Setting partition keys\n");
12896 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12897 reg |= (ppd->pkeys[i] &
12898 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12899 ((i % 4) *
12900 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12901 /* Each register holds 4 PKey values. */
12902 if ((i % 4) == 3) {
12903 write_csr(dd, RCV_PARTITION_KEY +
12904 ((i - 3) * 2), reg);
12905 reg = 0;
12906 }
12907 }
12908
12909 /* Always enable HW pkeys check when pkeys table is set */
12910 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12911}
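
/*
 * Illustrative layout: set_partition_keys() packs four 16-bit pkeys
 * into each 64-bit RcvPartitionKey CSR.  For example, pkeys[4..7] land
 * in the second CSR (written at byte offset (7 - 3) * 2 = 8) as
 *
 *   reg = pkeys[4] | pkeys[5] << 16 | pkeys[6] << 32 | pkeys[7] << 48
 *
 * assuming RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT is 16, i.e. one
 * pkey per 16-bit field.
 */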
12912
12913/*
12914 * These CSRs and memories are uninitialized on reset and must be
12915 * written before reading to set the ECC/parity bits.
12916 *
 12917 * NOTE: All user context CSRs that are not mmapped write-only
12918 * (e.g. the TID flows) must be initialized even if the driver never
12919 * reads them.
12920 */
12921static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12922{
12923 int i, j;
12924
12925 /* CceIntMap */
12926 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
8638b77f 12927 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
77241056
MM
12928
12929 /* SendCtxtCreditReturnAddr */
12930 for (i = 0; i < dd->chip_send_contexts; i++)
12931 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12932
12933 /* PIO Send buffers */
12934 /* SDMA Send buffers */
4d114fdd
JJ
12935 /*
12936 * These are not normally read, and (presently) have no method
12937 * to be read, so are not pre-initialized
12938 */
77241056
MM
12939
12940 /* RcvHdrAddr */
12941 /* RcvHdrTailAddr */
12942 /* RcvTidFlowTable */
12943 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12944 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12945 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12946 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
8638b77f 12947 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
77241056
MM
12948 }
12949
12950 /* RcvArray */
12951 for (i = 0; i < dd->chip_rcv_array_count; i++)
8638b77f 12952 write_csr(dd, RCV_ARRAY + (8 * i),
17fb4f29 12953 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
77241056
MM
12954
12955 /* RcvQPMapTable */
12956 for (i = 0; i < 32; i++)
12957 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12958}
12959
12960/*
12961 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12962 */
12963static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12964 u64 ctrl_bits)
12965{
12966 unsigned long timeout;
12967 u64 reg;
12968
12969 /* is the condition present? */
12970 reg = read_csr(dd, CCE_STATUS);
12971 if ((reg & status_bits) == 0)
12972 return;
12973
12974 /* clear the condition */
12975 write_csr(dd, CCE_CTRL, ctrl_bits);
12976
12977 /* wait for the condition to clear */
12978 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12979 while (1) {
12980 reg = read_csr(dd, CCE_STATUS);
12981 if ((reg & status_bits) == 0)
12982 return;
12983 if (time_after(jiffies, timeout)) {
12984 dd_dev_err(dd,
17fb4f29
JJ
12985 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12986 status_bits, reg & status_bits);
77241056
MM
12987 return;
12988 }
12989 udelay(1);
12990 }
12991}
12992
12993/* set CCE CSRs to chip reset defaults */
12994static void reset_cce_csrs(struct hfi1_devdata *dd)
12995{
12996 int i;
12997
12998 /* CCE_REVISION read-only */
12999 /* CCE_REVISION2 read-only */
13000 /* CCE_CTRL - bits clear automatically */
13001 /* CCE_STATUS read-only, use CceCtrl to clear */
13002 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13003 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13004 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13005 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13006 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13007 /* CCE_ERR_STATUS read-only */
13008 write_csr(dd, CCE_ERR_MASK, 0);
13009 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13010 /* CCE_ERR_FORCE leave alone */
13011 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13012 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13013 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13014 /* CCE_PCIE_CTRL leave alone */
13015 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13016 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13017 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
17fb4f29 13018 CCE_MSIX_TABLE_UPPER_RESETCSR);
77241056
MM
13019 }
13020 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13021 /* CCE_MSIX_PBA read-only */
13022 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13023 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13024 }
13025 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13026 write_csr(dd, CCE_INT_MAP, 0);
13027 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13028 /* CCE_INT_STATUS read-only */
13029 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13030 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13031 /* CCE_INT_FORCE leave alone */
13032 /* CCE_INT_BLOCKED read-only */
13033 }
13034 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13035 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13036}
13037
77241056
MM
13038/* set MISC CSRs to chip reset defaults */
13039static void reset_misc_csrs(struct hfi1_devdata *dd)
13040{
13041 int i;
13042
13043 for (i = 0; i < 32; i++) {
13044 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13045 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13046 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13047 }
4d114fdd
JJ
13048 /*
13049 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13050 * only be written 128-byte chunks
13051 */
77241056
MM
13052 /* init RSA engine to clear lingering errors */
13053 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13054 write_csr(dd, MISC_CFG_RSA_MU, 0);
13055 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13056 /* MISC_STS_8051_DIGEST read-only */
13057 /* MISC_STS_SBM_DIGEST read-only */
13058 /* MISC_STS_PCIE_DIGEST read-only */
13059 /* MISC_STS_FAB_DIGEST read-only */
13060 /* MISC_ERR_STATUS read-only */
13061 write_csr(dd, MISC_ERR_MASK, 0);
13062 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13063 /* MISC_ERR_FORCE leave alone */
13064}
13065
13066/* set TXE CSRs to chip reset defaults */
13067static void reset_txe_csrs(struct hfi1_devdata *dd)
13068{
13069 int i;
13070
13071 /*
13072 * TXE Kernel CSRs
13073 */
13074 write_csr(dd, SEND_CTRL, 0);
13075 __cm_reset(dd, 0); /* reset CM internal state */
13076 /* SEND_CONTEXTS read-only */
13077 /* SEND_DMA_ENGINES read-only */
13078 /* SEND_PIO_MEM_SIZE read-only */
13079 /* SEND_DMA_MEM_SIZE read-only */
13080 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13081 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13082 /* SEND_PIO_ERR_STATUS read-only */
13083 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13084 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13085 /* SEND_PIO_ERR_FORCE leave alone */
13086 /* SEND_DMA_ERR_STATUS read-only */
13087 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13088 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13089 /* SEND_DMA_ERR_FORCE leave alone */
13090 /* SEND_EGRESS_ERR_STATUS read-only */
13091 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13092 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13093 /* SEND_EGRESS_ERR_FORCE leave alone */
13094 write_csr(dd, SEND_BTH_QP, 0);
13095 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13096 write_csr(dd, SEND_SC2VLT0, 0);
13097 write_csr(dd, SEND_SC2VLT1, 0);
13098 write_csr(dd, SEND_SC2VLT2, 0);
13099 write_csr(dd, SEND_SC2VLT3, 0);
13100 write_csr(dd, SEND_LEN_CHECK0, 0);
13101 write_csr(dd, SEND_LEN_CHECK1, 0);
13102 /* SEND_ERR_STATUS read-only */
13103 write_csr(dd, SEND_ERR_MASK, 0);
13104 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13105 /* SEND_ERR_FORCE read-only */
13106 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
8638b77f 13107 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
77241056 13108 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
8638b77f
JJ
13109 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13110 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13111 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
77241056 13112 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
8638b77f 13113 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
77241056 13114 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
8638b77f 13115 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
77241056 13116 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
17fb4f29 13117 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
77241056
MM
13118 /* SEND_CM_CREDIT_USED_STATUS read-only */
13119 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13120 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13121 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13122 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13123 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13124 for (i = 0; i < TXE_NUM_DATA_VL; i++)
8638b77f 13125 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
77241056
MM
13126 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13127 /* SEND_CM_CREDIT_USED_VL read-only */
13128 /* SEND_CM_CREDIT_USED_VL15 read-only */
13129 /* SEND_EGRESS_CTXT_STATUS read-only */
13130 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13131 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13132 /* SEND_EGRESS_ERR_INFO read-only */
13133 /* SEND_EGRESS_ERR_SOURCE read-only */
13134
13135 /*
13136 * TXE Per-Context CSRs
13137 */
13138 for (i = 0; i < dd->chip_send_contexts; i++) {
13139 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13140 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13141 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13142 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13143 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13144 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13145 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13146 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13147 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13148 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13149 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13150 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13151 }
13152
13153 /*
13154 * TXE Per-SDMA CSRs
13155 */
13156 for (i = 0; i < dd->chip_sdma_engines; i++) {
13157 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13158 /* SEND_DMA_STATUS read-only */
13159 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13160 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13161 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13162 /* SEND_DMA_HEAD read-only */
13163 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13164 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13165 /* SEND_DMA_IDLE_CNT read-only */
13166 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13167 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13168 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13169 /* SEND_DMA_ENG_ERR_STATUS read-only */
13170 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13171 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13172 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13173 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13174 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13175 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13176 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13177 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13178 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13179 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13180 }
13181}
13182
13183/*
13184 * Expect on entry:
13185 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13186 */
13187static void init_rbufs(struct hfi1_devdata *dd)
13188{
13189 u64 reg;
13190 int count;
13191
13192 /*
13193 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13194 * clear.
13195 */
13196 count = 0;
13197 while (1) {
13198 reg = read_csr(dd, RCV_STATUS);
13199 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13200 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13201 break;
13202 /*
13203 * Give up after 1ms - maximum wait time.
13204 *
13205 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13206 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13207 * 148 KB / (66% * 250MB/s) = 920us
13208 */
13209 if (count++ > 500) {
13210 dd_dev_err(dd,
17fb4f29
JJ
13211 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13212 __func__, reg);
77241056
MM
13213 break;
13214 }
13215 udelay(2); /* do not busy-wait the CSR */
13216 }
13217
13218 /* start the init - expect RcvCtrl to be 0 */
13219 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13220
13221 /*
 13222 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13223 * period after the write before RcvStatus.RxRbufInitDone is valid.
13224 * The delay in the first run through the loop below is sufficient and
 13225 * required before the first read of RcvStatus.RxRbufInitDone.
13226 */
13227 read_csr(dd, RCV_CTRL);
13228
13229 /* wait for the init to finish */
13230 count = 0;
13231 while (1) {
13232 /* delay is required first time through - see above */
13233 udelay(2); /* do not busy-wait the CSR */
13234 reg = read_csr(dd, RCV_STATUS);
13235 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13236 break;
13237
13238 /* give up after 100us - slowest possible at 33MHz is 73us */
13239 if (count++ > 50) {
13240 dd_dev_err(dd,
17fb4f29
JJ
13241 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13242 __func__);
77241056
MM
13243 break;
13244 }
13245 }
13246}
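
/*
 * Sanity check of the wait bound above, using the same numbers as the
 * comment in init_rbufs(): 148 KiB / (0.66 * 250 MB/s) is roughly
 * 151552 / 165e6 s, i.e. about 920 us, while the loop polls every 2 us
 * up to 500 times for a ~1 ms ceiling with a little margin over the
 * worst-case drain time.
 */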
13247
13248/* set RXE CSRs to chip reset defaults */
13249static void reset_rxe_csrs(struct hfi1_devdata *dd)
13250{
13251 int i, j;
13252
13253 /*
13254 * RXE Kernel CSRs
13255 */
13256 write_csr(dd, RCV_CTRL, 0);
13257 init_rbufs(dd);
13258 /* RCV_STATUS read-only */
13259 /* RCV_CONTEXTS read-only */
13260 /* RCV_ARRAY_CNT read-only */
13261 /* RCV_BUF_SIZE read-only */
13262 write_csr(dd, RCV_BTH_QP, 0);
13263 write_csr(dd, RCV_MULTICAST, 0);
13264 write_csr(dd, RCV_BYPASS, 0);
13265 write_csr(dd, RCV_VL15, 0);
13266 /* this is a clear-down */
13267 write_csr(dd, RCV_ERR_INFO,
17fb4f29 13268 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
77241056
MM
13269 /* RCV_ERR_STATUS read-only */
13270 write_csr(dd, RCV_ERR_MASK, 0);
13271 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13272 /* RCV_ERR_FORCE leave alone */
13273 for (i = 0; i < 32; i++)
13274 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13275 for (i = 0; i < 4; i++)
13276 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13277 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13278 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13279 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13280 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13281 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13282 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13283 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13284 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13285 }
13286 for (i = 0; i < 32; i++)
13287 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13288
13289 /*
13290 * RXE Kernel and User Per-Context CSRs
13291 */
13292 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13293 /* kernel */
13294 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13295 /* RCV_CTXT_STATUS read-only */
13296 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13297 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13298 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13299 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13300 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13301 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13302 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13303 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13304 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13305 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13306
13307 /* user */
13308 /* RCV_HDR_TAIL read-only */
13309 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13310 /* RCV_EGR_INDEX_TAIL read-only */
13311 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13312 /* RCV_EGR_OFFSET_TAIL read-only */
13313 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
17fb4f29
JJ
13314 write_uctxt_csr(dd, i,
13315 RCV_TID_FLOW_TABLE + (8 * j), 0);
77241056
MM
13316 }
13317 }
13318}
13319
13320/*
13321 * Set sc2vl tables.
13322 *
13323 * They power on to zeros, so to avoid send context errors
13324 * they need to be set:
13325 *
13326 * SC 0-7 -> VL 0-7 (respectively)
13327 * SC 15 -> VL 15
13328 * otherwise
13329 * -> VL 0
13330 */
13331static void init_sc2vl_tables(struct hfi1_devdata *dd)
13332{
13333 int i;
13334 /* init per architecture spec, constrained by hardware capability */
13335
13336 /* HFI maps sent packets */
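	/*
	 * Each SC2VL_VAL() argument list below is the table index followed
	 * by (SC, VL) pairs, e.g. "0, 0, 1, 1" maps SC0 -> VL0 and SC1 -> VL1,
	 * matching the mapping described in the comment above.
	 */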
13337 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13338 0,
13339 0, 0, 1, 1,
13340 2, 2, 3, 3,
13341 4, 4, 5, 5,
13342 6, 6, 7, 7));
13343 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13344 1,
13345 8, 0, 9, 0,
13346 10, 0, 11, 0,
13347 12, 0, 13, 0,
13348 14, 0, 15, 15));
13349 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13350 2,
13351 16, 0, 17, 0,
13352 18, 0, 19, 0,
13353 20, 0, 21, 0,
13354 22, 0, 23, 0));
13355 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13356 3,
13357 24, 0, 25, 0,
13358 26, 0, 27, 0,
13359 28, 0, 29, 0,
13360 30, 0, 31, 0));
13361
13362 /* DC maps received packets */
13363 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13364 15_0,
13365 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13366 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13367 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13368 31_16,
13369 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13370 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13371
13372 /* initialize the cached sc2vl values consistently with h/w */
13373 for (i = 0; i < 32; i++) {
13374 if (i < 8 || i == 15)
13375 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13376 else
13377 *((u8 *)(dd->sc2vl) + i) = 0;
13378 }
13379}
13380
13381/*
13382 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13383 * depend on the chip going through a power-on reset - a driver may be loaded
13384 * and unloaded many times.
13385 *
13386 * Do not write any CSR values to the chip in this routine - there may be
13387 * a reset following the (possible) FLR in this routine.
13388 *
13389 */
13390static void init_chip(struct hfi1_devdata *dd)
13391{
13392 int i;
13393
13394 /*
13395 * Put the HFI CSRs in a known state.
13396 * Combine this with a DC reset.
13397 *
13398 * Stop the device from doing anything while we do a
13399 * reset. We know there are no other active users of
13400 * the device since we are now in charge. Turn off
13401 * all outbound and inbound traffic and make sure
13402 * the device does not generate any interrupts.
13403 */
13404
13405 /* disable send contexts and SDMA engines */
13406 write_csr(dd, SEND_CTRL, 0);
13407 for (i = 0; i < dd->chip_send_contexts; i++)
13408 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13409 for (i = 0; i < dd->chip_sdma_engines; i++)
13410 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13411 /* disable port (turn off RXE inbound traffic) and contexts */
13412 write_csr(dd, RCV_CTRL, 0);
13413 for (i = 0; i < dd->chip_rcv_contexts; i++)
13414 write_csr(dd, RCV_CTXT_CTRL, 0);
13415 /* mask all interrupt sources */
13416 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
8638b77f 13417 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
77241056
MM
13418
13419 /*
13420 * DC Reset: do a full DC reset before the register clear.
13421 * A recommended length of time to hold is one CSR read,
13422 * so reread the CceDcCtrl. Then, hold the DC in reset
13423 * across the clear.
13424 */
13425 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
50e5dcbe 13426 (void)read_csr(dd, CCE_DC_CTRL);
77241056
MM
13427
13428 if (use_flr) {
13429 /*
13430 * A FLR will reset the SPC core and part of the PCIe.
13431 * The parts that need to be restored have already been
13432 * saved.
13433 */
13434 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13435
13436 /* do the FLR, the DC reset will remain */
13437 hfi1_pcie_flr(dd);
13438
13439 /* restore command and BARs */
13440 restore_pci_variables(dd);
13441
995deafa 13442 if (is_ax(dd)) {
77241056
MM
13443 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13444 hfi1_pcie_flr(dd);
13445 restore_pci_variables(dd);
13446 }
77241056
MM
13447 } else {
13448 dd_dev_info(dd, "Resetting CSRs with writes\n");
13449 reset_cce_csrs(dd);
13450 reset_txe_csrs(dd);
13451 reset_rxe_csrs(dd);
77241056
MM
13452 reset_misc_csrs(dd);
13453 }
13454 /* clear the DC reset */
13455 write_csr(dd, CCE_DC_CTRL, 0);
7c03ed85 13456
77241056 13457 /* Set the LED off */
773d0451
SS
13458 setextled(dd, 0);
13459
77241056
MM
13460 /*
13461 * Clear the QSFP reset.
72a67ba2 13462 * An FLR enforces a 0 on all out pins. The driver does not touch
77241056 13463 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low, holding
72a67ba2 13464 * anything plugged in constantly in reset if it pays attention
77241056 13465 * to RESET_N.
72a67ba2 13466 * Prime examples of this are optical cables. Set all pins high.
77241056
MM
13467 * I2CCLK and I2CDAT will change per direction, and INT_N and
13468 * MODPRS_N are input only and their value is ignored.
13469 */
72a67ba2
EH
13470 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13471 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
a2ee27a4 13472 init_chip_resources(dd);
77241056
MM
13473}
13474
13475static void init_early_variables(struct hfi1_devdata *dd)
13476{
13477 int i;
13478
13479 /* assign link credit variables */
13480 dd->vau = CM_VAU;
13481 dd->link_credits = CM_GLOBAL_CREDITS;
995deafa 13482 if (is_ax(dd))
77241056
MM
13483 dd->link_credits--;
13484 dd->vcu = cu_to_vcu(hfi1_cu);
13485 /* enough room for 8 MAD packets plus header - 17K */
13486 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13487 if (dd->vl15_init > dd->link_credits)
13488 dd->vl15_init = dd->link_credits;
13489
13490 write_uninitialized_csrs_and_memories(dd);
13491
13492 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13493 for (i = 0; i < dd->num_pports; i++) {
13494 struct hfi1_pportdata *ppd = &dd->pport[i];
13495
13496 set_partition_keys(ppd);
13497 }
13498 init_sc2vl_tables(dd);
13499}
13500
13501static void init_kdeth_qp(struct hfi1_devdata *dd)
13502{
13503 /* user changed the KDETH_QP */
13504 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13505 /* out of range or illegal value */
13506 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13507 kdeth_qp = 0;
13508 }
13509 if (kdeth_qp == 0) /* not set, or failed range check */
13510 kdeth_qp = DEFAULT_KDETH_QP;
13511
13512 write_csr(dd, SEND_BTH_QP,
17fb4f29
JJ
13513 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13514 SEND_BTH_QP_KDETH_QP_SHIFT);
77241056
MM
13515
13516 write_csr(dd, RCV_BTH_QP,
17fb4f29
JJ
13517 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13518 RCV_BTH_QP_KDETH_QP_SHIFT);
77241056
MM
13519}
13520
13521/**
13522 * init_qpmap_table
13523 * @dd - device data
13524 * @first_ctxt - first context
13525 * @last_ctxt - last context
13526 *
13527 * This routine sets the qpn mapping table that
13528 * is indexed by qpn[8:1].
13529 *
13530 * The routine will round robin the 256 settings
13531 * from first_ctxt to last_ctxt.
13532 *
13533 * The first/last looks ahead to having specialized
13534 * receive contexts for mgmt and bypass. Normal
13535 * verbs traffic is assumed to be on a range
13536 * of receive contexts.
13537 */
13538static void init_qpmap_table(struct hfi1_devdata *dd,
13539 u32 first_ctxt,
13540 u32 last_ctxt)
13541{
13542 u64 reg = 0;
13543 u64 regno = RCV_QP_MAP_TABLE;
13544 int i;
13545 u64 ctxt = first_ctxt;
13546
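	/*
	 * Each 64-bit RCV_QP_MAP_TABLE register holds eight 8-bit context
	 * entries, so a full register is written every eighth iteration.
	 */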
60d585ad 13547 for (i = 0; i < 256; i++) {
77241056 13548 reg |= ctxt << (8 * (i % 8));
77241056
MM
13549 ctxt++;
13550 if (ctxt > last_ctxt)
13551 ctxt = first_ctxt;
60d585ad 13552 if (i % 8 == 7) {
77241056
MM
13553 write_csr(dd, regno, reg);
13554 reg = 0;
13555 regno += 8;
13556 }
13557 }
77241056
MM
13558
13559 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13560 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13561}
13562
372cc85a
DL
13563struct rsm_map_table {
13564 u64 map[NUM_MAP_REGS];
13565 unsigned int used;
13566};
13567
b12349ae
DL
13568struct rsm_rule_data {
13569 u8 offset;
13570 u8 pkt_type;
13571 u32 field1_off;
13572 u32 field2_off;
13573 u32 index1_off;
13574 u32 index1_width;
13575 u32 index2_off;
13576 u32 index2_width;
13577 u32 mask1;
13578 u32 value1;
13579 u32 mask2;
13580 u32 value2;
13581};
13582
372cc85a
DL
13583/*
13584 * Return an initialized RMT map table for users to fill in. OK if it
13585 * returns NULL, indicating no table.
13586 */
13587static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13588{
13589 struct rsm_map_table *rmt;
13590 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13591
13592 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13593 if (rmt) {
13594 memset(rmt->map, rxcontext, sizeof(rmt->map));
13595 rmt->used = 0;
13596 }
13597
13598 return rmt;
13599}
13600
13601/*
13602 * Write the final RMT map table to the chip and free the table. OK if
13603 * table is NULL.
13604 */
13605static void complete_rsm_map_table(struct hfi1_devdata *dd,
13606 struct rsm_map_table *rmt)
13607{
13608 int i;
13609
13610 if (rmt) {
13611 /* write table to chip */
13612 for (i = 0; i < NUM_MAP_REGS; i++)
13613 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13614
13615 /* enable RSM */
13616 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13617 }
13618}
13619
b12349ae
DL
13620/*
13621 * Add a receive side mapping rule.
13622 */
13623static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13624 struct rsm_rule_data *rrd)
13625{
13626 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13627 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13628 1ull << rule_index | /* enable bit */
13629 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13630 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13631 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13632 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13633 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13634 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13635 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13636 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13637 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13638 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13639 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13640 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13641 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13642}
13643
4a818bed
DL
13644/* return the number of RSM map table entries that will be used for QOS */
13645static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13646 unsigned int *np)
13647{
13648 int i;
13649 unsigned int m, n;
13650 u8 max_by_vl = 0;
13651
13652 /* is QOS active at all? */
13653 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13654 num_vls == 1 ||
13655 krcvqsset <= 1)
13656 goto no_qos;
13657
13658 /* determine bits for qpn */
13659 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13660 if (krcvqs[i] > max_by_vl)
13661 max_by_vl = krcvqs[i];
13662 if (max_by_vl > 32)
13663 goto no_qos;
13664 m = ilog2(__roundup_pow_of_two(max_by_vl));
13665
13666 /* determine bits for vl */
13667 n = ilog2(__roundup_pow_of_two(num_vls));
13668
13669 /* reject if too much is used */
13670 if ((m + n) > 7)
13671 goto no_qos;
13672
13673 if (mp)
13674 *mp = m;
13675 if (np)
13676 *np = n;
13677
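	/*
	 * Worked example (assumed inputs): a krcvqs[] maximum of 5 queues
	 * with 4 VLs gives m = 3 and n = 2, i.e. 1 << (3 + 2) = 32 entries.
	 */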
13678 return 1 << (m + n);
13679
13680no_qos:
13681 if (mp)
13682 *mp = 0;
13683 if (np)
13684 *np = 0;
13685 return 0;
13686}
13687
77241056
MM
13688/**
13689 * init_qos - init RX qos
13690 * @dd - device data
372cc85a 13691 * @rmt - RSM map table
77241056 13692 *
33a9eb52
DL
13693 * This routine initializes Rule 0 and the RSM map table to implement
13694 * quality of service (qos).
77241056 13695 *
33a9eb52
DL
13696 * If all of the limit tests succeed, qos is applied based on the array
13697 * interpretation of krcvqs where entry 0 is VL0.
77241056 13698 *
33a9eb52
DL
13699 * The number of vl bits (n) and the number of qpn bits (m) are computed to
13700 * feed both the RSM map table and the single rule.
77241056 13701 */
372cc85a 13702static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
77241056 13703{
b12349ae 13704 struct rsm_rule_data rrd;
77241056 13705 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
372cc85a 13706 unsigned int rmt_entries;
77241056 13707 u64 reg;
77241056 13708
4a818bed 13709 if (!rmt)
77241056 13710 goto bail;
4a818bed
DL
13711 rmt_entries = qos_rmt_entries(dd, &m, &n);
13712 if (rmt_entries == 0)
77241056 13713 goto bail;
4a818bed
DL
13714 qpns_per_vl = 1 << m;
13715
372cc85a
DL
13716 /* enough room in the map table? */
13717 rmt_entries = 1 << (m + n);
13718 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
859bcad9 13719 goto bail;
4a818bed 13720
372cc85a 13721 /* add qos entries to the RSM map table */
33a9eb52 13722 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
77241056
MM
13723 unsigned tctxt;
13724
13725 for (qpn = 0, tctxt = ctxt;
13726 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13727 unsigned idx, regoff, regidx;
13728
372cc85a
DL
13729 /* generate the index the hardware will produce */
13730 idx = rmt->used + ((qpn << n) ^ i);
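			/*
			 * e.g. with n = 1 (two VLs): VL0 lands on the even
			 * offsets and VL1 on the odd offsets of the QOS
			 * region of the map table.
			 */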
77241056
MM
13731 regoff = (idx % 8) * 8;
13732 regidx = idx / 8;
372cc85a
DL
13733 /* replace default with context number */
13734 reg = rmt->map[regidx];
77241056
MM
13735 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13736 << regoff);
13737 reg |= (u64)(tctxt++) << regoff;
372cc85a 13738 rmt->map[regidx] = reg;
77241056
MM
13739 if (tctxt == ctxt + krcvqs[i])
13740 tctxt = ctxt;
13741 }
13742 ctxt += krcvqs[i];
13743 }
b12349ae
DL
13744
13745 rrd.offset = rmt->used;
13746 rrd.pkt_type = 2;
13747 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13748 rrd.field2_off = LRH_SC_MATCH_OFFSET;
13749 rrd.index1_off = LRH_SC_SELECT_OFFSET;
13750 rrd.index1_width = n;
13751 rrd.index2_off = QPN_SELECT_OFFSET;
13752 rrd.index2_width = m + n;
13753 rrd.mask1 = LRH_BTH_MASK;
13754 rrd.value1 = LRH_BTH_VALUE;
13755 rrd.mask2 = LRH_SC_MASK;
13756 rrd.value2 = LRH_SC_VALUE;
13757
13758 /* add rule 0 */
13759 add_rsm_rule(dd, 0, &rrd);
13760
372cc85a
DL
13761 /* mark RSM map entries as used */
13762 rmt->used += rmt_entries;
33a9eb52
DL
13763 /* map everything else to the mcast/err/vl15 context */
13764 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
77241056
MM
13765 dd->qos_shift = n + 1;
13766 return;
13767bail:
13768 dd->qos_shift = 1;
82c2611d 13769 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
77241056
MM
13770}
13771
8f000f7f
DL
13772static void init_user_fecn_handling(struct hfi1_devdata *dd,
13773 struct rsm_map_table *rmt)
13774{
13775 struct rsm_rule_data rrd;
13776 u64 reg;
13777 int i, idx, regoff, regidx;
13778 u8 offset;
13779
13780 /* there needs to be enough room in the map table */
13781 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
13782 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
13783 return;
13784 }
13785
13786 /*
13787 * RSM will extract the destination context as an index into the
13788 * map table. The destination contexts are a sequential block
13789 * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
13790 * Map entries are accessed as offset + extracted value. Adjust
13791 * the added offset so this sequence can be placed anywhere in
13792 * the table - as long as the entries themselves do not wrap.
13793 * There are only enough bits in offset for the table size, so
13794 * start with that to allow for a "negative" offset.
13795 */
13796 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
13797 (int)dd->first_user_ctxt);
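	/*
	 * Illustrative example (assumed values): used = 40 and
	 * first_user_ctxt = 72 with a 256-entry table gives offset = 224;
	 * 224 + 72 wraps (mod 256) back to map entry 40 = rmt->used.
	 */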
13798
13799 for (i = dd->first_user_ctxt, idx = rmt->used;
13800 i < dd->num_rcv_contexts; i++, idx++) {
13801 /* replace with identity mapping */
13802 regoff = (idx % 8) * 8;
13803 regidx = idx / 8;
13804 reg = rmt->map[regidx];
13805 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
13806 reg |= (u64)i << regoff;
13807 rmt->map[regidx] = reg;
13808 }
13809
13810 /*
13811 * For RSM intercept of Expected FECN packets:
13812 * o packet type 0 - expected
13813 * o match on F (bit 95), using select/match 1, and
13814 * o match on SH (bit 133), using select/match 2.
13815 *
13816 * Use index 1 to extract the 8-bit receive context from DestQP
13817 * (start at bit 64). Use that as the RSM map table index.
13818 */
13819 rrd.offset = offset;
13820 rrd.pkt_type = 0;
13821 rrd.field1_off = 95;
13822 rrd.field2_off = 133;
13823 rrd.index1_off = 64;
13824 rrd.index1_width = 8;
13825 rrd.index2_off = 0;
13826 rrd.index2_width = 0;
13827 rrd.mask1 = 1;
13828 rrd.value1 = 1;
13829 rrd.mask2 = 1;
13830 rrd.value2 = 1;
13831
13832 /* add rule 1 */
13833 add_rsm_rule(dd, 1, &rrd);
13834
13835 rmt->used += dd->num_user_contexts;
13836}
13837
77241056
MM
13838static void init_rxe(struct hfi1_devdata *dd)
13839{
372cc85a
DL
13840 struct rsm_map_table *rmt;
13841
77241056
MM
13842 /* enable all receive errors */
13843 write_csr(dd, RCV_ERR_MASK, ~0ull);
372cc85a
DL
13844
13845 rmt = alloc_rsm_map_table(dd);
13846 /* set up QOS, including the QPN map table */
13847 init_qos(dd, rmt);
8f000f7f 13848 init_user_fecn_handling(dd, rmt);
372cc85a
DL
13849 complete_rsm_map_table(dd, rmt);
13850 kfree(rmt);
13851
77241056
MM
13852 /*
13853 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13854 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13855 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13856 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13857 * Max_PayLoad_Size set to its minimum of 128.
13858 *
13859 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13860 * (64 bytes). Max_Payload_Size is possibly modified upward in
13861 * tune_pcie_caps() which is called after this routine.
13862 */
13863}
13864
13865static void init_other(struct hfi1_devdata *dd)
13866{
13867 /* enable all CCE errors */
13868 write_csr(dd, CCE_ERR_MASK, ~0ull);
13869 /* enable *some* Misc errors */
13870 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13871 /* enable all DC errors, except LCB */
13872 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13873 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13874}
13875
13876/*
13877 * Fill out the given AU table using the given CU. A CU is defined in terms
13878 * of AUs. The table is an encoding: given the index, how many AUs does that
13879 * represent?
13880 *
13881 * NOTE: Assumes that the register layout is the same for the
13882 * local and remote tables.
13883 */
13884static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13885 u32 csr0to3, u32 csr4to7)
13886{
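	/*
	 * Resulting AU entries: index 0 -> 0 AU, index 1 -> 1 AU, then
	 * 2*CU, 4*CU, 8*CU, 16*CU, 32*CU and 64*CU for indices 2 through 7.
	 */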
13887 write_csr(dd, csr0to3,
17fb4f29
JJ
13888 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
13889 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
13890 2ull * cu <<
13891 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
13892 4ull * cu <<
13893 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
77241056 13894 write_csr(dd, csr4to7,
17fb4f29
JJ
13895 8ull * cu <<
13896 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
13897 16ull * cu <<
13898 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
13899 32ull * cu <<
13900 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
13901 64ull * cu <<
13902 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
77241056
MM
13903}
13904
13905static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13906{
13907 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
17fb4f29 13908 SEND_CM_LOCAL_AU_TABLE4_TO7);
77241056
MM
13909}
13910
13911void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13912{
13913 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
17fb4f29 13914 SEND_CM_REMOTE_AU_TABLE4_TO7);
77241056
MM
13915}
13916
13917static void init_txe(struct hfi1_devdata *dd)
13918{
13919 int i;
13920
13921 /* enable all PIO, SDMA, general, and Egress errors */
13922 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13923 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13924 write_csr(dd, SEND_ERR_MASK, ~0ull);
13925 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13926
13927 /* enable all per-context and per-SDMA engine errors */
13928 for (i = 0; i < dd->chip_send_contexts; i++)
13929 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13930 for (i = 0; i < dd->chip_sdma_engines; i++)
13931 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13932
13933 /* set the local CU to AU mapping */
13934 assign_local_cm_au_table(dd, dd->vcu);
13935
13936 /*
13937 * Set reasonable default for Credit Return Timer
13938 * Don't set on Simulator - causes it to choke.
13939 */
13940 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13941 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13942}
13943
13944int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13945{
13946 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13947 unsigned sctxt;
13948 int ret = 0;
13949 u64 reg;
13950
13951 if (!rcd || !rcd->sc) {
13952 ret = -EINVAL;
13953 goto done;
13954 }
13955 sctxt = rcd->sc->hw_context;
13956 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13957 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13958 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13959 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13960 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13961 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13962 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13963 /*
13964 * Enable send-side J_KEY integrity check, unless this is A0 h/w
77241056 13965 */
995deafa 13966 if (!is_ax(dd)) {
77241056
MM
13967 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13968 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13969 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13970 }
13971
13972 /* Enable J_KEY check on receive context. */
13973 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13974 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13975 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13976 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13977done:
13978 return ret;
13979}
13980
13981int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13982{
13983 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13984 unsigned sctxt;
13985 int ret = 0;
13986 u64 reg;
13987
13988 if (!rcd || !rcd->sc) {
13989 ret = -EINVAL;
13990 goto done;
13991 }
13992 sctxt = rcd->sc->hw_context;
13993 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13994 /*
13995 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13996 * This check would not have been enabled for A0 h/w, see
13997 * set_ctxt_jkey().
13998 */
995deafa 13999 if (!is_ax(dd)) {
77241056
MM
14000 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14001 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14002 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14003 }
14004 /* Turn off the J_KEY on the receive side */
14005 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14006done:
14007 return ret;
14008}
14009
14010int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14011{
14012 struct hfi1_ctxtdata *rcd;
14013 unsigned sctxt;
14014 int ret = 0;
14015 u64 reg;
14016
e490974e 14017 if (ctxt < dd->num_rcv_contexts) {
77241056 14018 rcd = dd->rcd[ctxt];
e490974e 14019 } else {
77241056
MM
14020 ret = -EINVAL;
14021 goto done;
14022 }
14023 if (!rcd || !rcd->sc) {
14024 ret = -EINVAL;
14025 goto done;
14026 }
14027 sctxt = rcd->sc->hw_context;
14028 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14029 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14030 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14031 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14032 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
e38d1e4f 14033 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
77241056
MM
14034 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14035done:
14036 return ret;
14037}
14038
14039int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14040{
14041 struct hfi1_ctxtdata *rcd;
14042 unsigned sctxt;
14043 int ret = 0;
14044 u64 reg;
14045
e490974e 14046 if (ctxt < dd->num_rcv_contexts) {
77241056 14047 rcd = dd->rcd[ctxt];
e490974e 14048 } else {
77241056
MM
14049 ret = -EINVAL;
14050 goto done;
14051 }
14052 if (!rcd || !rcd->sc) {
14053 ret = -EINVAL;
14054 goto done;
14055 }
14056 sctxt = rcd->sc->hw_context;
14057 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14058 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14059 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14060 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14061done:
14062 return ret;
14063}
14064
14065/*
14066 * Start doing the clean up of the chip. Our clean up happens in multiple
14067 * stages and this is just the first.
14068 */
14069void hfi1_start_cleanup(struct hfi1_devdata *dd)
14070{
affa48de 14071 aspm_exit(dd);
77241056
MM
14072 free_cntrs(dd);
14073 free_rcverr(dd);
14074 clean_up_interrupts(dd);
a2ee27a4 14075 finish_chip_resources(dd);
77241056
MM
14076}
14077
14078#define HFI_BASE_GUID(dev) \
14079 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14080
14081/*
78eb129d
DL
14082 * Information can be shared between the two HFIs on the same ASIC
14083 * in the same OS. This function finds the peer device and sets
14084 * up a shared structure.
77241056 14085 */
78eb129d 14086static int init_asic_data(struct hfi1_devdata *dd)
77241056
MM
14087{
14088 unsigned long flags;
14089 struct hfi1_devdata *tmp, *peer = NULL;
78eb129d 14090 int ret = 0;
77241056
MM
14091
14092 spin_lock_irqsave(&hfi1_devs_lock, flags);
14093 /* Find our peer device */
14094 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14095 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14096 dd->unit != tmp->unit) {
14097 peer = tmp;
14098 break;
14099 }
14100 }
14101
78eb129d
DL
14102 if (peer) {
14103 dd->asic_data = peer->asic_data;
14104 } else {
14105 dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14106 if (!dd->asic_data) {
14107 ret = -ENOMEM;
14108 goto done;
14109 }
14110 mutex_init(&dd->asic_data->asic_resource_mutex);
14111 }
14112 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14113
14114done:
77241056 14115 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
78eb129d 14116 return ret;
77241056
MM
14117}
14118
5d9157aa
DL
14119/*
14120 * Set dd->boardname. Use a generic name if a name is not returned from
14121 * EFI variable space.
14122 *
14123 * Return 0 on success, -ENOMEM if space could not be allocated.
14124 */
14125static int obtain_boardname(struct hfi1_devdata *dd)
14126{
14127 /* generic board description */
14128 const char generic[] =
14129 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14130 unsigned long size;
14131 int ret;
14132
14133 ret = read_hfi1_efi_var(dd, "description", &size,
14134 (void **)&dd->boardname);
14135 if (ret) {
845f876d 14136 dd_dev_info(dd, "Board description not found\n");
5d9157aa
DL
14137 /* use generic description */
14138 dd->boardname = kstrdup(generic, GFP_KERNEL);
14139 if (!dd->boardname)
14140 return -ENOMEM;
14141 }
14142 return 0;
14143}
14144
24487dd3
KW
14145/*
14146 * Check the interrupt registers to make sure that they are mapped correctly.
14147 * It is intended to help the user identify any mismapping by the VMM
14148 * when the driver is running in a VM. This function should only be
14149 * called before interrupts are set up properly.
14150 *
14151 * Return 0 on success, -EINVAL on failure.
14152 */
14153static int check_int_registers(struct hfi1_devdata *dd)
14154{
14155 u64 reg;
14156 u64 all_bits = ~(u64)0;
14157 u64 mask;
14158
14159 /* Clear CceIntMask[0] to avoid raising any interrupts */
14160 mask = read_csr(dd, CCE_INT_MASK);
14161 write_csr(dd, CCE_INT_MASK, 0ull);
14162 reg = read_csr(dd, CCE_INT_MASK);
14163 if (reg)
14164 goto err_exit;
14165
14166 /* Clear all interrupt status bits */
14167 write_csr(dd, CCE_INT_CLEAR, all_bits);
14168 reg = read_csr(dd, CCE_INT_STATUS);
14169 if (reg)
14170 goto err_exit;
14171
14172 /* Set all interrupt status bits */
14173 write_csr(dd, CCE_INT_FORCE, all_bits);
14174 reg = read_csr(dd, CCE_INT_STATUS);
14175 if (reg != all_bits)
14176 goto err_exit;
14177
14178 /* Restore the interrupt mask */
14179 write_csr(dd, CCE_INT_CLEAR, all_bits);
14180 write_csr(dd, CCE_INT_MASK, mask);
14181
14182 return 0;
14183err_exit:
14184 write_csr(dd, CCE_INT_MASK, mask);
14185 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14186 return -EINVAL;
14187}
14188
77241056 14189/**
7c03ed85 14190 * Allocate and initialize the device structure for the hfi.
77241056
MM
14191 * @dev: the pci_dev for hfi1_ib device
14192 * @ent: pci_device_id struct for this dev
14193 *
14194 * Also allocates, initializes, and returns the devdata struct for this
14195 * device instance
14196 *
14197 * This is global, and is called directly at init to set up the
14198 * chip-specific function pointers for later use.
14199 */
14200struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14201 const struct pci_device_id *ent)
14202{
14203 struct hfi1_devdata *dd;
14204 struct hfi1_pportdata *ppd;
14205 u64 reg;
14206 int i, ret;
14207 static const char * const inames[] = { /* implementation names */
14208 "RTL silicon",
14209 "RTL VCS simulation",
14210 "RTL FPGA emulation",
14211 "Functional simulator"
14212 };
24487dd3 14213 struct pci_dev *parent = pdev->bus->self;
77241056 14214
17fb4f29
JJ
14215 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14216 sizeof(struct hfi1_pportdata));
77241056
MM
14217 if (IS_ERR(dd))
14218 goto bail;
14219 ppd = dd->pport;
14220 for (i = 0; i < dd->num_pports; i++, ppd++) {
14221 int vl;
14222 /* init common fields */
14223 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14224 /* DC supports 4 link widths */
14225 ppd->link_width_supported =
14226 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14227 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14228 ppd->link_width_downgrade_supported =
14229 ppd->link_width_supported;
14230 /* start out enabling only 4X */
14231 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14232 ppd->link_width_downgrade_enabled =
14233 ppd->link_width_downgrade_supported;
14234 /* link width active is 0 when link is down */
14235 /* link width downgrade active is 0 when link is down */
14236
d0d236ea
JJ
14237 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14238 num_vls > HFI1_MAX_VLS_SUPPORTED) {
77241056
MM
14239 hfi1_early_err(&pdev->dev,
14240 "Invalid num_vls %u, using %u VLs\n",
14241 num_vls, HFI1_MAX_VLS_SUPPORTED);
14242 num_vls = HFI1_MAX_VLS_SUPPORTED;
14243 }
14244 ppd->vls_supported = num_vls;
14245 ppd->vls_operational = ppd->vls_supported;
8a4d3444 14246 ppd->actual_vls_operational = ppd->vls_supported;
77241056
MM
14247 /* Set the default MTU. */
14248 for (vl = 0; vl < num_vls; vl++)
14249 dd->vld[vl].mtu = hfi1_max_mtu;
14250 dd->vld[15].mtu = MAX_MAD_PACKET;
14251 /*
14252 * Set the initial values to reasonable default, will be set
14253 * for real when link is up.
14254 */
14255 ppd->lstate = IB_PORT_DOWN;
14256 ppd->overrun_threshold = 0x4;
14257 ppd->phy_error_threshold = 0xf;
14258 ppd->port_crc_mode_enabled = link_crc_mask;
14259 /* initialize supported LTP CRC mode */
14260 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14261 /* initialize enabled LTP CRC mode */
14262 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14263 /* start in offline */
14264 ppd->host_link_state = HLS_DN_OFFLINE;
14265 init_vl_arb_caches(ppd);
f45c8dc8 14266 ppd->last_pstate = 0xff; /* invalid value */
77241056
MM
14267 }
14268
14269 dd->link_default = HLS_DN_POLL;
14270
14271 /*
14272 * Do remaining PCIe setup and save PCIe values in dd.
14273 * Any error printing is already done by the init code.
14274 * On return, we have the chip mapped.
14275 */
14276 ret = hfi1_pcie_ddinit(dd, pdev, ent);
14277 if (ret < 0)
14278 goto bail_free;
14279
14280 /* verify that reads actually work, save revision for reset check */
14281 dd->revision = read_csr(dd, CCE_REVISION);
14282 if (dd->revision == ~(u64)0) {
14283 dd_dev_err(dd, "cannot read chip CSRs\n");
14284 ret = -EINVAL;
14285 goto bail_cleanup;
14286 }
14287 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14288 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14289 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14290 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14291
24487dd3
KW
14292 /*
14293 * Check the interrupt register mapping if the driver has no access to
14294 * the upstream component. In this case, it is likely that the driver
14295 * is running in a VM.
14296 */
14297 if (!parent) {
14298 ret = check_int_registers(dd);
14299 if (ret)
14300 goto bail_cleanup;
14301 }
14302
4d114fdd
JJ
14303 /*
14304 * obtain the hardware ID - NOT related to unit, which is a
14305 * software enumeration
14306 */
77241056
MM
14307 reg = read_csr(dd, CCE_REVISION2);
14308 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14309 & CCE_REVISION2_HFI_ID_MASK;
14310 /* the variable size will remove unwanted bits */
14311 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14312 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14313 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
17fb4f29
JJ
14314 dd->icode < ARRAY_SIZE(inames) ?
14315 inames[dd->icode] : "unknown", (int)dd->irev);
77241056
MM
14316
14317 /* speeds the hardware can support */
14318 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14319 /* speeds allowed to run at */
14320 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14321 /* give a reasonable active value, will be set on link up */
14322 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14323
14324 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14325 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14326 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14327 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14328 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14329 /* fix up link widths for emulation _p */
14330 ppd = dd->pport;
14331 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14332 ppd->link_width_supported =
14333 ppd->link_width_enabled =
14334 ppd->link_width_downgrade_supported =
14335 ppd->link_width_downgrade_enabled =
14336 OPA_LINK_WIDTH_1X;
14337 }
14338 /* ensure num_vls isn't larger than the number of sdma engines */
14339 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14340 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
11a5909b
DL
14341 num_vls, dd->chip_sdma_engines);
14342 num_vls = dd->chip_sdma_engines;
14343 ppd->vls_supported = dd->chip_sdma_engines;
8a4d3444 14344 ppd->vls_operational = ppd->vls_supported;
77241056
MM
14345 }
14346
14347 /*
14348 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14349 * Limit the max if larger than the field holds. If timeout is
14350 * non-zero, then the calculated field will be at least 1.
14351 *
14352 * Must be after icode is set up - the cclock rate depends
14353 * on knowing the hardware being used.
14354 */
14355 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14356 if (dd->rcv_intr_timeout_csr >
14357 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14358 dd->rcv_intr_timeout_csr =
14359 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14360 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14361 dd->rcv_intr_timeout_csr = 1;
14362
7c03ed85
EH
14363 /* needs to be done before we look for the peer device */
14364 read_guid(dd);
14365
78eb129d
DL
14366 /* set up shared ASIC data with peer device */
14367 ret = init_asic_data(dd);
14368 if (ret)
14369 goto bail_cleanup;
7c03ed85 14370
77241056
MM
14371 /* obtain chip sizes, reset chip CSRs */
14372 init_chip(dd);
14373
14374 /* read in the PCIe link speed information */
14375 ret = pcie_speeds(dd);
14376 if (ret)
14377 goto bail_cleanup;
14378
c3838b39
EH
14379 /* Needs to be called before hfi1_firmware_init */
14380 get_platform_config(dd);
14381
77241056
MM
14382 /* read in firmware */
14383 ret = hfi1_firmware_init(dd);
14384 if (ret)
14385 goto bail_cleanup;
14386
14387 /*
14388 * In general, the PCIe Gen3 transition must occur after the
14389 * chip has been idled (so it won't initiate any PCIe transactions
14390 * e.g. an interrupt) and before the driver changes any registers
14391 * (the transition will reset the registers).
14392 *
14393 * In particular, place this call after:
14394 * - init_chip() - the chip will not initiate any PCIe transactions
14395 * - pcie_speeds() - reads the current link speed
14396 * - hfi1_firmware_init() - the needed firmware is ready to be
14397 * downloaded
14398 */
14399 ret = do_pcie_gen3_transition(dd);
14400 if (ret)
14401 goto bail_cleanup;
14402
14403 /* start setting dd values and adjusting CSRs */
14404 init_early_variables(dd);
14405
14406 parse_platform_config(dd);
14407
5d9157aa
DL
14408 ret = obtain_boardname(dd);
14409 if (ret)
77241056 14410 goto bail_cleanup;
77241056
MM
14411
14412 snprintf(dd->boardversion, BOARD_VERS_MAX,
5d9157aa 14413 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
77241056 14414 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
77241056
MM
14415 (u32)dd->majrev,
14416 (u32)dd->minrev,
14417 (dd->revision >> CCE_REVISION_SW_SHIFT)
14418 & CCE_REVISION_SW_MASK);
14419
0852d241
JJ
14420 /*
14421 * The real cpu mask is part of the affinity struct but has to be
14422 * initialized earlier than the rest of the affinity struct because it
14423 * is needed to calculate the number of user contexts in
14424 * set_up_context_variables(). However, hfi1_dev_affinity_init(),
14425 * which initializes the rest of the affinity struct members,
14426 * depends on set_up_context_variables() for the number of kernel
14427 * contexts, so it cannot be called before set_up_context_variables().
14428 */
14429 ret = init_real_cpu_mask(dd);
14430 if (ret)
14431 goto bail_cleanup;
14432
77241056
MM
14433 ret = set_up_context_variables(dd);
14434 if (ret)
14435 goto bail_cleanup;
14436
14437 /* set initial RXE CSRs */
14438 init_rxe(dd);
14439 /* set initial TXE CSRs */
14440 init_txe(dd);
14441 /* set initial non-RXE, non-TXE CSRs */
14442 init_other(dd);
14443 /* set up KDETH QP prefix in both RX and TX CSRs */
14444 init_kdeth_qp(dd);
14445
0852d241 14446 hfi1_dev_affinity_init(dd);
957558c9 14447
77241056
MM
14448 /* send contexts must be set up before receive contexts */
14449 ret = init_send_contexts(dd);
14450 if (ret)
14451 goto bail_cleanup;
14452
14453 ret = hfi1_create_ctxts(dd);
14454 if (ret)
14455 goto bail_cleanup;
14456
14457 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14458 /*
14459 * rcd[0] is guaranteed to be valid by this point. Also, all
14460 * contexts are using the same value, as per the module parameter.
14461 */
14462 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14463
14464 ret = init_pervl_scs(dd);
14465 if (ret)
14466 goto bail_cleanup;
14467
14468 /* sdma init */
14469 for (i = 0; i < dd->num_pports; ++i) {
14470 ret = sdma_init(dd, i);
14471 if (ret)
14472 goto bail_cleanup;
14473 }
14474
14475 /* use contexts created by hfi1_create_ctxts */
14476 ret = set_up_interrupts(dd);
14477 if (ret)
14478 goto bail_cleanup;
14479
14480 /* set up LCB access - must be after set_up_interrupts() */
14481 init_lcb_access(dd);
14482
14483 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14484 dd->base_guid & 0xFFFFFF);
14485
14486 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14487 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14488 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14489
14490 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14491 if (ret)
14492 goto bail_clear_intr;
14493 check_fabric_firmware_versions(dd);
14494
14495 thermal_init(dd);
14496
14497 ret = init_cntrs(dd);
14498 if (ret)
14499 goto bail_clear_intr;
14500
14501 ret = init_rcverr(dd);
14502 if (ret)
14503 goto bail_free_cntrs;
14504
14505 ret = eprom_init(dd);
14506 if (ret)
14507 goto bail_free_rcverr;
14508
14509 goto bail;
14510
14511bail_free_rcverr:
14512 free_rcverr(dd);
14513bail_free_cntrs:
14514 free_cntrs(dd);
14515bail_clear_intr:
14516 clean_up_interrupts(dd);
14517bail_cleanup:
14518 hfi1_pcie_ddcleanup(dd);
14519bail_free:
14520 hfi1_free_devdata(dd);
14521 dd = ERR_PTR(ret);
14522bail:
14523 return dd;
14524}
14525
14526static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14527 u32 dw_len)
14528{
14529 u32 delta_cycles;
14530 u32 current_egress_rate = ppd->current_egress_rate;
14531 /* rates here are in units of 10^6 bits/sec */
14532
14533 if (desired_egress_rate == -1)
14534 return 0; /* shouldn't happen */
14535
14536 if (desired_egress_rate >= current_egress_rate)
14537 return 0; /* we can't help go faster, only slower */
14538
14539 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14540 egress_cycles(dw_len * 4, current_egress_rate);
14541
14542 return (u16)delta_cycles;
14543}
14544
77241056
MM
14545/**
14546 * create_pbc - build a pbc for transmission
14547 * @flags: special case flags or-ed in built pbc
14548 * @srate: static rate
14549 * @vl: vl
14550 * @dwlen: dword length (header words + data words + pbc words)
14551 *
14552 * Create a PBC with the given flags, rate, VL, and length.
14553 *
14554 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14555 * for verbs, which does not use this PSM feature. The lone other caller
14556 * is for the diagnostic interface which calls this if the user does not
14557 * supply their own PBC.
14558 */
14559u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14560 u32 dw_len)
14561{
14562 u64 pbc, delay = 0;
14563
14564 if (unlikely(srate_mbs))
14565 delay = delay_cycles(ppd, srate_mbs, dw_len);
14566
14567 pbc = flags
14568 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14569 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14570 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14571 | (dw_len & PBC_LENGTH_DWS_MASK)
14572 << PBC_LENGTH_DWS_SHIFT;
14573
14574 return pbc;
14575}
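/*
 * Usage sketch (caller-side values assumed, not from this file): a PIO
 * send of plen dwords on VL 0 with no static rate might build its
 * per-packet control word as
 *	pbc = create_pbc(ppd, 0, 0, 0, plen);
 * and write the result as the first quad-word of the PIO send buffer.
 */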
14576
14577#define SBUS_THERMAL 0x4f
14578#define SBUS_THERM_MONITOR_MODE 0x1
14579
14580#define THERM_FAILURE(dev, ret, reason) \
14581 dd_dev_err((dd), \
14582 "Thermal sensor initialization failed: %s (%d)\n", \
14583 (reason), (ret))
14584
14585/*
14586 * Initialize the Avago Thermal sensor.
14587 *
14588 * After initialization, enable polling of thermal sensor through
14589 * SBus interface. For this to work, the SBus Master
14590 * firmware has to be loaded because the HW polling
14591 * logic uses SBus interrupts, which are not supported with
14592 * the default firmware. Otherwise, no data will be returned through
14593 * the ASIC_STS_THERM CSR.
14594 */
14595static int thermal_init(struct hfi1_devdata *dd)
14596{
14597 int ret = 0;
14598
14599 if (dd->icode != ICODE_RTL_SILICON ||
a453698b 14600 check_chip_resource(dd, CR_THERM_INIT, NULL))
77241056
MM
14601 return ret;
14602
576531fd
DL
14603 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14604 if (ret) {
14605 THERM_FAILURE(dd, ret, "Acquire SBus");
14606 return ret;
14607 }
14608
77241056 14609 dd_dev_info(dd, "Initializing thermal sensor\n");
4ef98989
JAQ
14610 /* Disable polling of thermal readings */
14611 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14612 msleep(100);
77241056
MM
14613 /* Thermal Sensor Initialization */
14614 /* Step 1: Reset the Thermal SBus Receiver */
14615 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14616 RESET_SBUS_RECEIVER, 0);
14617 if (ret) {
14618 THERM_FAILURE(dd, ret, "Bus Reset");
14619 goto done;
14620 }
14621 /* Step 2: Set Reset bit in Thermal block */
14622 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14623 WRITE_SBUS_RECEIVER, 0x1);
14624 if (ret) {
14625 THERM_FAILURE(dd, ret, "Therm Block Reset");
14626 goto done;
14627 }
14628 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14629 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14630 WRITE_SBUS_RECEIVER, 0x32);
14631 if (ret) {
14632 THERM_FAILURE(dd, ret, "Write Clock Div");
14633 goto done;
14634 }
14635 /* Step 4: Select temperature mode */
14636 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14637 WRITE_SBUS_RECEIVER,
14638 SBUS_THERM_MONITOR_MODE);
14639 if (ret) {
14640 THERM_FAILURE(dd, ret, "Write Mode Sel");
14641 goto done;
14642 }
14643 /* Step 5: De-assert block reset and start conversion */
14644 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14645 WRITE_SBUS_RECEIVER, 0x2);
14646 if (ret) {
14647 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14648 goto done;
14649 }
14650 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14651 msleep(22);
14652
14653 /* Enable polling of thermal readings */
14654 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
a453698b
DL
14655
14656 /* Set initialized flag */
14657 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14658 if (ret)
14659 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14660
77241056 14661done:
576531fd 14662 release_chip_resource(dd, CR_SBUS);
77241056
MM
14663 return ret;
14664}
14665
14666static void handle_temp_err(struct hfi1_devdata *dd)
14667{
14668 struct hfi1_pportdata *ppd = &dd->pport[0];
14669 /*
14670 * Thermal Critical Interrupt
14671 * Put the device into forced freeze mode, take link down to
14672 * offline, and put DC into reset.
14673 */
14674 dd_dev_emerg(dd,
14675 "Critical temperature reached! Forcing device into freeze mode!\n");
14676 dd->flags |= HFI1_FORCED_FREEZE;
8638b77f 14677 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
77241056
MM
14678 /*
14679 * Shut DC down as much and as quickly as possible.
14680 *
14681 * Step 1: Take the link down to OFFLINE. This will cause the
14682 * 8051 to put the Serdes in reset. However, we don't want to
14683 * go through the entire link state machine since we want to
14684 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14685 * but rather an attempt to save the chip.
14686 * Code below is almost the same as quiet_serdes() but avoids
14687 * all the extra work and the sleeps.
14688 */
14689 ppd->driver_link_ready = 0;
14690 ppd->link_enabled = 0;
bf640096
HC
14691 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14692 PLS_OFFLINE);
77241056
MM
14693 /*
14694 * Step 2: Shutdown LCB and 8051
14695 * After shutdown, do not restore DC_CFG_RESET value.
14696 */
14697 dc_shutdown(dd);
14698}