Commit | Line | Data |
---|---|---|
f931551b | 1 | /* |
1fb9fed6 MM |
2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved. | |
f931551b RC |
4 | * |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenIB.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
32 | */ | |
33 | ||
34 | /* | |
35 | * This file contains all of the code that is specific to the | |
36 | * QLogic InfiniPath 7322 chip | |
37 | */ | |
38 | ||
39 | #include <linux/interrupt.h> | |
40 | #include <linux/pci.h> | |
41 | #include <linux/delay.h> | |
42 | #include <linux/io.h> | |
43 | #include <linux/jiffies.h> | |
e4dd23d7 | 44 | #include <linux/module.h> |
f931551b RC |
45 | #include <rdma/ib_verbs.h> |
46 | #include <rdma/ib_smi.h> | |
8469ba39 MM |
47 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
48 | #include <linux/dca.h> | |
49 | #endif | |
f931551b RC |
50 | |
51 | #include "qib.h" | |
52 | #include "qib_7322_regs.h" | |
53 | #include "qib_qsfp.h" | |
54 | ||
55 | #include "qib_mad.h" | |
1fb9fed6 | 56 | #include "qib_verbs.h" |
f931551b | 57 | |
7fac3301 MM |
58 | #undef pr_fmt |
59 | #define pr_fmt(fmt) QIB_DRV_NAME " " fmt | |
60 | ||
f931551b RC |
61 | static void qib_setup_7322_setextled(struct qib_pportdata *, u32); |
62 | static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t); | |
63 | static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op); | |
64 | static irqreturn_t qib_7322intr(int irq, void *data); | |
65 | static irqreturn_t qib_7322bufavail(int irq, void *data); | |
66 | static irqreturn_t sdma_intr(int irq, void *data); | |
67 | static irqreturn_t sdma_idle_intr(int irq, void *data); | |
68 | static irqreturn_t sdma_progress_intr(int irq, void *data); | |
69 | static irqreturn_t sdma_cleanup_intr(int irq, void *data); | |
70 | static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32, | |
71 | struct qib_ctxtdata *rcd); | |
72 | static u8 qib_7322_phys_portstate(u64); | |
73 | static u32 qib_7322_iblink_state(u64); | |
74 | static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd, | |
75 | u16 linitcmd); | |
76 | static void force_h1(struct qib_pportdata *); | |
77 | static void adj_tx_serdes(struct qib_pportdata *); | |
78 | static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8); | |
79 | static void qib_7322_mini_pcs_reset(struct qib_pportdata *); | |
80 | ||
81 | static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32); | |
82 | static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned); | |
a0a234d4 MM |
83 | static void serdes_7322_los_enable(struct qib_pportdata *, int); |
84 | static int serdes_7322_init_old(struct qib_pportdata *); | |
85 | static int serdes_7322_init_new(struct qib_pportdata *); | |
0b3ddf38 | 86 | static void dump_sdma_7322_state(struct qib_pportdata *); |
f931551b RC |
87 | |
88 | #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb)) | |
89 | ||
90 | /* LE2 serdes values for different cases */ | |
91 | #define LE2_DEFAULT 5 | |
92 | #define LE2_5m 4 | |
93 | #define LE2_QME 0 | |
94 | ||
95 | /* Below is special-purpose, so only really works for the IB SerDes blocks. */ | |
96 | #define IBSD(hw_pidx) (hw_pidx + 2) | |
97 | ||
98 | /* these are variables for documentation and experimentation purposes */ | |
99 | static const unsigned rcv_int_timeout = 375; | |
100 | static const unsigned rcv_int_count = 16; | |
101 | static const unsigned sdma_idle_cnt = 64; | |
102 | ||
103 | /* Time to stop altering Rx Equalization parameters, after link up. */ | |
104 | #define RXEQ_DISABLE_MSECS 2500 | |
105 | ||
106 | /* | |
107 | * Number of VLs we are configured to use (to allow for more | |
108 | * credits per vl, etc.) | |
109 | */ | |
110 | ushort qib_num_cfg_vls = 2; | |
111 | module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO); | |
112 | MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)"); | |
113 | ||
114 | static ushort qib_chase = 1; | |
115 | module_param_named(chase, qib_chase, ushort, S_IRUGO); | |
116 | MODULE_PARM_DESC(chase, "Enable state chase handling"); | |
117 | ||
118 | static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */ | |
119 | module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO); | |
a46a2802 | 120 | MODULE_PARM_DESC(long_attenuation, |
f931551b RC |
121 | "attenuation cutoff (dB) for long copper cable setup"); |
122 | ||
123 | static ushort qib_singleport; | |
124 | module_param_named(singleport, qib_singleport, ushort, S_IRUGO); | |
125 | MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); | |
126 | ||
e67306a3 MM |
127 | static ushort qib_krcvq01_no_msi; |
128 | module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO); | |
129 | MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2"); | |
130 | ||
0a43e117 MM |
131 | /* |
132 | * Receive header queue sizes | |
133 | */ | |
134 | static unsigned qib_rcvhdrcnt; | |
135 | module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO); | |
136 | MODULE_PARM_DESC(rcvhdrcnt, "receive header count"); | |
137 | ||
138 | static unsigned qib_rcvhdrsize; | |
139 | module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO); | |
140 | MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words"); | |
141 | ||
142 | static unsigned qib_rcvhdrentsize; | |
143 | module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO); | |
144 | MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words"); | |
145 | ||
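Taken together, these three parameters fix the receive header queue geometry. As a rough sizing sketch (standalone C, hypothetical values, ignoring the driver's alignment and padding):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical parameter values, not driver defaults */
	unsigned rcvhdrcnt = 2048;	/* entries in the header queue */
	unsigned rcvhdrentsize = 32;	/* entry size, in 32-bit words */
	size_t bytes = (size_t)rcvhdrcnt * rcvhdrentsize * sizeof(uint32_t);

	printf("hdrq ~ %zu KiB per context\n", bytes / 1024);	/* 256 KiB */
	return 0;
}
```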
f931551b RC |
146 | #define MAX_ATTEN_LEN 64 /* plenty for any real system */ |
147 | /* for read back, default index is ~5m copper cable */ | |
a77fcf89 RC |
148 | static char txselect_list[MAX_ATTEN_LEN] = "10"; |
149 | static struct kparam_string kp_txselect = { | |
150 | .string = txselect_list, | |
f931551b RC |
151 | .maxlen = MAX_ATTEN_LEN |
152 | }; | |
a77fcf89 RC |
153 | static int setup_txselect(const char *, struct kernel_param *); |
154 | module_param_call(txselect, setup_txselect, param_get_string, | |
155 | &kp_txselect, S_IWUSR | S_IRUGO); | |
a46a2802 | 156 | MODULE_PARM_DESC(txselect, |
a77fcf89 | 157 | "Tx serdes indices (for no QSFP or invalid QSFP data)"); |
f931551b RC |
158 | |
159 | #define BOARD_QME7342 5 | |
160 | #define BOARD_QMH7342 6 | |
0e6bbba5 | 161 | #define BOARD_QMH7360 9 |
f931551b RC |
162 | #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ |
163 | BOARD_QMH7342) | |
164 | #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ | |
165 | BOARD_QME7342) | |
166 | ||
167 | #define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64)) | |
168 | ||
169 | #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64))) | |
170 | ||
171 | #define MASK_ACROSS(lsb, msb) \ | |
172 | (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb)) | |
173 | ||
174 | #define SYM_RMASK(regname, fldname) ((u64) \ | |
175 | QIB_7322_##regname##_##fldname##_RMASK) | |
176 | ||
177 | #define SYM_MASK(regname, fldname) ((u64) \ | |
178 | QIB_7322_##regname##_##fldname##_RMASK << \ | |
179 | QIB_7322_##regname##_##fldname##_LSB) | |
180 | ||
181 | #define SYM_FIELD(value, regname, fldname) ((u64) \ | |
182 | (((value) >> SYM_LSB(regname, fldname)) & \ | |
183 | SYM_RMASK(regname, fldname))) | |
184 | ||
185 | /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */ | |
186 | #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \ | |
187 | (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits)) | |
188 | ||
189 | #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask) | |
190 | #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask) | |
191 | #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask) | |
192 | #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask) | |
193 | #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port) | |
194 | /* Below because most, but not all, fields of IntMask have that full suffix */ | |
195 | #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port) | |
196 | ||
197 | ||
198 | #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB) | |
199 | ||
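The macro layering above is easier to see with one worked instance. Below is a standalone sketch; the field name and bit positions are invented for illustration, where the driver would use the machine-generated QIB_7322_*_LSB/_RMASK constants:

```c
#include <stdint.h>
#include <stdio.h>

/* invented field: bits 14..12 of a register, so LSB = 12, RMASK = 0x7 */
#define EX_FLD_LSB	12
#define EX_FLD_RMASK	0x7ULL

#define EX_SYM_MASK	(EX_FLD_RMASK << EX_FLD_LSB)	     /* SYM_MASK analogue */
#define EX_SYM_FIELD(v)	(((v) >> EX_FLD_LSB) & EX_FLD_RMASK) /* SYM_FIELD analogue */

int main(void)
{
	uint64_t reg = 0x5000;	/* field holds 0b101 = 5 */

	printf("mask=%#llx field=%llu\n",
	       (unsigned long long)EX_SYM_MASK,		/* 0x7000 */
	       (unsigned long long)EX_SYM_FIELD(reg));	/* 5 */
	return 0;
}
```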
200 | /* | |
201 | * the size bits give us 2^N, in KB units. 0 marks as invalid, | |
202 | * and 7 is reserved. We currently use only 2KB and 4KB | |
203 | */ | |
204 | #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB | |
205 | #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */ | |
206 | #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */ | |
207 | #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */ | |
208 | ||
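A minimal standalone sketch of the TID encoding those defines describe; the BufSize bit position is a placeholder here, where the driver takes it from QIB_7322_RcvTIDArray0_RT_BufSize_LSB:

```c
#include <stdint.h>
#include <stdio.h>

#define EX_BUFSIZE_LSB	37ULL	/* placeholder bit position for this sketch */
#define EX_TID_SZ_2K	(1ULL << EX_BUFSIZE_LSB)	/* size code 1 -> 2^1 KB */
#define EX_TID_PA_SHIFT	11U	/* low 11 address bits are not stored */

int main(void)
{
	uint64_t pa = 0x12345800ULL;	/* 2KB-aligned buffer address */
	uint64_t tid = (pa >> EX_TID_PA_SHIFT) | EX_TID_SZ_2K;

	printf("tid entry = %#llx\n", (unsigned long long)tid);
	return 0;
}
```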
209 | #define SendIBSLIDAssignMask \ | |
210 | QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK | |
211 | #define SendIBSLMCMask \ | |
212 | QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK | |
213 | ||
214 | #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn) | |
215 | #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn) | |
216 | #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn) | |
217 | #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn) | |
218 | #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN) | |
219 | #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN) | |
220 | ||
221 | #define _QIB_GPIO_SDA_NUM 1 | |
222 | #define _QIB_GPIO_SCL_NUM 0 | |
223 | #define QIB_EEPROM_WEN_NUM 14 | |
224 | #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */ | |
225 | ||
226 | /* HW counter clock is at 4nsec */ | |
227 | #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000 | |
228 | ||
229 | /* full speed IB port 1 only */ | |
230 | #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR) | |
231 | #define PORT_SPD_CAP_SHIFT 3 | |
232 | ||
233 | /* full speed featuremask, both ports */ | |
234 | #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT)) | |
235 | ||
236 | /* | |
237 | * This file contains almost all the chip-specific register information and | |
238 | * access functions for the QLogic InfiniPath 7322 PCI-Express chip. | |
239 | */ | |
240 | ||
241 | /* Use defines to tie machine-generated names to lower-case names */ | |
242 | #define kr_contextcnt KREG_IDX(ContextCnt) | |
243 | #define kr_control KREG_IDX(Control) | |
244 | #define kr_counterregbase KREG_IDX(CntrRegBase) | |
245 | #define kr_errclear KREG_IDX(ErrClear) | |
246 | #define kr_errmask KREG_IDX(ErrMask) | |
247 | #define kr_errstatus KREG_IDX(ErrStatus) | |
248 | #define kr_extctrl KREG_IDX(EXTCtrl) | |
249 | #define kr_extstatus KREG_IDX(EXTStatus) | |
250 | #define kr_gpio_clear KREG_IDX(GPIOClear) | |
251 | #define kr_gpio_mask KREG_IDX(GPIOMask) | |
252 | #define kr_gpio_out KREG_IDX(GPIOOut) | |
253 | #define kr_gpio_status KREG_IDX(GPIOStatus) | |
254 | #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl) | |
255 | #define kr_debugportval KREG_IDX(DebugPortValueReg) | |
256 | #define kr_fmask KREG_IDX(feature_mask) | |
257 | #define kr_act_fmask KREG_IDX(active_feature_mask) | |
258 | #define kr_hwerrclear KREG_IDX(HwErrClear) | |
259 | #define kr_hwerrmask KREG_IDX(HwErrMask) | |
260 | #define kr_hwerrstatus KREG_IDX(HwErrStatus) | |
261 | #define kr_intclear KREG_IDX(IntClear) | |
262 | #define kr_intmask KREG_IDX(IntMask) | |
263 | #define kr_intredirect KREG_IDX(IntRedirect0) | |
264 | #define kr_intstatus KREG_IDX(IntStatus) | |
265 | #define kr_pagealign KREG_IDX(PageAlign) | |
266 | #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0) | |
267 | #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */ | |
268 | #define kr_rcvegrbase KREG_IDX(RcvEgrBase) | |
269 | #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt) | |
270 | #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt) | |
271 | #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize) | |
272 | #define kr_rcvhdrsize KREG_IDX(RcvHdrSize) | |
273 | #define kr_rcvtidbase KREG_IDX(RcvTIDBase) | |
274 | #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt) | |
275 | #define kr_revision KREG_IDX(Revision) | |
276 | #define kr_scratch KREG_IDX(Scratch) | |
277 | #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */ | |
278 | #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */ | |
279 | #define kr_sendctrl KREG_IDX(SendCtrl) | |
280 | #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */ | |
281 | #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */ | |
282 | #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr) | |
283 | #define kr_sendpiobufbase KREG_IDX(SendBufBase) | |
284 | #define kr_sendpiobufcnt KREG_IDX(SendBufCnt) | |
285 | #define kr_sendpiosize KREG_IDX(SendBufSize) | |
286 | #define kr_sendregbase KREG_IDX(SendRegBase) | |
287 | #define kr_sendbufavail0 KREG_IDX(SendBufAvail0) | |
288 | #define kr_userregbase KREG_IDX(UserRegBase) | |
289 | #define kr_intgranted KREG_IDX(Int_Granted) | |
290 | #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int) | |
291 | #define kr_intblocked KREG_IDX(IntBlocked) | |
292 | #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG) | |
293 | ||
294 | /* | |
295 | * per-port kernel registers. Access only with qib_read_kreg_port() | |
296 | * or qib_write_kreg_port() | |
297 | */ | |
298 | #define krp_errclear KREG_IBPORT_IDX(ErrClear) | |
299 | #define krp_errmask KREG_IBPORT_IDX(ErrMask) | |
300 | #define krp_errstatus KREG_IBPORT_IDX(ErrStatus) | |
301 | #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0) | |
302 | #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit) | |
303 | #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID) | |
304 | #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig) | |
305 | #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA) | |
306 | #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB) | |
307 | #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC) | |
308 | #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA) | |
309 | #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB) | |
310 | #define krp_txestatus KREG_IBPORT_IDX(TXEStatus) | |
311 | #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0) | |
312 | #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl) | |
313 | #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey) | |
314 | #define krp_psinterval KREG_IBPORT_IDX(PSInterval) | |
315 | #define krp_psstart KREG_IBPORT_IDX(PSStart) | |
316 | #define krp_psstat KREG_IBPORT_IDX(PSStat) | |
317 | #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP) | |
318 | #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl) | |
319 | #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt) | |
320 | #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA) | |
321 | #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0) | |
322 | #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15) | |
323 | #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl) | |
324 | #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl) | |
325 | #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase) | |
326 | #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0) | |
327 | #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1) | |
328 | #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2) | |
329 | #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0) | |
330 | #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1) | |
331 | #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2) | |
332 | #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt) | |
333 | #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead) | |
334 | #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr) | |
335 | #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt) | |
336 | #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen) | |
337 | #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld) | |
338 | #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt) | |
339 | #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus) | |
340 | #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail) | |
341 | #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom) | |
342 | #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign) | |
343 | #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask) | |
344 | #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX) | |
345 | #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD) | |
346 | #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE) | |
347 | #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl) | |
348 | ||
349 | /* | |
b595076a | 350 | * Per-context kernel registers. Access only with qib_read_kreg_ctxt() |
f931551b RC |
351 | * or qib_write_kreg_ctxt() |
352 | */ | |
353 | #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0) | |
354 | #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0) | |
355 | ||
356 | /* | |
357 | * TID Flow table, per context. Reduces | |
358 | * number of hdrq updates to one per flow (or on errors). | |
359 | * context 0 and 1 share same memory, but have distinct | |
360 | * addresses. Since for now, we never use expected sends | |
361 | * on kernel contexts, we don't worry about that (we initialize | |
362 | * those entries for ctxt 0/1 on driver load twice, for example). | |
363 | */ | |
364 | #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */ | |
365 | #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0)) | |
366 | ||
367 | /* these are the error bits in the tid flows, and are W1C */ | |
368 | #define TIDFLOW_ERRBITS ( \ | |
369 | (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \ | |
370 | SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \ | |
371 | (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \ | |
372 | SYM_LSB(RcvTIDFlowTable0, SeqMismatch))) | |
373 | ||
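Because the bits are write-1-to-clear, resetting a flow's error state is a single user-register write. A hedged sketch (the helper is illustrative only; qib_write_ureg() is defined later in this file):

```c
/* illustrative only: clear both W1C error bits on one TID flow */
static void clear_tidflow_errs(struct qib_devdata *dd, int ctxt, int flow)
{
	qib_write_ureg(dd, ur_rcvflowtable + flow, TIDFLOW_ERRBITS, ctxt);
}
```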
374 | /* Most (not all) Counters are per-IBport. | |
375 | * Requires LBIntCnt is at offset 0 in the group | |
376 | */ | |
377 | #define CREG_IDX(regname) \ | |
378 | ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64)) | |
379 | ||
380 | #define crp_badformat CREG_IDX(RxVersionErrCnt) | |
381 | #define crp_err_rlen CREG_IDX(RxLenErrCnt) | |
382 | #define crp_erricrc CREG_IDX(RxICRCErrCnt) | |
383 | #define crp_errlink CREG_IDX(RxLinkMalformCnt) | |
384 | #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt) | |
385 | #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt) | |
386 | #define crp_errvcrc CREG_IDX(RxVCRCErrCnt) | |
387 | #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt) | |
388 | #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt) | |
389 | #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt) | |
390 | #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt) | |
391 | #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt) | |
392 | #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt) | |
393 | #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt) | |
394 | #define crp_pktrcv CREG_IDX(RxDataPktCnt) | |
395 | #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt) | |
396 | #define crp_pktsend CREG_IDX(TxDataPktCnt) | |
397 | #define crp_pktsendflow CREG_IDX(TxFlowPktCnt) | |
398 | #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount) | |
399 | #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount) | |
400 | #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount) | |
401 | #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount) | |
402 | #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount) | |
403 | #define crp_rcvebp CREG_IDX(RxEBPCnt) | |
404 | #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt) | |
405 | #define crp_rcvovfl CREG_IDX(RxBufOvflCnt) | |
406 | #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt) | |
407 | #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt) | |
408 | #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt) | |
409 | #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt) | |
410 | #define crp_rxvlerr CREG_IDX(RxVlErrCnt) | |
411 | #define crp_sendstall CREG_IDX(TxFlowStallCnt) | |
412 | #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt) | |
413 | #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt) | |
414 | #define crp_txlenerr CREG_IDX(TxLenErrCnt) | |
f931551b RC |
415 | #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt) |
416 | #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt) | |
417 | #define crp_txunderrun CREG_IDX(TxUnderrunCnt) | |
418 | #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt) | |
419 | #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt) | |
420 | #define crp_wordrcv CREG_IDX(RxDwordCnt) | |
421 | #define crp_wordsend CREG_IDX(TxDwordCnt) | |
422 | #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut) | |
423 | ||
424 | /* these are the (few) counters that are not port-specific */ | |
425 | #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \ | |
426 | QIB_7322_LBIntCnt_OFFS) / sizeof(u64)) | |
427 | #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt) | |
428 | #define cr_lbint CREG_DEVIDX(LBIntCnt) | |
429 | #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt) | |
430 | #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt) | |
431 | #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt) | |
432 | #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt) | |
433 | #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt) | |
434 | ||
435 | /* no chip register for # of IB ports supported, so define */ | |
436 | #define NUM_IB_PORTS 2 | |
437 | ||
438 | /* 1 VL15 buffer per hardware IB port, no register for this, so define */ | |
439 | #define NUM_VL15_BUFS NUM_IB_PORTS | |
440 | ||
441 | /* | |
442 | * context 0 and 1 are special, and there is no chip register that | |
443 | * defines this value, so we have to define it here. | |
444 | * These are all allocated to either 0 or 1 for single port | |
445 | * hardware configuration, otherwise each gets half | |
446 | */ | |
447 | #define KCTXT0_EGRCNT 2048 | |
448 | ||
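In other words, a sketch of the split (values only, not driver code; qib_singleport is the module parameter defined above):

```c
/* illustrative: ctxt 0/1 eager buffer split described above */
unsigned kctxt_egrcnt = qib_singleport ? KCTXT0_EGRCNT	    /* all 2048 */
				       : KCTXT0_EGRCNT / 2; /* 1024 each */
```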
449 | /* values for vl and port fields in PBC, 7322-specific */ | |
450 | #define PBC_PORT_SEL_LSB 26 | |
451 | #define PBC_PORT_SEL_RMASK 1 | |
452 | #define PBC_VL_NUM_LSB 27 | |
453 | #define PBC_VL_NUM_RMASK 7 | |
454 | #define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */ | |
455 | #define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */ | |
456 | ||
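A standalone sketch of packing these fields into a PBC word (hypothetical VL and port values; the driver's real construction, including the VL15 special cases above, is done by qib_7322_setpbc_control()):

```c
#include <stdint.h>
#include <stdio.h>

#define PBC_PORT_SEL_LSB	26
#define PBC_PORT_SEL_RMASK	1
#define PBC_VL_NUM_LSB		27
#define PBC_VL_NUM_RMASK	7

int main(void)
{
	unsigned port = 1, vl = 3;	/* hypothetical packet */
	uint64_t pbc =
		((uint64_t)(vl & PBC_VL_NUM_RMASK) << PBC_VL_NUM_LSB) |
		((uint64_t)(port & PBC_PORT_SEL_RMASK) << PBC_PORT_SEL_LSB);

	printf("pbc = %#llx\n", (unsigned long long)pbc);	/* 0x1c000000 */
	return 0;
}
```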
457 | static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = { | |
458 | [IB_RATE_2_5_GBPS] = 16, | |
459 | [IB_RATE_5_GBPS] = 8, | |
460 | [IB_RATE_10_GBPS] = 4, | |
461 | [IB_RATE_20_GBPS] = 2, | |
462 | [IB_RATE_30_GBPS] = 2, | |
463 | [IB_RATE_40_GBPS] = 1 | |
464 | }; | |
465 | ||
466 | #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive) | |
467 | #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive) | |
468 | ||
469 | /* link training states, from IBC */ | |
470 | #define IB_7322_LT_STATE_DISABLED 0x00 | |
471 | #define IB_7322_LT_STATE_LINKUP 0x01 | |
472 | #define IB_7322_LT_STATE_POLLACTIVE 0x02 | |
473 | #define IB_7322_LT_STATE_POLLQUIET 0x03 | |
474 | #define IB_7322_LT_STATE_SLEEPDELAY 0x04 | |
475 | #define IB_7322_LT_STATE_SLEEPQUIET 0x05 | |
476 | #define IB_7322_LT_STATE_CFGDEBOUNCE 0x08 | |
477 | #define IB_7322_LT_STATE_CFGRCVFCFG 0x09 | |
478 | #define IB_7322_LT_STATE_CFGWAITRMT 0x0a | |
479 | #define IB_7322_LT_STATE_CFGIDLE 0x0b | |
480 | #define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c | |
481 | #define IB_7322_LT_STATE_TXREVLANES 0x0d | |
482 | #define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e | |
483 | #define IB_7322_LT_STATE_RECOVERIDLE 0x0f | |
484 | #define IB_7322_LT_STATE_CFGENH 0x10 | |
485 | #define IB_7322_LT_STATE_CFGTEST 0x11 | |
31264484 MH |
486 | #define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12 |
487 | #define IB_7322_LT_STATE_CFGWAITENH 0x13 | |
f931551b RC |
488 | |
489 | /* link state machine states from IBC */ | |
490 | #define IB_7322_L_STATE_DOWN 0x0 | |
491 | #define IB_7322_L_STATE_INIT 0x1 | |
492 | #define IB_7322_L_STATE_ARM 0x2 | |
493 | #define IB_7322_L_STATE_ACTIVE 0x3 | |
494 | #define IB_7322_L_STATE_ACT_DEFER 0x4 | |
495 | ||
496 | static const u8 qib_7322_physportstate[0x20] = { | |
497 | [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED, | |
498 | [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP, | |
499 | [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL, | |
500 | [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL, | |
501 | [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP, | |
502 | [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP, | |
503 | [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN, | |
504 | [IB_7322_LT_STATE_CFGRCVFCFG] = | |
505 | IB_PHYSPORTSTATE_CFG_TRAIN, | |
506 | [IB_7322_LT_STATE_CFGWAITRMT] = | |
507 | IB_PHYSPORTSTATE_CFG_TRAIN, | |
508 | [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE, | |
509 | [IB_7322_LT_STATE_RECOVERRETRAIN] = | |
510 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | |
511 | [IB_7322_LT_STATE_RECOVERWAITRMT] = | |
512 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | |
513 | [IB_7322_LT_STATE_RECOVERIDLE] = | |
514 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | |
515 | [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH, | |
516 | [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN, | |
31264484 MH |
517 | [IB_7322_LT_STATE_CFGWAITRMTTEST] = |
518 | IB_PHYSPORTSTATE_CFG_TRAIN, | |
519 | [IB_7322_LT_STATE_CFGWAITENH] = | |
520 | IB_PHYSPORTSTATE_CFG_WAIT_ENH, | |
f931551b RC |
521 | [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN, |
522 | [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, | |
523 | [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, | |
524 | [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN | |
525 | }; | |
526 | ||
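A sketch of how this table is consulted; the real lookup is qib_7322_phys_portstate(), and taking the training state from the low five bits is an assumption sized to the 0x20-entry table:

```c
/* illustrative: map IBC link training state -> generic physical state */
static inline u8 lt_state_to_phys(u64 ibcstatus)
{
	return qib_7322_physportstate[ibcstatus & 0x1f];
}
```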
8469ba39 MM |
527 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
528 | struct qib_irq_notify { | |
529 | int rcv; | |
530 | void *arg; | |
531 | struct irq_affinity_notify notify; | |
532 | }; | |
533 | #endif | |
534 | ||
f931551b RC |
535 | struct qib_chip_specific { |
536 | u64 __iomem *cregbase; | |
537 | u64 *cntrs; | |
538 | spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */ | |
539 | spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */ | |
540 | u64 main_int_mask; /* clear bits which have dedicated handlers */ | |
541 | u64 int_enable_mask; /* for per port interrupts in single port mode */ | |
542 | u64 errormask; | |
543 | u64 hwerrmask; | |
544 | u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */ | |
545 | u64 gpio_mask; /* shadow the gpio mask register */ | |
546 | u64 extctrl; /* shadow the gpio output enable, etc... */ | |
547 | u32 ncntrs; | |
548 | u32 nportcntrs; | |
549 | u32 cntrnamelen; | |
550 | u32 portcntrnamelen; | |
551 | u32 numctxts; | |
552 | u32 rcvegrcnt; | |
553 | u32 updthresh; /* current AvailUpdThld */ | |
554 | u32 updthresh_dflt; /* default AvailUpdThld */ | |
555 | u32 r1; | |
556 | int irq; | |
557 | u32 num_msix_entries; | |
558 | u32 sdmabufcnt; | |
559 | u32 lastbuf_for_pio; | |
560 | u32 stay_in_freeze; | |
561 | u32 recovery_ports_initted; | |
8469ba39 MM |
562 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
563 | u32 dca_ctrl; | |
564 | int rhdr_cpu[18]; | |
565 | int sdma_cpu[2]; | |
566 | u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */ | |
567 | #endif | |
a778f3fd | 568 | struct qib_msix_entry *msix_entries; |
f931551b RC |
569 | unsigned long *sendchkenable; |
570 | unsigned long *sendgrhchk; | |
571 | unsigned long *sendibchk; | |
572 | u32 rcvavail_timeout[18]; | |
573 | char emsgbuf[128]; /* for device error interrupt msg buffer */ | |
574 | }; | |
575 | ||
576 | /* Table of Tx Emphasis entries, in "human readable" form. */ | |
577 | struct txdds_ent { | |
578 | u8 amp; | |
579 | u8 pre; | |
580 | u8 main; | |
581 | u8 post; | |
582 | }; | |
583 | ||
584 | struct vendor_txdds_ent { | |
585 | u8 oui[QSFP_VOUI_LEN]; | |
586 | u8 *partnum; | |
587 | struct txdds_ent sdr; | |
588 | struct txdds_ent ddr; | |
589 | struct txdds_ent qdr; | |
590 | }; | |
591 | ||
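To make the intended use concrete, a hypothetical lookup over such a table (not the driver's actual matcher; QSFP_VOUI_LEN comes from qib_qsfp.h and the QIB_IB_* speed flags are used elsewhere in this file):

```c
/* illustrative: choose Tx settings by QSFP vendor OUI + part number */
static const struct txdds_ent *
find_txdds(const struct vendor_txdds_ent *tbl, size_t n,
	   const u8 *oui, const char *part, u32 speed)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (memcmp(tbl[i].oui, oui, QSFP_VOUI_LEN))
			continue;
		if (tbl[i].partnum && strcmp((const char *)tbl[i].partnum, part))
			continue;
		if (speed & QIB_IB_QDR)
			return &tbl[i].qdr;
		return (speed & QIB_IB_DDR) ? &tbl[i].ddr : &tbl[i].sdr;
	}
	return NULL;	/* caller falls back to the txselect parameter */
}
```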
592 | static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); | |
593 | ||
594 | #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ | |
22baa407 | 595 | #define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */ |
e706203c | 596 | #define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */ |
f931551b RC |
597 | #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ |
598 | ||
599 | #define H1_FORCE_VAL 8 | |
a77fcf89 RC |
600 | #define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */ |
601 | #define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */ | |
f931551b RC |
602 | |
603 | /* The static and dynamic registers are paired, and the pairs indexed by spd */ | |
604 | #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \ | |
605 | + ((spd) * 2)) | |
606 | ||
607 | #define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */ | |
608 | #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */ | |
609 | #define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */ | |
610 | #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */ | |
611 | #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */ | |
612 | ||
f931551b RC |
613 | struct qib_chippport_specific { |
614 | u64 __iomem *kpregbase; | |
615 | u64 __iomem *cpregbase; | |
616 | u64 *portcntrs; | |
617 | struct qib_pportdata *ppd; | |
618 | wait_queue_head_t autoneg_wait; | |
619 | struct delayed_work autoneg_work; | |
620 | struct delayed_work ipg_work; | |
621 | struct timer_list chase_timer; | |
622 | /* | |
623 | * these 5 fields are used to establish deltas for IB symbol | |
624 | * errors and linkrecovery errors. They can be reported on | |
625 | * some chips during link negotiation prior to INIT, and with | |
626 | * DDR when faking DDR negotiations with non-IBTA switches. | |
627 | * The chip counters are adjusted at driver unload if there is | |
628 | * a non-zero delta. | |
629 | */ | |
630 | u64 ibdeltainprog; | |
631 | u64 ibsymdelta; | |
632 | u64 ibsymsnap; | |
633 | u64 iblnkerrdelta; | |
634 | u64 iblnkerrsnap; | |
635 | u64 iblnkdownsnap; | |
636 | u64 iblnkdowndelta; | |
637 | u64 ibmalfdelta; | |
638 | u64 ibmalfsnap; | |
639 | u64 ibcctrl_a; /* krp_ibcctrl_a shadow */ | |
640 | u64 ibcctrl_b; /* krp_ibcctrl_b shadow */ | |
8482d5d1 MM |
641 | unsigned long qdr_dfe_time; |
642 | unsigned long chase_end; | |
f931551b RC |
643 | u32 autoneg_tries; |
644 | u32 recovery_init; | |
645 | u32 qdr_dfe_on; | |
646 | u32 qdr_reforce; | |
647 | /* | |
648 | * Per-bay per-channel rcv QMH H1 values and Tx values for QDR. | |
649 | * entry zero is unused, to simplify indexing | |
650 | */ | |
a77fcf89 RC |
651 | u8 h1_val; |
652 | u8 no_eep; /* txselect table index to use if no qsfp info */ | |
f931551b RC |
653 | u8 ipg_tries; |
654 | u8 ibmalfusesnap; | |
655 | struct qib_qsfp_data qsfp_data; | |
656 | char epmsgbuf[192]; /* for port error interrupt msg buffer */ | |
0b3ddf38 | 657 | char sdmamsgbuf[192]; /* for per-port sdma error messages */ |
f931551b RC |
658 | }; |
659 | ||
660 | static struct { | |
661 | const char *name; | |
662 | irq_handler_t handler; | |
663 | int lsb; | |
664 | int port; /* 0 if not port-specific, else port # */ | |
8469ba39 | 665 | int dca; |
f931551b | 666 | } irq_table[] = { |
8469ba39 | 667 | { "", qib_7322intr, -1, 0, 0 }, |
a778f3fd | 668 | { " (buf avail)", qib_7322bufavail, |
8469ba39 | 669 | SYM_LSB(IntStatus, SendBufAvail), 0, 0}, |
a778f3fd | 670 | { " (sdma 0)", sdma_intr, |
8469ba39 | 671 | SYM_LSB(IntStatus, SDmaInt_0), 1, 1 }, |
a778f3fd | 672 | { " (sdma 1)", sdma_intr, |
8469ba39 | 673 | SYM_LSB(IntStatus, SDmaInt_1), 2, 1 }, |
a778f3fd | 674 | { " (sdmaI 0)", sdma_idle_intr, |
8469ba39 | 675 | SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1}, |
a778f3fd | 676 | { " (sdmaI 1)", sdma_idle_intr, |
8469ba39 | 677 | SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1}, |
a778f3fd | 678 | { " (sdmaP 0)", sdma_progress_intr, |
8469ba39 | 679 | SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 }, |
a778f3fd | 680 | { " (sdmaP 1)", sdma_progress_intr, |
8469ba39 | 681 | SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 }, |
a778f3fd | 682 | { " (sdmaC 0)", sdma_cleanup_intr, |
8469ba39 | 683 | SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 }, |
a778f3fd | 684 | { " (sdmaC 1)", sdma_cleanup_intr, |
8469ba39 | 685 | SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0}, |
f931551b RC |
686 | }; |
687 | ||
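A hedged sketch of how a table like this drives interrupt setup. Loop shape only: the msix_vec array is an assumption, the real driver builds a full "driver name + suffix" string per vector, and it also handles DCA and non-MSI-X fallback:

```c
/* illustrative MSI-X hookup driven by irq_table[] */
static int hook_7322_irqs(struct qib_devdata *dd, const int *msix_vec)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(irq_table); i++) {
		/* per-port entries get the pport, the rest get the devdata */
		void *arg = irq_table[i].port ?
			(void *)&dd->pport[irq_table[i].port - 1] : (void *)dd;

		ret = request_irq(msix_vec[i], irq_table[i].handler, 0,
				  irq_table[i].name, arg);
		if (ret)
			return ret;
	}
	return 0;
}
```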
8469ba39 MM |
688 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
689 | ||
690 | static const struct dca_reg_map { | |
691 | int shadow_inx; | |
692 | int lsb; | |
693 | u64 mask; | |
694 | u16 regno; | |
695 | } dca_rcvhdr_reg_map[] = { | |
696 | { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH), | |
697 | ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) }, | |
698 | { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH), | |
699 | ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) }, | |
700 | { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH), | |
701 | ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) }, | |
702 | { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH), | |
703 | ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) }, | |
704 | { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH), | |
705 | ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) }, | |
706 | { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH), | |
707 | ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) }, | |
708 | { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH), | |
709 | ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) }, | |
710 | { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH), | |
711 | ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) }, | |
712 | { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH), | |
713 | ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) }, | |
714 | { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH), | |
715 | ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) }, | |
716 | { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH), | |
717 | ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) }, | |
718 | { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH), | |
719 | ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) }, | |
720 | { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH), | |
721 | ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) }, | |
722 | { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH), | |
723 | ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) }, | |
724 | { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH), | |
725 | ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) }, | |
726 | { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH), | |
727 | ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) }, | |
728 | { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH), | |
729 | ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) }, | |
730 | { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH), | |
731 | ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) }, | |
732 | }; | |
733 | #endif | |
734 | ||
f931551b RC |
735 | /* ibcctrl bits */ |
736 | #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 | |
737 | /* cycle through TS1/TS2 till OK */ | |
738 | #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2 | |
739 | /* wait for TS1, then go on */ | |
740 | #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3 | |
741 | #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16 | |
742 | ||
743 | #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */ | |
744 | #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ | |
745 | #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ | |
746 | ||
747 | #define BLOB_7322_IBCHG 0x101 | |
748 | ||
749 | static inline void qib_write_kreg(const struct qib_devdata *dd, | |
750 | const u32 regno, u64 value); | |
751 | static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32); | |
752 | static void write_7322_initregs(struct qib_devdata *); | |
753 | static void write_7322_init_portregs(struct qib_pportdata *); | |
754 | static void setup_7322_link_recovery(struct qib_pportdata *, u32); | |
755 | static void check_7322_rxe_status(struct qib_pportdata *); | |
756 | static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *); | |
8469ba39 MM |
757 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
758 | static void qib_setup_dca(struct qib_devdata *dd); | |
759 | static void setup_dca_notifier(struct qib_devdata *dd, | |
760 | struct qib_msix_entry *m); | |
761 | static void reset_dca_notifier(struct qib_devdata *dd, | |
762 | struct qib_msix_entry *m); | |
763 | #endif | |
f931551b RC |
764 | |
765 | /** | |
766 | * qib_read_ureg32 - read 32-bit virtualized per-context register | |
767 | * @dd: device | |
768 | * @regno: register number | |
769 | * @ctxt: context number | |
770 | * | |
771 | * Return the contents of a register that is virtualized to be per context. | |
772 | * Returns 0 on errors (not distinguishable from valid contents at | |
773 | * runtime; we may add a separate error variable at some point). | |
774 | */ | |
775 | static inline u32 qib_read_ureg32(const struct qib_devdata *dd, | |
776 | enum qib_ureg regno, int ctxt) | |
777 | { | |
778 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | |
779 | return 0; | |
780 | return readl(regno + (u64 __iomem *)( | |
781 | (dd->ureg_align * ctxt) + (dd->userbase ? | |
782 | (char __iomem *)dd->userbase : | |
783 | (char __iomem *)dd->kregbase + dd->uregbase))); | |
784 | } | |
785 | ||
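The address arithmetic above, unrolled as a sketch to show where each term comes from (illustrative; base is userbase when user registers are mapped separately, otherwise kregbase + uregbase):

```c
#include <stdint.h>

/* illustrative: per-context user register address = base
 * + ctxt * ureg_align, then indexed by 64-bit register number */
static inline uint64_t *ureg_addr(char *base, uint64_t ureg_align,
				  int ctxt, int regno)
{
	return (uint64_t *)(base + ureg_align * ctxt) + regno;
}
```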
786 | /** | |
787 | * qib_read_ureg - read virtualized per-context register | |
788 | * @dd: device | |
789 | * @regno: register number | |
790 | * @ctxt: context number | |
791 | * | |
792 | * Return the contents of a register that is virtualized to be per context. | |
793 | * Returns 0 on errors (not distinguishable from valid contents at | |
794 | * runtime; we may add a separate error variable at some point). | |
795 | */ | |
796 | static inline u64 qib_read_ureg(const struct qib_devdata *dd, | |
797 | enum qib_ureg regno, int ctxt) | |
798 | { | |
799 | ||
800 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | |
801 | return 0; | |
802 | return readq(regno + (u64 __iomem *)( | |
803 | (dd->ureg_align * ctxt) + (dd->userbase ? | |
804 | (char __iomem *)dd->userbase : | |
805 | (char __iomem *)dd->kregbase + dd->uregbase))); | |
806 | } | |
807 | ||
808 | /** | |
809 | * qib_write_ureg - write virtualized per-context register | |
810 | * @dd: device | |
811 | * @regno: register number | |
812 | * @value: value | |
813 | * @ctxt: context | |
814 | * | |
815 | * Write the contents of a register that is virtualized to be per context. | |
816 | */ | |
817 | static inline void qib_write_ureg(const struct qib_devdata *dd, | |
818 | enum qib_ureg regno, u64 value, int ctxt) | |
819 | { | |
820 | u64 __iomem *ubase; | |
da12c1f6 | 821 | |
f931551b RC |
822 | if (dd->userbase) |
823 | ubase = (u64 __iomem *) | |
824 | ((char __iomem *) dd->userbase + | |
825 | dd->ureg_align * ctxt); | |
826 | else | |
827 | ubase = (u64 __iomem *) | |
828 | (dd->uregbase + | |
829 | (char __iomem *) dd->kregbase + | |
830 | dd->ureg_align * ctxt); | |
831 | ||
832 | if (dd->kregbase && (dd->flags & QIB_PRESENT)) | |
833 | writeq(value, &ubase[regno]); | |
834 | } | |
835 | ||
836 | static inline u32 qib_read_kreg32(const struct qib_devdata *dd, | |
837 | const u32 regno) | |
838 | { | |
839 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | |
840 | return -1; | |
841 | return readl((u32 __iomem *) &dd->kregbase[regno]); | |
842 | } | |
843 | ||
844 | static inline u64 qib_read_kreg64(const struct qib_devdata *dd, | |
845 | const u32 regno) | |
846 | { | |
847 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | |
848 | return -1; | |
849 | return readq(&dd->kregbase[regno]); | |
850 | } | |
851 | ||
852 | static inline void qib_write_kreg(const struct qib_devdata *dd, | |
853 | const u32 regno, u64 value) | |
854 | { | |
855 | if (dd->kregbase && (dd->flags & QIB_PRESENT)) | |
856 | writeq(value, &dd->kregbase[regno]); | |
857 | } | |
858 | ||
859 | /* | |
860 | * not many sanity checks for the port-specific kernel register routines, | |
861 | * since they are only used when it's known to be safe. | |
862 | */ | |
863 | static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd, | |
864 | const u16 regno) | |
865 | { | |
866 | if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT)) | |
867 | return 0ULL; | |
868 | return readq(&ppd->cpspec->kpregbase[regno]); | |
869 | } | |
870 | ||
871 | static inline void qib_write_kreg_port(const struct qib_pportdata *ppd, | |
872 | const u16 regno, u64 value) | |
873 | { | |
874 | if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase && | |
875 | (ppd->dd->flags & QIB_PRESENT)) | |
876 | writeq(value, &ppd->cpspec->kpregbase[regno]); | |
877 | } | |
878 | ||
879 | /** | |
880 | * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register | |
881 | * @dd: the qlogic_ib device | |
882 | * @regno: the register number to write | |
883 | * @ctxt: the context containing the register | |
884 | * @value: the value to write | |
885 | */ | |
886 | static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd, | |
887 | const u16 regno, unsigned ctxt, | |
888 | u64 value) | |
889 | { | |
890 | qib_write_kreg(dd, regno + ctxt, value); | |
891 | } | |
892 | ||
893 | static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno) | |
894 | { | |
895 | if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) | |
896 | return 0; | |
897 | return readq(&dd->cspec->cregbase[regno]); | |
900 | } | |
901 | ||
902 | static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno) | |
903 | { | |
904 | if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) | |
905 | return 0; | |
906 | return readl(&dd->cspec->cregbase[regno]); | |
909 | } | |
910 | ||
911 | static inline void write_7322_creg_port(const struct qib_pportdata *ppd, | |
912 | u16 regno, u64 value) | |
913 | { | |
914 | if (ppd->cpspec && ppd->cpspec->cpregbase && | |
915 | (ppd->dd->flags & QIB_PRESENT)) | |
916 | writeq(value, &ppd->cpspec->cpregbase[regno]); | |
917 | } | |
918 | ||
919 | static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd, | |
920 | u16 regno) | |
921 | { | |
922 | if (!ppd->cpspec || !ppd->cpspec->cpregbase || | |
923 | !(ppd->dd->flags & QIB_PRESENT)) | |
924 | return 0; | |
925 | return readq(&ppd->cpspec->cpregbase[regno]); | |
926 | } | |
927 | ||
928 | static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd, | |
929 | u16 regno) | |
930 | { | |
931 | if (!ppd->cpspec || !ppd->cpspec->cpregbase || | |
932 | !(ppd->dd->flags & QIB_PRESENT)) | |
933 | return 0; | |
934 | return readl(&ppd->cpspec->cpregbase[regno]); | |
935 | } | |
936 | ||
937 | /* bits in Control register */ | |
938 | #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset) | |
939 | #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn) | |
940 | ||
941 | /* bits in general interrupt regs */ | |
942 | #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask) | |
943 | #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17) | |
944 | #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB) | |
945 | #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask) | |
946 | #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17) | |
947 | #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB) | |
948 | #define QIB_I_C_ERROR INT_MASK(Err) | |
949 | ||
950 | #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1)) | |
951 | #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail) | |
952 | #define QIB_I_GPIO INT_MASK(AssertGPIO) | |
953 | #define QIB_I_P_SDMAINT(pidx) \ | |
954 | (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \ | |
955 | INT_MASK_P(SDmaProgress, pidx) | \ | |
956 | INT_MASK_PM(SDmaCleanupDone, pidx)) | |
957 | ||
958 | /* Interrupt bits that are "per port" */ | |
959 | #define QIB_I_P_BITSEXTANT(pidx) \ | |
960 | (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \ | |
961 | INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \ | |
962 | INT_MASK_P(SDmaProgress, pidx) | \ | |
963 | INT_MASK_PM(SDmaCleanupDone, pidx)) | |
964 | ||
965 | /* Interrupt bits that are common to a device */ | |
966 | /* currently unused: QIB_I_SPIOSENT */ | |
967 | #define QIB_I_C_BITSEXTANT \ | |
968 | (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \ | |
969 | QIB_I_SPIOSENT | \ | |
970 | QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO) | |
971 | ||
972 | #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \ | |
973 | QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1)) | |
974 | ||
975 | /* | |
976 | * Error bits that are "per port". | |
977 | */ | |
978 | #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged) | |
979 | #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr) | |
980 | #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr) | |
981 | #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr) | |
982 | #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr) | |
983 | #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr) | |
984 | #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr) | |
985 | #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr) | |
986 | #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr) | |
987 | #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr) | |
988 | #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr) | |
989 | #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr) | |
990 | #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr) | |
991 | #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr) | |
992 | #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr) | |
993 | #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr) | |
994 | #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr) | |
995 | #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr) | |
996 | #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr) | |
997 | #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr) | |
998 | #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr) | |
999 | #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr) | |
1000 | #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr) | |
1001 | #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr) | |
1002 | #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr) | |
1003 | #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr) | |
1004 | #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr) | |
1005 | #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr) | |
1006 | ||
1007 | #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr) | |
1008 | #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr) | |
1009 | #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr) | |
1010 | #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr) | |
1011 | #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr) | |
1012 | #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr) | |
1013 | #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr) | |
1014 | #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr) | |
1015 | #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr) | |
1016 | #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr) | |
1017 | #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr) | |
1018 | ||
1019 | /* Error bits that are common to a device */ | |
1020 | #define QIB_E_RESET ERR_MASK(ResetNegated) | |
1021 | #define QIB_E_HARDWARE ERR_MASK(HardwareErr) | |
1022 | #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr) | |
1023 | ||
1024 | ||
1025 | /* | |
1026 | * Per chip (rather than per-port) errors. Most either do | |
1027 | * nothing but trigger a print (because they self-recover, or | |
1028 | * always occur in tandem with other errors that handle the | |
1029 | * issue), or indicate errors with no recovery; either way we | |
1030 | * want to know that they happened. | |
1031 | */ | |
1032 | #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr) | |
1033 | #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd) | |
1034 | #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr) | |
1035 | #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr) | |
1036 | #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr) | |
1037 | #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr) | |
1038 | #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr) | |
1039 | #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr) | |
1040 | ||
1041 | /* SDMA chip errors (not per port) | |
1042 | * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get | |
1043 | * the SDMAHALT error immediately, so we just print the dup error via the | |
1044 | * E_AUTO mechanism. This is true of most of the per-port fatal errors | |
1045 | * as well, but since this is port-independent, by definition, it's | |
1046 | * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per | |
1047 | * packet send errors, and so are handled in the same manner as other | |
1048 | * per-packet errors. | |
1049 | */ | |
1050 | #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err) | |
1051 | #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr) | |
1052 | #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr) | |
1053 | ||
1054 | /* | |
1055 | * Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS | |
1056 | * it is used to print "common" packet errors. | |
1057 | */ | |
1058 | #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\ | |
1059 | QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\ | |
1060 | QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\ | |
1061 | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \ | |
1062 | QIB_E_P_REBP) | |
1063 | ||
1064 | /* Error bits that are packet-related (Receive, per-port) */ | |
1065 | #define QIB_E_P_RPKTERRS (\ | |
1066 | QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \ | |
1067 | QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \ | |
1068 | QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\ | |
1069 | QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \ | |
1070 | QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \ | |
1071 | QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP) | |
1072 | ||
1073 | /* | |
1074 | * Error bits that are Send-related (per port) | |
1075 | * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling). | |
1076 | * All of these potentially need to have a buffer disarmed | |
1077 | */ | |
1078 | #define QIB_E_P_SPKTERRS (\ | |
1079 | QIB_E_P_SUNEXP_PKTNUM |\ | |
1080 | QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\ | |
1081 | QIB_E_P_SMAXPKTLEN |\ | |
1082 | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \ | |
1083 | QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \ | |
1084 | QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL) | |
1085 | ||
1086 | #define QIB_E_SPKTERRS ( \ | |
1087 | QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \ | |
1088 | ERR_MASK_N(SendUnsupportedVLErr) | \ | |
1089 | QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT) | |
1090 | ||
1091 | #define QIB_E_P_SDMAERRS ( \ | |
1092 | QIB_E_P_SDMAHALT | \ | |
1093 | QIB_E_P_SDMADESCADDRMISALIGN | \ | |
1094 | QIB_E_P_SDMAUNEXPDATA | \ | |
1095 | QIB_E_P_SDMAMISSINGDW | \ | |
1096 | QIB_E_P_SDMADWEN | \ | |
1097 | QIB_E_P_SDMARPYTAG | \ | |
1098 | QIB_E_P_SDMA1STDESC | \ | |
1099 | QIB_E_P_SDMABASE | \ | |
1100 | QIB_E_P_SDMATAILOUTOFBOUND | \ | |
1101 | QIB_E_P_SDMAOUTOFBOUND | \ | |
1102 | QIB_E_P_SDMAGENMISMATCH) | |
1103 | ||
1104 | /* | |
1105 | * This sets some bits more than once, but makes it more obvious which | |
1106 | * bits are not handled under other categories, and the repeat definition | |
1107 | * is not a problem. | |
1108 | */ | |
1109 | #define QIB_E_P_BITSEXTANT ( \ | |
1110 | QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \ | |
1111 | QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \ | |
1112 | QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \ | |
1113 | QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \ | |
1114 | ) | |
1115 | ||
1116 | /* | |
1117 | * These are errors that can occur when the link | |
1118 | * changes state while a packet is being sent or received. This doesn't | |
1119 | * cover things like EBP or VCRC that can be the result of a sending | |
1120 | * having the link change state, so we receive a "known bad" packet. | |
1121 | * All of these are "per port", so renamed: | |
1122 | */ | |
1123 | #define QIB_E_P_LINK_PKTERRS (\ | |
1124 | QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\ | |
1125 | QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\ | |
1126 | QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\ | |
1127 | QIB_E_P_RUNEXPCHAR) | |
1128 | ||
1129 | /* | |
1130 | * This sets some bits more than once, but makes it more obvious which | |
1131 | * bits are not handled under other categories (such as QIB_E_SPKTERRS), | |
1132 | * and the repeat definition is not a problem. | |
1133 | */ | |
1134 | #define QIB_E_C_BITSEXTANT (\ | |
1135 | QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\ | |
1136 | QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\ | |
1137 | QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE) | |
1138 | ||
1139 | /* Likewise Neuter E_SPKT_ERRS_IGNORE */ | |
1140 | #define E_SPKT_ERRS_IGNORE 0 | |
1141 | ||
1142 | #define QIB_EXTS_MEMBIST_DISABLED \ | |
1143 | SYM_MASK(EXTStatus, MemBISTDisabled) | |
1144 | #define QIB_EXTS_MEMBIST_ENDTEST \ | |
1145 | SYM_MASK(EXTStatus, MemBISTEndTest) | |
1146 | ||
1147 | #define QIB_E_SPIOARMLAUNCH \ | |
1148 | ERR_MASK(SendArmLaunchErr) | |
1149 | ||
1150 | #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd) | |
1151 | #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd) | |
1152 | ||
1153 | /* | |
1154 | * IBTA_1_2 is set when multiple speeds are enabled (normal), | |
1155 | * and also if forced QDR (only QDR enabled). It's enabled for the | |
1156 | * forced QDR case so that scrambling will be enabled by the TS3 | |
1157 | * exchange, when supported by both sides of the link. | |
1158 | */ | |
1159 | #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE) | |
1160 | #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED) | |
1161 | #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR) | |
1162 | #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | |
1163 | #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | |
1164 | #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \ | |
1165 | SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)) | |
1166 | #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR) | |
1167 | ||
1168 | #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod) | |
1169 | #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod) | |
1170 | ||
1171 | #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS) | |
1172 | #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS)) | |
1173 | #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS)) | |
1174 | ||
1175 | #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP) | |
1176 | #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP) | |
1177 | #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \ | |
1178 | SYM_MASK(IBCCtrlB_0, HRTBT_ENB)) | |
1179 | #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \ | |
1180 | SYM_LSB(IBCCtrlB_0, HRTBT_ENB)) | |
1181 | #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB) | |
1182 | ||
1183 | #define IBA7322_REDIRECT_VEC_PER_REG 12 | |
1184 | ||
1185 | #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En) | |
1186 | #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En) | |
1187 | #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En) | |
1188 | #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En) | |
1189 | #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En) | |
1190 | ||
1191 | #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */ | |
1192 | ||
1193 | #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \ | |
e67306a3 | 1194 | .msg = #fldname , .sz = sizeof(#fldname) } |
f931551b | 1195 | #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \ |
e67306a3 | 1196 | fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) } |
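| /* | |
| * For reference, HWE_AUTO(MemoryErr) expands to roughly | |
| *   { .mask = SYM_MASK(HwErrMask, MemoryErrMask), | |
| *     .msg = "MemoryErr", .sz = sizeof("MemoryErr") } | |
| * so each table entry carries the mask bit to test, a printable | |
| * name, and the name's size (counting the NUL) for err_decode(). | |
| */ | |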
f931551b RC |
1197 | static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = { |
1198 | HWE_AUTO_P(IBSerdesPClkNotDetect, 1), | |
1199 | HWE_AUTO_P(IBSerdesPClkNotDetect, 0), | |
1200 | HWE_AUTO(PCIESerdesPClkNotDetect), | |
1201 | HWE_AUTO(PowerOnBISTFailed), | |
1202 | HWE_AUTO(TempsenseTholdReached), | |
1203 | HWE_AUTO(MemoryErr), | |
1204 | HWE_AUTO(PCIeBusParityErr), | |
1205 | HWE_AUTO(PcieCplTimeout), | |
1206 | HWE_AUTO(PciePoisonedTLP), | |
1207 | HWE_AUTO_P(SDmaMemReadErr, 1), | |
1208 | HWE_AUTO_P(SDmaMemReadErr, 0), | |
1209 | HWE_AUTO_P(IBCBusFromSPCParityErr, 1), | |
b9e03e04 | 1210 | HWE_AUTO_P(IBCBusToSPCParityErr, 1), |
f931551b | 1211 | HWE_AUTO_P(IBCBusFromSPCParityErr, 0), |
b9e03e04 | 1212 | HWE_AUTO(statusValidNoEop), |
f931551b | 1213 | HWE_AUTO(LATriggered), |
e67306a3 | 1214 | { .mask = 0, .sz = 0 } |
f931551b RC |
1215 | }; |
1216 | ||
1217 | #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \ | |
e67306a3 | 1218 | .msg = #fldname, .sz = sizeof(#fldname) } |
f931551b | 1219 | #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \ |
e67306a3 | 1220 | .msg = #fldname, .sz = sizeof(#fldname) } |
f931551b | 1221 | static const struct qib_hwerror_msgs qib_7322error_msgs[] = { |
e67306a3 MM |
1222 | E_AUTO(RcvEgrFullErr), |
1223 | E_AUTO(RcvHdrFullErr), | |
f931551b RC |
1224 | E_AUTO(ResetNegated), |
1225 | E_AUTO(HardwareErr), | |
1226 | E_AUTO(InvalidAddrErr), | |
1227 | E_AUTO(SDmaVL15Err), | |
1228 | E_AUTO(SBufVL15MisUseErr), | |
1229 | E_AUTO(InvalidEEPCmd), | |
1230 | E_AUTO(RcvContextShareErr), | |
1231 | E_AUTO(SendVLMismatchErr), | |
1232 | E_AUTO(SendArmLaunchErr), | |
1233 | E_AUTO(SendSpecialTriggerErr), | |
1234 | E_AUTO(SDmaWrongPortErr), | |
1235 | E_AUTO(SDmaBufMaskDuplicateErr), | |
e67306a3 | 1236 | { .mask = 0, .sz = 0 } |
f931551b RC |
1237 | }; |
1238 | ||
1239 | static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = { | |
1240 | E_P_AUTO(IBStatusChanged), | |
1241 | E_P_AUTO(SHeadersErr), | |
1242 | E_P_AUTO(VL15BufMisuseErr), | |
1243 | /* | |
1244 | * SDmaHaltErr is not really an error, so rename the message to make that clearer. | |
1245 | */ | |
e67306a3 MM |
1246 | {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted", |
1247 | .sz = 11}, | |
f931551b RC |
1248 | E_P_AUTO(SDmaDescAddrMisalignErr), |
1249 | E_P_AUTO(SDmaUnexpDataErr), | |
1250 | E_P_AUTO(SDmaMissingDwErr), | |
1251 | E_P_AUTO(SDmaDwEnErr), | |
1252 | E_P_AUTO(SDmaRpyTagErr), | |
1253 | E_P_AUTO(SDma1stDescErr), | |
1254 | E_P_AUTO(SDmaBaseErr), | |
1255 | E_P_AUTO(SDmaTailOutOfBoundErr), | |
1256 | E_P_AUTO(SDmaOutOfBoundErr), | |
1257 | E_P_AUTO(SDmaGenMismatchErr), | |
1258 | E_P_AUTO(SendBufMisuseErr), | |
1259 | E_P_AUTO(SendUnsupportedVLErr), | |
1260 | E_P_AUTO(SendUnexpectedPktNumErr), | |
1261 | E_P_AUTO(SendDroppedDataPktErr), | |
1262 | E_P_AUTO(SendDroppedSmpPktErr), | |
1263 | E_P_AUTO(SendPktLenErr), | |
1264 | E_P_AUTO(SendUnderRunErr), | |
1265 | E_P_AUTO(SendMaxPktLenErr), | |
1266 | E_P_AUTO(SendMinPktLenErr), | |
1267 | E_P_AUTO(RcvIBLostLinkErr), | |
1268 | E_P_AUTO(RcvHdrErr), | |
1269 | E_P_AUTO(RcvHdrLenErr), | |
1270 | E_P_AUTO(RcvBadTidErr), | |
1271 | E_P_AUTO(RcvBadVersionErr), | |
1272 | E_P_AUTO(RcvIBFlowErr), | |
1273 | E_P_AUTO(RcvEBPErr), | |
1274 | E_P_AUTO(RcvUnsupportedVLErr), | |
1275 | E_P_AUTO(RcvUnexpectedCharErr), | |
1276 | E_P_AUTO(RcvShortPktLenErr), | |
1277 | E_P_AUTO(RcvLongPktLenErr), | |
1278 | E_P_AUTO(RcvMaxPktLenErr), | |
1279 | E_P_AUTO(RcvMinPktLenErr), | |
1280 | E_P_AUTO(RcvICRCErr), | |
1281 | E_P_AUTO(RcvVCRCErr), | |
1282 | E_P_AUTO(RcvFormatErr), | |
e67306a3 | 1283 | { .mask = 0, .sz = 0 } |
f931551b RC |
1284 | }; |
1285 | ||
1286 | /* | |
1287 | * Below generates "auto-message" for interrupts not specific to any port or | |
1288 | * context | |
1289 | */ | |
1290 | #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \ | |
e67306a3 | 1291 | .msg = #fldname, .sz = sizeof(#fldname) } |
f931551b RC |
1292 | /* Below generates "auto-message" for interrupts specific to a port */ |
1293 | #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\ | |
1294 | SYM_LSB(IntMask, fldname##Mask##_0), \ | |
1295 | SYM_LSB(IntMask, fldname##Mask##_1)), \ | |
e67306a3 | 1296 | .msg = #fldname "_P", .sz = sizeof(#fldname "_P") } |
f931551b RC |
1297 | /* For some reason, the SerDesTrimDone bits are reversed */ |
1298 | #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\ | |
1299 | SYM_LSB(IntMask, fldname##Mask##_1), \ | |
1300 | SYM_LSB(IntMask, fldname##Mask##_0)), \ | |
e67306a3 | 1301 | .msg = #fldname "_P", .sz = sizeof(#fldname "_P") } |
f931551b RC |
1302 | /* |
1303 | * Below generates "auto-message" for interrupts specific to a context, | |
1304 | * with ctxt-number appended | |
1305 | */ | |
1306 | #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\ | |
1307 | SYM_LSB(IntMask, fldname##0IntMask), \ | |
1308 | SYM_LSB(IntMask, fldname##17IntMask)), \ | |
e67306a3 | 1309 | .msg = #fldname "_C", .sz = sizeof(#fldname "_C") } |
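| /* | |
| * E.g. INTR_AUTO_P(SDmaInt) yields one entry whose mask runs from the | |
| * port-0 bit through the port-1 bit, and INTR_AUTO_C(RcvUrg) one that | |
| * spans all 18 context bits; err_decode() appends the bit offset | |
| * within the mask ("_0", "_17", ...) to identify the port or context. | |
| */ | |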
f931551b RC |
1310 | |
1311 | static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = { | |
1312 | INTR_AUTO_P(SDmaInt), | |
1313 | INTR_AUTO_P(SDmaProgressInt), | |
1314 | INTR_AUTO_P(SDmaIdleInt), | |
1315 | INTR_AUTO_P(SDmaCleanupDone), | |
1316 | INTR_AUTO_C(RcvUrg), | |
1317 | INTR_AUTO_P(ErrInt), | |
1318 | INTR_AUTO(ErrInt), /* non-port-specific errs */ | |
1319 | INTR_AUTO(AssertGPIOInt), | |
1320 | INTR_AUTO_P(SendDoneInt), | |
1321 | INTR_AUTO(SendBufAvailInt), | |
1322 | INTR_AUTO_C(RcvAvail), | |
e67306a3 | 1323 | { .mask = 0, .sz = 0 } |
f931551b RC |
1324 | }; |
1325 | ||
1326 | #define TXSYMPTOM_AUTO_P(fldname) \ | |
e67306a3 MM |
1327 | { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \ |
1328 | .msg = #fldname, .sz = sizeof(#fldname) } | |
f931551b RC |
1329 | static const struct qib_hwerror_msgs hdrchk_msgs[] = { |
1330 | TXSYMPTOM_AUTO_P(NonKeyPacket), | |
1331 | TXSYMPTOM_AUTO_P(GRHFail), | |
1332 | TXSYMPTOM_AUTO_P(PkeyFail), | |
1333 | TXSYMPTOM_AUTO_P(QPFail), | |
1334 | TXSYMPTOM_AUTO_P(SLIDFail), | |
1335 | TXSYMPTOM_AUTO_P(RawIPV6), | |
1336 | TXSYMPTOM_AUTO_P(PacketTooSmall), | |
e67306a3 | 1337 | { .mask = 0, .sz = 0 } |
f931551b RC |
1338 | }; |
1339 | ||
1340 | #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */ | |
1341 | ||
1342 | /* | |
1343 | * Called when we might have an error that is specific to a particular | |
1344 | * PIO buffer, and may need to cancel that buffer (so it can be | |
1345 | * re-used) without forcing an update of pioavail. | |
1346 | */ | |
1347 | static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd) | |
1348 | { | |
1349 | struct qib_devdata *dd = ppd->dd; | |
1350 | u32 i; | |
1351 | int any; | |
1352 | u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; | |
1353 | u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG; | |
1354 | unsigned long sbuf[4]; | |
1355 | ||
1356 | /* | |
1357 | * It's possible that sendbuffererror could have bits set; we might | |
1358 | * have already done this as a result of hardware error handling. | |
1359 | */ | |
1360 | any = 0; | |
1361 | for (i = 0; i < regcnt; ++i) { | |
1362 | sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i); | |
1363 | if (sbuf[i]) { | |
1364 | any = 1; | |
1365 | qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]); | |
1366 | } | |
1367 | } | |
1368 | ||
1369 | if (any) | |
1370 | qib_disarm_piobufs_set(dd, sbuf, piobcnt); | |
1371 | } | |
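| /* | |
| * sbuf[] above holds one bit per send buffer; with regcnt = | |
| * DIV_ROUND_UP(piobcnt, BITS_PER_LONG), four longs cover up to 256 | |
| * buffers on 64-bit, which is enough for this chip. | |
| */ | |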
1372 | ||
1373 | /* No txe_recover yet, if ever */ | |
1374 | ||
1375 | /* No decode__errors yet */ | |
1376 | static void err_decode(char *msg, size_t len, u64 errs, | |
1377 | const struct qib_hwerror_msgs *msp) | |
1378 | { | |
1379 | u64 these, lmask; | |
1380 | int took, multi, n = 0; | |
1381 | ||
e67306a3 | 1382 | while (errs && msp && msp->mask) { |
f931551b RC |
1383 | multi = (msp->mask & (msp->mask - 1)); |
1384 | while (errs & msp->mask) { | |
1385 | these = (errs & msp->mask); | |
1386 | lmask = (these & (these - 1)) ^ these; | |
1387 | if (len) { | |
1388 | if (n++) { | |
1389 | /* separate the strings */ | |
1390 | *msg++ = ','; | |
1391 | len--; | |
1392 | } | |
e67306a3 MM |
1393 | BUG_ON(!msp->sz); |
1394 | /* msp->sz counts the nul */ | |
1395 | took = min_t(size_t, msp->sz - (size_t)1, len); | |
1396 | memcpy(msg, msp->msg, took); | |
f931551b RC |
1397 | len -= took; |
1398 | msg += took; | |
e67306a3 MM |
1399 | if (len) |
1400 | *msg = '\0'; | |
f931551b RC |
1401 | } |
1402 | errs &= ~lmask; | |
1403 | if (len && multi) { | |
1404 | /* More than one bit this mask */ | |
1405 | int idx = -1; | |
1406 | ||
1407 | while (lmask & msp->mask) { | |
1408 | ++idx; | |
1409 | lmask >>= 1; | |
1410 | } | |
1411 | took = scnprintf(msg, len, "_%d", idx); | |
1412 | len -= took; | |
1413 | msg += took; | |
1414 | } | |
1415 | } | |
1416 | ++msp; | |
1417 | } | |
1418 | /* If some bits are left, show in hex. */ | |
1419 | if (len && errs) | |
1420 | snprintf(msg, len, "%sMORE:%llX", n ? "," : "", | |
1421 | (unsigned long long) errs); | |
1422 | } | |
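| /* | |
| * Example: decoding an ErrStatus value with HardwareErr and | |
| * SDmaVL15Err set against qib_7322error_msgs above yields | |
| * "HardwareErr,SDmaVL15Err"; any bits not found in the table are | |
| * reported as a trailing ",MORE:<hex>". | |
| */ | |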
1423 | ||
1424 | /* only called if r1 set */ | |
1425 | static void flush_fifo(struct qib_pportdata *ppd) | |
1426 | { | |
1427 | struct qib_devdata *dd = ppd->dd; | |
1428 | u32 __iomem *piobuf; | |
1429 | u32 bufn; | |
1430 | u32 *hdr; | |
1431 | u64 pbc; | |
1432 | const unsigned hdrwords = 7; | |
1433 | static struct qib_ib_header ibhdr = { | |
1434 | .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH), | |
1435 | .lrh[1] = IB_LID_PERMISSIVE, | |
1436 | .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC), | |
1437 | .lrh[3] = IB_LID_PERMISSIVE, | |
1438 | .u.oth.bth[0] = cpu_to_be32( | |
1439 | (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY), | |
1440 | .u.oth.bth[1] = cpu_to_be32(0), | |
1441 | .u.oth.bth[2] = cpu_to_be32(0), | |
1442 | .u.oth.u.ud.deth[0] = cpu_to_be32(0), | |
1443 | .u.oth.u.ud.deth[1] = cpu_to_be32(0), | |
1444 | }; | |
1445 | ||
1446 | /* | |
1447 | * Send a dummy VL15 packet to flush the launch FIFO. | |
1448 | * This will not actually be sent since the TxeBypassIbc bit is set. | |
1449 | */ | |
1450 | pbc = PBC_7322_VL15_SEND | | |
1451 | (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) | | |
1452 | (hdrwords + SIZE_OF_CRC); | |
1453 | piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn); | |
1454 | if (!piobuf) | |
1455 | return; | |
1456 | writeq(pbc, piobuf); | |
1457 | hdr = (u32 *) &ibhdr; | |
1458 | if (dd->flags & QIB_PIO_FLUSH_WC) { | |
1459 | qib_flush_wc(); | |
1460 | qib_pio_copy(piobuf + 2, hdr, hdrwords - 1); | |
1461 | qib_flush_wc(); | |
1462 | __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1); | |
1463 | qib_flush_wc(); | |
1464 | } else | |
1465 | qib_pio_copy(piobuf + 2, hdr, hdrwords); | |
1466 | qib_sendbuf_done(dd, bufn); | |
1467 | } | |
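| /* | |
| * The split copy above is deliberate: with write-combining mappings | |
| * (QIB_PIO_FLUSH_WC), all but the last header word are flushed first | |
| * and the final word is written only after another flush, so the chip | |
| * never treats a partially written packet as complete. | |
| */ | |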
1468 | ||
1469 | /* | |
1470 | * This is called with interrupts disabled and sdma_lock held. | |
1471 | */ | |
1472 | static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op) | |
1473 | { | |
1474 | struct qib_devdata *dd = ppd->dd; | |
1475 | u64 set_sendctrl = 0; | |
1476 | u64 clr_sendctrl = 0; | |
1477 | ||
1478 | if (op & QIB_SDMA_SENDCTRL_OP_ENABLE) | |
1479 | set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable); | |
1480 | else | |
1481 | clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable); | |
1482 | ||
1483 | if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE) | |
1484 | set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable); | |
1485 | else | |
1486 | clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable); | |
1487 | ||
1488 | if (op & QIB_SDMA_SENDCTRL_OP_HALT) | |
1489 | set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt); | |
1490 | else | |
1491 | clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt); | |
1492 | ||
1493 | if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) | |
1494 | set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) | | |
1495 | SYM_MASK(SendCtrl_0, TxeAbortIbc) | | |
1496 | SYM_MASK(SendCtrl_0, TxeDrainRmFifo); | |
1497 | else | |
1498 | clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) | | |
1499 | SYM_MASK(SendCtrl_0, TxeAbortIbc) | | |
1500 | SYM_MASK(SendCtrl_0, TxeDrainRmFifo); | |
1501 | ||
1502 | spin_lock(&dd->sendctrl_lock); | |
1503 | ||
1504 | /* If we are draining everything, block sends first */ | |
1505 | if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) { | |
1506 | ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable); | |
1507 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | |
1508 | qib_write_kreg(dd, kr_scratch, 0); | |
1509 | } | |
1510 | ||
1511 | ppd->p_sendctrl |= set_sendctrl; | |
1512 | ppd->p_sendctrl &= ~clr_sendctrl; | |
1513 | ||
1514 | if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP) | |
1515 | qib_write_kreg_port(ppd, krp_sendctrl, | |
1516 | ppd->p_sendctrl | | |
1517 | SYM_MASK(SendCtrl_0, SDmaCleanup)); | |
1518 | else | |
1519 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | |
1520 | qib_write_kreg(dd, kr_scratch, 0); | |
1521 | ||
1522 | if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) { | |
1523 | ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable); | |
1524 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | |
1525 | qib_write_kreg(dd, kr_scratch, 0); | |
1526 | } | |
1527 | ||
1528 | spin_unlock(&dd->sendctrl_lock); | |
1529 | ||
1530 | if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1) | |
1531 | flush_fifo(ppd); | |
1532 | } | |
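| /* | |
| * Note the DRAIN sequence above: sends are blocked, the | |
| * bypass/abort/drain bits are applied, sends are re-enabled, and on | |
| * r1 silicon a dummy VL15 packet (flush_fifo()) clears the launch | |
| * FIFO as the final step. | |
| */ | |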
1533 | ||
1534 | static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd) | |
1535 | { | |
1536 | __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned); | |
1537 | } | |
1538 | ||
1539 | static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd) | |
1540 | { | |
1541 | /* | |
1542 | * Set SendDmaLenGen: first write with the generation-count MSB clear, | |
1543 | * then again with it set, to enable generation checking | |
1544 | * and load the internal generation counter. | |
1545 | */ | |
1546 | qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt); | |
1547 | qib_write_kreg_port(ppd, krp_senddmalengen, | |
1548 | ppd->sdma_descq_cnt | | |
1549 | (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB)); | |
1550 | } | |
1551 | ||
1552 | /* | |
1553 | * Must be called with sdma_lock held, or before init finished. | |
1554 | */ | |
1555 | static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail) | |
1556 | { | |
1557 | /* Commit writes to memory and advance the tail on the chip */ | |
1558 | wmb(); | |
1559 | ppd->sdma_descq_tail = tail; | |
1560 | qib_write_kreg_port(ppd, krp_senddmatail, tail); | |
1561 | } | |
1562 | ||
1563 | /* | |
1564 | * This is called with interrupts disabled and sdma_lock held. | |
1565 | */ | |
1566 | static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd) | |
1567 | { | |
1568 | /* | |
1569 | * Drain all FIFOs. | |
1570 | * The hardware doesn't require this but we do it so that verbs | |
1571 | * and user applications don't wait for link active to send stale | |
1572 | * data. | |
1573 | */ | |
1574 | sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH); | |
1575 | ||
1576 | qib_sdma_7322_setlengen(ppd); | |
1577 | qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */ | |
1578 | ppd->sdma_head_dma[0] = 0; | |
1579 | qib_7322_sdma_sendctrl(ppd, | |
1580 | ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP); | |
1581 | } | |
1582 | ||
1583 | #define DISABLES_SDMA ( \ | |
1584 | QIB_E_P_SDMAHALT | \ | |
1585 | QIB_E_P_SDMADESCADDRMISALIGN | \ | |
1586 | QIB_E_P_SDMAMISSINGDW | \ | |
1587 | QIB_E_P_SDMADWEN | \ | |
1588 | QIB_E_P_SDMARPYTAG | \ | |
1589 | QIB_E_P_SDMA1STDESC | \ | |
1590 | QIB_E_P_SDMABASE | \ | |
1591 | QIB_E_P_SDMATAILOUTOFBOUND | \ | |
1592 | QIB_E_P_SDMAOUTOFBOUND | \ | |
1593 | QIB_E_P_SDMAGENMISMATCH) | |
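| /* | |
| * Each of the errors above arrives together with QIB_E_P_SDMAHALT | |
| * (the engine halts itself); sdma_7322_p_errors() below turns that | |
| * halt into the appropriate SDMA state-machine event. | |
| */ | |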
1594 | ||
1595 | static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs) | |
1596 | { | |
1597 | unsigned long flags; | |
1598 | struct qib_devdata *dd = ppd->dd; | |
1599 | ||
1600 | errs &= QIB_E_P_SDMAERRS; | |
b268e4db MM |
1601 | err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf), |
1602 | errs, qib_7322p_error_msgs); | |
f931551b RC |
1603 | |
1604 | if (errs & QIB_E_P_SDMAUNEXPDATA) | |
1605 | qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit, | |
1606 | ppd->port); | |
1607 | ||
1608 | spin_lock_irqsave(&ppd->sdma_lock, flags); | |
1609 | ||
0b3ddf38 DL |
1610 | if (errs != QIB_E_P_SDMAHALT) { |
1611 | /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */ | |
1612 | qib_dev_porterr(dd, ppd->port, | |
1613 | "SDMA %s 0x%016llx %s\n", | |
1614 | qib_sdma_state_names[ppd->sdma_state.current_state], | |
1615 | errs, ppd->cpspec->sdmamsgbuf); | |
1616 | dump_sdma_7322_state(ppd); | |
1617 | } | |
1618 | ||
f931551b RC |
1619 | switch (ppd->sdma_state.current_state) { |
1620 | case qib_sdma_state_s00_hw_down: | |
1621 | break; | |
1622 | ||
1623 | case qib_sdma_state_s10_hw_start_up_wait: | |
1624 | if (errs & QIB_E_P_SDMAHALT) | |
1625 | __qib_sdma_process_event(ppd, | |
1626 | qib_sdma_event_e20_hw_started); | |
1627 | break; | |
1628 | ||
1629 | case qib_sdma_state_s20_idle: | |
1630 | break; | |
1631 | ||
1632 | case qib_sdma_state_s30_sw_clean_up_wait: | |
1633 | break; | |
1634 | ||
1635 | case qib_sdma_state_s40_hw_clean_up_wait: | |
1636 | if (errs & QIB_E_P_SDMAHALT) | |
1637 | __qib_sdma_process_event(ppd, | |
1638 | qib_sdma_event_e50_hw_cleaned); | |
1639 | break; | |
1640 | ||
1641 | case qib_sdma_state_s50_hw_halt_wait: | |
1642 | if (errs & QIB_E_P_SDMAHALT) | |
1643 | __qib_sdma_process_event(ppd, | |
1644 | qib_sdma_event_e60_hw_halted); | |
1645 | break; | |
1646 | ||
1647 | case qib_sdma_state_s99_running: | |
1648 | __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted); | |
1649 | __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted); | |
1650 | break; | |
1651 | } | |
1652 | ||
1653 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | |
1654 | } | |
1655 | ||
1656 | /* | |
1657 | * handle per-device errors (not per-port errors) | |
1658 | */ | |
1659 | static noinline void handle_7322_errors(struct qib_devdata *dd) | |
1660 | { | |
1661 | char *msg; | |
1662 | u64 iserr = 0; | |
1663 | u64 errs; | |
1664 | u64 mask; | |
1665 | int log_idx; | |
1666 | ||
1667 | qib_stats.sps_errints++; | |
1668 | errs = qib_read_kreg64(dd, kr_errstatus); | |
1669 | if (!errs) { | |
7fac3301 MM |
1670 | qib_devinfo(dd->pcidev, |
1671 | "device error interrupt, but no error bits set!\n"); | |
f931551b RC |
1672 | goto done; |
1673 | } | |
1674 | ||
1675 | /* don't report errors that are masked */ | |
1676 | errs &= dd->cspec->errormask; | |
1677 | msg = dd->cspec->emsgbuf; | |
1678 | ||
1679 | /* do these first, they are most important */ | |
1680 | if (errs & QIB_E_HARDWARE) { | |
1681 | *msg = '\0'; | |
041af0bb | 1682 | qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf)); |
f931551b RC |
1683 | } else |
1684 | for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) | |
1685 | if (errs & dd->eep_st_masks[log_idx].errs_to_log) | |
1686 | qib_inc_eeprom_err(dd, log_idx, 1); | |
1687 | ||
1688 | if (errs & QIB_E_SPKTERRS) { | |
1689 | qib_disarm_7322_senderrbufs(dd->pport); | |
1690 | qib_stats.sps_txerrs++; | |
1691 | } else if (errs & QIB_E_INVALIDADDR) | |
1692 | qib_stats.sps_txerrs++; | |
1693 | else if (errs & QIB_E_ARMLAUNCH) { | |
1694 | qib_stats.sps_txerrs++; | |
1695 | qib_disarm_7322_senderrbufs(dd->pport); | |
1696 | } | |
1697 | qib_write_kreg(dd, kr_errclear, errs); | |
1698 | ||
1699 | /* | |
1700 | * The ones we mask off are handled specially below | |
1701 | * or above. Also mask SDMADISABLED by default as it | |
1702 | * is too chatty. | |
1703 | */ | |
1704 | mask = QIB_E_HARDWARE; | |
1705 | *msg = '\0'; | |
1706 | ||
041af0bb | 1707 | err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask, |
f931551b RC |
1708 | qib_7322error_msgs); |
1709 | ||
1710 | /* | |
1711 | * Getting reset is a tragedy for all ports. Mark the device | |
1712 | * _and_ the ports as "offline" in a way meaningful to each. | |
1713 | */ | |
1714 | if (errs & QIB_E_RESET) { | |
1715 | int pidx; | |
1716 | ||
7fac3301 MM |
1717 | qib_dev_err(dd, |
1718 | "Got reset, requires re-init (unload and reload driver)\n"); | |
f931551b RC |
1719 | dd->flags &= ~QIB_INITTED; /* needs re-init */ |
1720 | /* mark as having had error */ | |
1721 | *dd->devstatusp |= QIB_STATUS_HWERROR; | |
1722 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | |
1723 | if (dd->pport[pidx].link_speed_supported) | |
1724 | *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF; | |
1725 | } | |
1726 | ||
1727 | if (*msg && iserr) | |
1728 | qib_dev_err(dd, "%s error\n", msg); | |
1729 | ||
1730 | /* | |
1731 | * If there were hdrq or egrfull errors, wake up any processes | |
1732 | * waiting in poll. We used to try to check which contexts had | |
1733 | * the overflow, but given the cost of that and the chip reads | |
1734 | * to support it, it's better to just wake everybody up if we | |
1735 | * get an overflow; waiters can poll again if it's not them. | |
1736 | */ | |
1737 | if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) { | |
1738 | qib_handle_urcv(dd, ~0U); | |
1739 | if (errs & ERR_MASK(RcvEgrFullErr)) | |
1740 | qib_stats.sps_buffull++; | |
1741 | else | |
1742 | qib_stats.sps_hdrfull++; | |
1743 | } | |
1744 | ||
1745 | done: | |
1746 | return; | |
1747 | } | |
1748 | ||
e67306a3 MM |
1749 | static void qib_error_tasklet(unsigned long data) |
1750 | { | |
1751 | struct qib_devdata *dd = (struct qib_devdata *)data; | |
1752 | ||
1753 | handle_7322_errors(dd); | |
1754 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | |
1755 | } | |
1756 | ||
f931551b RC |
1757 | static void reenable_chase(unsigned long opaque) |
1758 | { | |
1759 | struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; | |
1760 | ||
1761 | ppd->cpspec->chase_timer.expires = 0; | |
1762 | qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, | |
1763 | QLOGIC_IB_IBCC_LINKINITCMD_POLL); | |
1764 | } | |
1765 | ||
8482d5d1 MM |
1766 | static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow, |
1767 | u8 ibclt) | |
f931551b RC |
1768 | { |
1769 | ppd->cpspec->chase_end = 0; | |
1770 | ||
1771 | if (!qib_chase) | |
1772 | return; | |
1773 | ||
1774 | qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, | |
1775 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | |
1776 | ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME; | |
1777 | add_timer(&ppd->cpspec->chase_timer); | |
1778 | } | |
1779 | ||
1780 | static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst) | |
1781 | { | |
1782 | u8 ibclt; | |
8482d5d1 | 1783 | unsigned long tnow; |
f931551b RC |
1784 | |
1785 | ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState); | |
1786 | ||
1787 | /* | |
1788 | * Detect and handle the state chase issue, where we can | |
1789 | * get stuck if we are unlucky on timing on both sides of | |
1790 | * the link. If we are, we disable, set a timer, and | |
1791 | * then re-enable. | |
1792 | */ | |
1793 | switch (ibclt) { | |
1794 | case IB_7322_LT_STATE_CFGRCVFCFG: | |
1795 | case IB_7322_LT_STATE_CFGWAITRMT: | |
1796 | case IB_7322_LT_STATE_TXREVLANES: | |
1797 | case IB_7322_LT_STATE_CFGENH: | |
8482d5d1 | 1798 | tnow = jiffies; |
f931551b | 1799 | if (ppd->cpspec->chase_end && |
8482d5d1 | 1800 | time_after(tnow, ppd->cpspec->chase_end)) |
f931551b RC |
1801 | disable_chase(ppd, tnow, ibclt); |
1802 | else if (!ppd->cpspec->chase_end) | |
1803 | ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME; | |
1804 | break; | |
1805 | default: | |
1806 | ppd->cpspec->chase_end = 0; | |
1807 | break; | |
1808 | } | |
1809 | ||
31264484 MH |
1810 | if (((ibclt >= IB_7322_LT_STATE_CFGTEST && |
1811 | ibclt <= IB_7322_LT_STATE_CFGWAITENH) || | |
1812 | ibclt == IB_7322_LT_STATE_LINKUP) && | |
f931551b RC |
1813 | (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) { |
1814 | force_h1(ppd); | |
1815 | ppd->cpspec->qdr_reforce = 1; | |
a0a234d4 MM |
1816 | if (!ppd->dd->cspec->r1) |
1817 | serdes_7322_los_enable(ppd, 0); | |
f931551b RC |
1818 | } else if (ppd->cpspec->qdr_reforce && |
1819 | (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) && | |
1820 | (ibclt == IB_7322_LT_STATE_CFGENH || | |
1821 | ibclt == IB_7322_LT_STATE_CFGIDLE || | |
1822 | ibclt == IB_7322_LT_STATE_LINKUP)) | |
1823 | force_h1(ppd); | |
1824 | ||
1825 | if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) && | |
1826 | ppd->link_speed_enabled == QIB_IB_QDR && | |
1827 | (ibclt == IB_7322_LT_STATE_CFGTEST || | |
1828 | ibclt == IB_7322_LT_STATE_CFGENH || | |
1829 | (ibclt >= IB_7322_LT_STATE_POLLACTIVE && | |
1830 | ibclt <= IB_7322_LT_STATE_SLEEPQUIET))) | |
1831 | adj_tx_serdes(ppd); | |
1832 | ||
a0a234d4 MM |
1833 | if (ibclt != IB_7322_LT_STATE_LINKUP) { |
1834 | u8 ltstate = qib_7322_phys_portstate(ibcst); | |
1835 | u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, | |
1836 | LinkTrainingState); | |
1837 | if (!ppd->dd->cspec->r1 && | |
1838 | pibclt == IB_7322_LT_STATE_LINKUP && | |
1839 | ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER && | |
1840 | ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN && | |
1841 | ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && | |
1842 | ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) | |
1843 | /* If the link went down (but not into recovery), | |
1844 | * turn LOS back on */ | |
1845 | serdes_7322_los_enable(ppd, 1); | |
1846 | if (!ppd->cpspec->qdr_dfe_on && | |
1847 | ibclt <= IB_7322_LT_STATE_SLEEPQUIET) { | |
1848 | ppd->cpspec->qdr_dfe_on = 1; | |
1849 | ppd->cpspec->qdr_dfe_time = 0; | |
1850 | /* On link down, reenable QDR adaptation */ | |
1851 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | |
1852 | ppd->dd->cspec->r1 ? | |
1853 | QDR_STATIC_ADAPT_DOWN_R1 : | |
1854 | QDR_STATIC_ADAPT_DOWN); | |
7fac3301 MM |
1855 | pr_info( |
1856 | "IB%u:%u re-enabled QDR adaptation ibclt %x\n", | |
1857 | ppd->dd->unit, ppd->port, ibclt); | |
a0a234d4 | 1858 | } |
f931551b RC |
1859 | } |
1860 | } | |
1861 | ||
f2d255a0 MM |
1862 | static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32); |
1863 | ||
f931551b RC |
1864 | /* |
1865 | * This is per-pport error handling. It will likely get its own | |
1866 | * MSIx interrupt (one for each port, although just a single | |
1867 | * handler). | |
1868 | */ | |
1869 | static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) | |
1870 | { | |
1871 | char *msg; | |
1872 | u64 ignore_this_time = 0, iserr = 0, errs, fmask; | |
1873 | struct qib_devdata *dd = ppd->dd; | |
1874 | ||
1875 | /* do this as soon as possible */ | |
1876 | fmask = qib_read_kreg64(dd, kr_act_fmask); | |
1877 | if (!fmask) | |
1878 | check_7322_rxe_status(ppd); | |
1879 | ||
1880 | errs = qib_read_kreg_port(ppd, krp_errstatus); | |
1881 | if (!errs) | |
1882 | qib_devinfo(dd->pcidev, | |
1883 | "Port%d error interrupt, but no error bits set!\n", | |
1884 | ppd->port); | |
1885 | if (!fmask) | |
1886 | errs &= ~QIB_E_P_IBSTATUSCHANGED; | |
1887 | if (!errs) | |
1888 | goto done; | |
1889 | ||
1890 | msg = ppd->cpspec->epmsgbuf; | |
1891 | *msg = '\0'; | |
1892 | ||
1893 | if (errs & ~QIB_E_P_BITSEXTANT) { | |
041af0bb | 1894 | err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), |
f931551b RC |
1895 | errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs); |
1896 | if (!*msg) | |
041af0bb | 1897 | snprintf(msg, sizeof(ppd->cpspec->epmsgbuf), |
f931551b | 1898 | "no others"); |
7fac3301 MM |
1899 | qib_dev_porterr(dd, ppd->port, |
1900 | "error interrupt with unknown errors 0x%016Lx set (and %s)\n", | |
1901 | (errs & ~QIB_E_P_BITSEXTANT), msg); | |
f931551b RC |
1902 | *msg = '\0'; |
1903 | } | |
1904 | ||
1905 | if (errs & QIB_E_P_SHDR) { | |
1906 | u64 symptom; | |
1907 | ||
1908 | /* determine cause, then write to clear */ | |
1909 | symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom); | |
1910 | qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0); | |
041af0bb | 1911 | err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom, |
f931551b RC |
1912 | hdrchk_msgs); |
1913 | *msg = '\0'; | |
1914 | /* senderrbuf cleared in SPKTERRS below */ | |
1915 | } | |
1916 | ||
1917 | if (errs & QIB_E_P_SPKTERRS) { | |
1918 | if ((errs & QIB_E_P_LINK_PKTERRS) && | |
1919 | !(ppd->lflags & QIBL_LINKACTIVE)) { | |
1920 | /* | |
1921 | * This can happen when trying to bring the link | |
1922 | * up, but the IB link changes state at the "wrong" | |
1923 | * time. The IB logic then complains that the packet | |
1924 | * isn't valid. We don't want to confuse people, so | |
1925 | * we just don't print them, except at debug | |
1926 | */ | |
041af0bb | 1927 | err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), |
f931551b RC |
1928 | (errs & QIB_E_P_LINK_PKTERRS), |
1929 | qib_7322p_error_msgs); | |
1930 | *msg = '\0'; | |
1931 | ignore_this_time = errs & QIB_E_P_LINK_PKTERRS; | |
1932 | } | |
1933 | qib_disarm_7322_senderrbufs(ppd); | |
1934 | } else if ((errs & QIB_E_P_LINK_PKTERRS) && | |
1935 | !(ppd->lflags & QIBL_LINKACTIVE)) { | |
1936 | /* | |
1937 | * This can happen when SMA is trying to bring the link | |
1938 | * up, but the IB link changes state at the "wrong" time. | |
1939 | * The IB logic then complains that the packet isn't | |
1940 | * valid. We don't want to confuse people, so we just | |
1941 | * don't print them, except at debug | |
1942 | */ | |
041af0bb | 1943 | err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs, |
f931551b RC |
1944 | qib_7322p_error_msgs); |
1945 | ignore_this_time = errs & QIB_E_P_LINK_PKTERRS; | |
1946 | *msg = '\0'; | |
1947 | } | |
1948 | ||
1949 | qib_write_kreg_port(ppd, krp_errclear, errs); | |
1950 | ||
1951 | errs &= ~ignore_this_time; | |
1952 | if (!errs) | |
1953 | goto done; | |
1954 | ||
1955 | if (errs & QIB_E_P_RPKTERRS) | |
1956 | qib_stats.sps_rcverrs++; | |
1957 | if (errs & QIB_E_P_SPKTERRS) | |
1958 | qib_stats.sps_txerrs++; | |
1959 | ||
1960 | iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS); | |
1961 | ||
1962 | if (errs & QIB_E_P_SDMAERRS) | |
1963 | sdma_7322_p_errors(ppd, errs); | |
1964 | ||
1965 | if (errs & QIB_E_P_IBSTATUSCHANGED) { | |
1966 | u64 ibcs; | |
1967 | u8 ltstate; | |
1968 | ||
1969 | ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a); | |
1970 | ltstate = qib_7322_phys_portstate(ibcs); | |
1971 | ||
1972 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | |
1973 | handle_serdes_issues(ppd, ibcs); | |
1974 | if (!(ppd->cpspec->ibcctrl_a & | |
1975 | SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) { | |
1976 | /* | |
1977 | * We got our interrupt, so init code should be | |
1978 | * happy and not try alternatives. Now squelch | |
1979 | * other "chatter" from link-negotiation (pre Init) | |
1980 | */ | |
1981 | ppd->cpspec->ibcctrl_a |= | |
1982 | SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn); | |
1983 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | |
1984 | ppd->cpspec->ibcctrl_a); | |
1985 | } | |
1986 | ||
1987 | /* Update our picture of width and speed from chip */ | |
1988 | ppd->link_width_active = | |
1989 | (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ? | |
1990 | IB_WIDTH_4X : IB_WIDTH_1X; | |
1991 | ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0, | |
1992 | LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs & | |
1993 | SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ? | |
1994 | QIB_IB_DDR : QIB_IB_SDR; | |
1995 | ||
1996 | if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate != | |
1997 | IB_PHYSPORTSTATE_DISABLED) | |
1998 | qib_set_ib_7322_lstate(ppd, 0, | |
1999 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | |
d70585f7 | 2000 | else |
f931551b RC |
2001 | /* |
2002 | * Since going into a recovery state causes the link | |
2003 | * state to go down and since recovery is transitory, | |
2004 | * it is better if we "miss" ever seeing the link | |
2005 | * training state go into recovery (i.e., ignore this | |
2006 | * transition for link state special handling purposes) | |
2007 | * without updating lastibcstat. | |
2008 | */ | |
2009 | if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER && | |
2010 | ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN && | |
2011 | ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && | |
2012 | ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) | |
2013 | qib_handle_e_ibstatuschanged(ppd, ibcs); | |
2014 | } | |
2015 | if (*msg && iserr) | |
2016 | qib_dev_porterr(dd, ppd->port, "%s error\n", msg); | |
2017 | ||
2018 | if (ppd->state_wanted & ppd->lflags) | |
2019 | wake_up_interruptible(&ppd->state_wait); | |
2020 | done: | |
2021 | return; | |
2022 | } | |
2023 | ||
2024 | /* enable/disable chip from delivering interrupts */ | |
2025 | static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable) | |
2026 | { | |
2027 | if (enable) { | |
2028 | if (dd->flags & QIB_BADINTR) | |
2029 | return; | |
2030 | qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask); | |
2031 | /* cause any pending enabled interrupts to be re-delivered */ | |
2032 | qib_write_kreg(dd, kr_intclear, 0ULL); | |
2033 | if (dd->cspec->num_msix_entries) { | |
2034 | /* and same for MSIx */ | |
2035 | u64 val = qib_read_kreg64(dd, kr_intgranted); | |
da12c1f6 | 2036 | |
f931551b RC |
2037 | if (val) |
2038 | qib_write_kreg(dd, kr_intgranted, val); | |
2039 | } | |
2040 | } else | |
2041 | qib_write_kreg(dd, kr_intmask, 0ULL); | |
2042 | } | |
2043 | ||
2044 | /* | |
2045 | * Try to cleanup as much as possible for anything that might have gone | |
2046 | * wrong while in freeze mode, such as pio buffers being written by user | |
2047 | * processes (causing armlaunch), send errors due to going into freeze mode, | |
2048 | * etc., and try to avoid causing extra interrupts while doing so. | |
2049 | * Forcibly update the in-memory pioavail register copies after cleanup | |
2050 | * because the chip won't do it while in freeze mode (the register values | |
2051 | * themselves are kept correct). | |
2052 | * Make sure that we don't lose any important interrupts by using the chip | |
2053 | * feature that says that writing 0 to a bit in *clear that is set in | |
2054 | * *status will cause an interrupt to be generated again (if allowed by | |
2055 | * the *mask value). | |
2056 | * This is in chip-specific code because of all of the register accesses, | |
2057 | * even though the details are similar on most chips. | |
2058 | */ | |
2059 | static void qib_7322_clear_freeze(struct qib_devdata *dd) | |
2060 | { | |
2061 | int pidx; | |
2062 | ||
2063 | /* disable error interrupts, to avoid confusion */ | |
2064 | qib_write_kreg(dd, kr_errmask, 0ULL); | |
2065 | ||
2066 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | |
2067 | if (dd->pport[pidx].link_speed_supported) | |
2068 | qib_write_kreg_port(dd->pport + pidx, krp_errmask, | |
2069 | 0ULL); | |
2070 | ||
2071 | /* also disable interrupts; errormask is sometimes overwritten */ | |
2072 | qib_7322_set_intr_state(dd, 0); | |
2073 | ||
2074 | /* clear the freeze, and be sure chip saw it */ | |
2075 | qib_write_kreg(dd, kr_control, dd->control); | |
2076 | qib_read_kreg32(dd, kr_scratch); | |
2077 | ||
2078 | /* | |
2079 | * Force new interrupt if any hwerr, error or interrupt bits are | |
2080 | * still set, and clear "safe" send packet errors related to freeze | |
2081 | * and cancelling sends. Re-enable error interrupts before possible | |
2082 | * force of re-interrupt on pending interrupts. | |
2083 | */ | |
2084 | qib_write_kreg(dd, kr_hwerrclear, 0ULL); | |
2085 | qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE); | |
2086 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | |
2087 | /* We need to purge per-port errs and reset mask, too */ | |
2088 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
2089 | if (!dd->pport[pidx].link_speed_supported) | |
2090 | continue; | |
2091 | qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull); | |
2092 | qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull); | |
2093 | } | |
2094 | qib_7322_set_intr_state(dd, 1); | |
2095 | } | |
2096 | ||
2097 | /* no error handling to speak of */ | |
2098 | /** | |
2099 | * qib_7322_handle_hwerrors - display hardware errors. | |
2100 | * @dd: the qlogic_ib device | |
2101 | * @msg: the output buffer | |
2102 | * @msgl: the size of the output buffer | |
2103 | * | |
2104 | * Most hardware errors are catastrophic, but for right now, | |
2105 | * we'll print them and continue. We reuse the same message | |
2106 | * buffer as qib_handle_errors() to avoid excessive stack | |
2107 | * usage. | |
2108 | */ | |
2109 | static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, | |
2110 | size_t msgl) | |
2111 | { | |
2112 | u64 hwerrs; | |
2113 | u32 ctrl; | |
2114 | int isfatal = 0; | |
2115 | ||
2116 | hwerrs = qib_read_kreg64(dd, kr_hwerrstatus); | |
2117 | if (!hwerrs) | |
2118 | goto bail; | |
2119 | if (hwerrs == ~0ULL) { | |
7fac3301 MM |
2120 | qib_dev_err(dd, |
2121 | "Read of hardware error status failed (all bits set); ignoring\n"); | |
f931551b RC |
2122 | goto bail; |
2123 | } | |
2124 | qib_stats.sps_hwerrs++; | |
2125 | ||
2126 | /* Always clear the error status register, except BIST fail */ | |
2127 | qib_write_kreg(dd, kr_hwerrclear, hwerrs & | |
2128 | ~HWE_MASK(PowerOnBISTFailed)); | |
2129 | ||
2130 | hwerrs &= dd->cspec->hwerrmask; | |
2131 | ||
2132 | /* no EEPROM logging, yet */ | |
2133 | ||
2134 | if (hwerrs) | |
7fac3301 MM |
2135 | qib_devinfo(dd->pcidev, |
2136 | "Hardware error: hwerr=0x%llx (cleared)\n", | |
2137 | (unsigned long long) hwerrs); | |
f931551b RC |
2138 | |
2139 | ctrl = qib_read_kreg32(dd, kr_control); | |
2140 | if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) { | |
2141 | /* | |
2142 | * No recovery yet... | |
2143 | */ | |
2144 | if ((hwerrs & ~HWE_MASK(LATriggered)) || | |
2145 | dd->cspec->stay_in_freeze) { | |
2146 | /* | |
2147 | * If any bits that we aren't ignoring are set, only make the | |
2148 | * complaint once, in case it's stuck or recurring and we get | |
2149 | * here multiple times. | |
2150 | * Force the link down, so the switch knows, and the | |
2151 | * LEDs are turned off. | |
2152 | */ | |
2153 | if (dd->flags & QIB_INITTED) | |
2154 | isfatal = 1; | |
2155 | } else | |
2156 | qib_7322_clear_freeze(dd); | |
2157 | } | |
2158 | ||
2159 | if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { | |
2160 | isfatal = 1; | |
7fac3301 MM |
2161 | strlcpy(msg, |
2162 | "[Memory BIST test failed, InfiniPath hardware unusable]", | |
2163 | msgl); | |
f931551b RC |
2164 | /* ignore from now on, so disable until driver reloaded */ |
2165 | dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); | |
2166 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | |
2167 | } | |
2168 | ||
2169 | err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs); | |
2170 | ||
2171 | /* Ignore esoteric PLL failures et al. */ | |
2172 | ||
2173 | qib_dev_err(dd, "%s hardware error\n", msg); | |
2174 | ||
0b3ddf38 DL |
2175 | if (hwerrs & |
2176 | (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) | | |
2177 | SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) { | |
2178 | int pidx = 0; | |
2179 | int err; | |
2180 | unsigned long flags; | |
2181 | struct qib_pportdata *ppd = dd->pport; | |
da12c1f6 | 2182 | |
0b3ddf38 DL |
2183 | for (; pidx < dd->num_pports; ++pidx, ppd++) { |
2184 | err = 0; | |
2185 | if (pidx == 0 && (hwerrs & | |
2186 | SYM_MASK(HwErrMask, SDmaMemReadErrMask_0))) | |
2187 | err++; | |
2188 | if (pidx == 1 && (hwerrs & | |
2189 | SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) | |
2190 | err++; | |
2191 | if (err) { | |
2192 | spin_lock_irqsave(&ppd->sdma_lock, flags); | |
2193 | dump_sdma_7322_state(ppd); | |
2194 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | |
2195 | } | |
2196 | } | |
2197 | } | |
2198 | ||
f931551b | 2199 | if (isfatal && !dd->diag_client) { |
7fac3301 MM |
2200 | qib_dev_err(dd, |
2201 | "Fatal Hardware Error, no longer usable, SN %.16s\n", | |
2202 | dd->serial); | |
f931551b RC |
2203 | /* |
2204 | * for /sys status file and user programs to print; if no | |
2205 | * trailing brace is copied, we'll know it was truncated. | |
2206 | */ | |
2207 | if (dd->freezemsg) | |
2208 | snprintf(dd->freezemsg, dd->freezelen, | |
2209 | "{%s}", msg); | |
2210 | qib_disable_after_error(dd); | |
2211 | } | |
2212 | bail:; | |
2213 | } | |
2214 | ||
2215 | /** | |
2216 | * qib_7322_init_hwerrors - enable hardware errors | |
2217 | * @dd: the qlogic_ib device | |
2218 | * | |
2219 | * now that we have finished initializing everything that might reasonably | |
2220 | * cause a hardware error, and cleared those error bits as they occur, | |
2221 | * we can enable hardware errors in the mask (potentially enabling | |
2222 | * freeze mode), and enable hardware errors as errors (along with | |
2223 | * everything else) in errormask | |
2224 | */ | |
2225 | static void qib_7322_init_hwerrors(struct qib_devdata *dd) | |
2226 | { | |
2227 | int pidx; | |
2228 | u64 extsval; | |
2229 | ||
2230 | extsval = qib_read_kreg64(dd, kr_extstatus); | |
2231 | if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED | | |
2232 | QIB_EXTS_MEMBIST_ENDTEST))) | |
2233 | qib_dev_err(dd, "MemBIST did not complete!\n"); | |
2234 | ||
2235 | /* never clear BIST failure, so reported on each driver load */ | |
2236 | qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed)); | |
2237 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | |
2238 | ||
2239 | /* clear all */ | |
2240 | qib_write_kreg(dd, kr_errclear, ~0ULL); | |
2241 | /* enable errors that are masked, at least this first time. */ | |
2242 | qib_write_kreg(dd, kr_errmask, ~0ULL); | |
2243 | dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask); | |
2244 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | |
2245 | if (dd->pport[pidx].link_speed_supported) | |
2246 | qib_write_kreg_port(dd->pport + pidx, krp_errmask, | |
2247 | ~0ULL); | |
2248 | } | |
2249 | ||
2250 | /* | |
2251 | * Disable and enable the armlaunch error. Used for PIO bandwidth testing | |
2252 | * on chips that are count-based, rather than trigger-based. There is no | |
2253 | * reference counting, but that's also fine, given the intended use. | |
2254 | * Only chip-specific because it's all register accesses | |
2255 | */ | |
2256 | static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable) | |
2257 | { | |
2258 | if (enable) { | |
2259 | qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH); | |
2260 | dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH; | |
2261 | } else | |
2262 | dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH; | |
2263 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | |
2264 | } | |
2265 | ||
2266 | /* | |
2267 | * Formerly took parameter <which> in pre-shifted, | |
2268 | * pre-merged form with LinkCmd and LinkInitCmd | |
2269 | * together, and assuming the zero was NOP. | |
2270 | */ | |
2271 | static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd, | |
2272 | u16 linitcmd) | |
2273 | { | |
2274 | u64 mod_wd; | |
2275 | struct qib_devdata *dd = ppd->dd; | |
2276 | unsigned long flags; | |
2277 | ||
2278 | if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) { | |
2279 | /* | |
2280 | * If we are told to disable, note that so link-recovery | |
2281 | * code does not attempt to bring us back up. | |
2282 | * Also reset everything that we can, so we start | |
2283 | * completely clean when re-enabled (before we | |
2284 | * actually issue the disable to the IBC) | |
2285 | */ | |
2286 | qib_7322_mini_pcs_reset(ppd); | |
2287 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
2288 | ppd->lflags |= QIBL_IB_LINK_DISABLED; | |
2289 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
2290 | } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) { | |
2291 | /* | |
2292 | * Any other linkinitcmd will lead to LINKDOWN and then | |
2293 | * to INIT (if all is well), so clear flag to let | |
2294 | * link-recovery code attempt to bring us back up. | |
2295 | */ | |
2296 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
2297 | ppd->lflags &= ~QIBL_IB_LINK_DISABLED; | |
2298 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
2299 | /* | |
2300 | * Clear status change interrupt reduction so the | |
2301 | * new state is seen. | |
2302 | */ | |
2303 | ppd->cpspec->ibcctrl_a &= | |
2304 | ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn); | |
2305 | } | |
2306 | ||
2307 | mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) | | |
2308 | (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | |
2309 | ||
2310 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a | | |
2311 | mod_wd); | |
2312 | /* write to chip to prevent back-to-back writes of ibc reg */ | |
2313 | qib_write_kreg(dd, kr_scratch, 0); | |
2314 | ||
2315 | } | |
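| /* | |
| * E.g. reenable_chase() above restarts polling with | |
| *   qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, | |
| *                          QLOGIC_IB_IBCC_LINKINITCMD_POLL); | |
| * while disable_chase() parks the link via LINKINITCMD_DISABLE. | |
| */ | |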
2316 | ||
2317 | /* | |
2318 | * The total RCV buffer memory is 64KB, used for both ports, and is | |
2319 | * in units of 64 bytes (same as IB flow control credit unit). | |
2320 | * The consumedVL fields in the same registers are in 32 byte units! | |
2321 | * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks, | |
2322 | * and we can therefore allocate just 9 IB credits for 2 VL15 packets | |
2323 | * in krp_rxcreditvl15, rather than 10. | |
2324 | */ | |
2325 | #define RCV_BUF_UNITSZ 64 | |
2326 | #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports)) | |
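| /* | |
| * Worked example with both ports in use: 64K / (64 * 2) = 512 units | |
| * per port; VL15 takes (2 * 288 + 63) / 64 = 9 of them in set_vls() | |
| * below, leaving 503 to split across the operational data VLs. | |
| */ | |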
2327 | ||
2328 | static void set_vls(struct qib_pportdata *ppd) | |
2329 | { | |
2330 | int i, numvls, totcred, cred_vl, vl0extra; | |
2331 | struct qib_devdata *dd = ppd->dd; | |
2332 | u64 val; | |
2333 | ||
2334 | numvls = qib_num_vls(ppd->vls_operational); | |
2335 | ||
2336 | /* | |
2337 | * Set up per-VL credits. Below is a kluge based on these assumptions: | |
2338 | * 1) port is disabled at the time early_init is called. | |
2339 | * 2) give VL15 9 credits (64-byte units), for two max-plausible packets. | |
2340 | * 3) Give VL0-N the rest, with any rounding excess used for VL0 | |
2341 | */ | |
2342 | /* 2 VL15 packets @ 288 bytes each (including IB headers) */ | |
2343 | totcred = NUM_RCV_BUF_UNITS(dd); | |
2344 | cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ; | |
2345 | totcred -= cred_vl; | |
2346 | qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl); | |
2347 | cred_vl = totcred / numvls; | |
2348 | vl0extra = totcred - cred_vl * numvls; | |
2349 | qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra); | |
2350 | for (i = 1; i < numvls; i++) | |
2351 | qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl); | |
2352 | for (; i < 8; i++) /* no buffer space for other VLs */ | |
2353 | qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0); | |
2354 | ||
2355 | /* Notify IBC that credits need to be recalculated */ | |
2356 | val = qib_read_kreg_port(ppd, krp_ibsdtestiftx); | |
2357 | val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE); | |
2358 | qib_write_kreg_port(ppd, krp_ibsdtestiftx, val); | |
2359 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
2360 | val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE); | |
2361 | qib_write_kreg_port(ppd, krp_ibsdtestiftx, val); | |
2362 | ||
2363 | for (i = 0; i < numvls; i++) | |
2364 | val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i); | |
2365 | val = qib_read_kreg_port(ppd, krp_rxcreditvl15); | |
2366 | ||
2367 | /* Change the number of operational VLs */ | |
2368 | ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a & | |
2369 | ~SYM_MASK(IBCCtrlA_0, NumVLane)) | | |
2370 | ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane)); | |
2371 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | |
2372 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
2373 | } | |
2374 | ||
2375 | /* | |
2376 | * The code that deals with actual SerDes is in serdes_7322_init(). | |
2377 | * Compared to the code for iba7220, it is minimal. | |
2378 | */ | |
2379 | static int serdes_7322_init(struct qib_pportdata *ppd); | |
2380 | ||
2381 | /** | |
2382 | * qib_7322_bringup_serdes - bring up the serdes | |
2383 | * @ppd: physical port on the qlogic_ib device | |
2384 | */ | |
2385 | static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) | |
2386 | { | |
2387 | struct qib_devdata *dd = ppd->dd; | |
2388 | u64 val, guid, ibc; | |
2389 | unsigned long flags; | |
2390 | int ret = 0; | |
2391 | ||
2392 | /* | |
2393 | * SerDes model not in Pd, but still need to | |
2394 | * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere | |
2395 | * eventually. | |
2396 | */ | |
2397 | /* Put IBC in reset, sends disabled (should be in reset already) */ | |
2398 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn); | |
2399 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | |
2400 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
2401 | ||
2f75e12c MM |
2402 | /* ensure previous Tx parameters are not still forced */ |
2403 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | |
2404 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
2405 | reset_tx_deemphasis_override)); | |
2406 | ||
f931551b RC |
2407 | if (qib_compat_ddr_negotiate) { |
2408 | ppd->cpspec->ibdeltainprog = 1; | |
2409 | ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, | |
2410 | crp_ibsymbolerr); | |
2411 | ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd, | |
2412 | crp_iblinkerrrecov); | |
2413 | } | |
2414 | ||
2415 | /* flowcontrolwatermark is in units of KBytes */ | |
2416 | ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark); | |
2417 | /* | |
2418 | * Flow control is sent this often, even if no changes in | |
2419 | * buffer space occur. Units are 128ns for this chip. | |
2420 | * Set to 3usec. | |
2421 | */ | |
2422 | ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod); | |
2423 | /* max error tolerance */ | |
2424 | ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold); | |
2425 | /* IB credit flow control. */ | |
2426 | ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold); | |
2427 | /* | |
2428 | * set initial max size pkt IBC will send, including ICRC; it's the | |
2429 | * PIO buffer size in dwords, less 1; also see qib_set_mtu() | |
2430 | */ | |
2431 | ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << | |
2432 | SYM_LSB(IBCCtrlA_0, MaxPktLen); | |
2433 | ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */ | |
2434 | ||
f931551b RC |
2435 | /* |
2436 | * Reset the PCS interface to the serdes (and also ibc, which is still | |
2437 | * in reset from above). Writes new value of ibcctrl_a as last step. | |
2438 | */ | |
2439 | qib_7322_mini_pcs_reset(ppd); | |
f931551b RC |
2440 | |
2441 | if (!ppd->cpspec->ibcctrl_b) { | |
2442 | unsigned lse = ppd->link_speed_enabled; | |
2443 | ||
2444 | /* | |
2445 | * Not on re-init after reset, establish shadow | |
2446 | * and force initial config. | |
2447 | */ | |
2448 | ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd, | |
2449 | krp_ibcctrl_b); | |
2450 | ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR | | |
2451 | IBA7322_IBC_SPEED_DDR | | |
2452 | IBA7322_IBC_SPEED_SDR | | |
2453 | IBA7322_IBC_WIDTH_AUTONEG | | |
2454 | SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED)); | |
2455 | if (lse & (lse - 1)) /* Multiple speeds enabled */ | |
2456 | ppd->cpspec->ibcctrl_b |= | |
2457 | (lse << IBA7322_IBC_SPEED_LSB) | | |
2458 | IBA7322_IBC_IBTA_1_2_MASK | | |
2459 | IBA7322_IBC_MAX_SPEED_MASK; | |
2460 | else | |
2461 | ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ? | |
2462 | IBA7322_IBC_SPEED_QDR | | |
2463 | IBA7322_IBC_IBTA_1_2_MASK : | |
2464 | (lse == QIB_IB_DDR) ? | |
2465 | IBA7322_IBC_SPEED_DDR : | |
2466 | IBA7322_IBC_SPEED_SDR; | |
2467 | if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) == | |
2468 | (IB_WIDTH_1X | IB_WIDTH_4X)) | |
2469 | ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG; | |
2470 | else | |
2471 | ppd->cpspec->ibcctrl_b |= | |
2472 | ppd->link_width_enabled == IB_WIDTH_4X ? | |
2473 | IBA7322_IBC_WIDTH_4X_ONLY : | |
2474 | IBA7322_IBC_WIDTH_1X_ONLY; | |
2475 | ||
2476 | /* always enable these on driver reload, not sticky */ | |
2477 | ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK | | |
2478 | IBA7322_IBC_HRTBT_MASK); | |
2479 | } | |
2480 | qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b); | |
2481 | ||
2482 | /* setup so we have more time at CFGTEST to change H1 */ | |
2483 | val = qib_read_kreg_port(ppd, krp_ibcctrl_c); | |
2484 | val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH); | |
2485 | val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH); | |
2486 | qib_write_kreg_port(ppd, krp_ibcctrl_c, val); | |
2487 | ||
2488 | serdes_7322_init(ppd); | |
2489 | ||
2490 | guid = be64_to_cpu(ppd->guid); | |
2491 | if (!guid) { | |
2492 | if (dd->base_guid) | |
2493 | guid = be64_to_cpu(dd->base_guid) + ppd->port - 1; | |
2494 | ppd->guid = cpu_to_be64(guid); | |
2495 | } | |
2496 | ||
2497 | qib_write_kreg_port(ppd, krp_hrtbt_guid, guid); | |
2498 | /* write to chip to prevent back-to-back writes of ibc reg */ | |
2499 | qib_write_kreg(dd, kr_scratch, 0); | |
2500 | ||
2501 | /* Enable port */ | |
2502 | ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn); | |
2503 | set_vls(ppd); | |
2504 | ||
8ee887d7 MM |
2505 | /* initially come up DISABLED, without sending anything. */ |
2506 | val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << | |
2507 | QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | |
2508 | qib_write_kreg_port(ppd, krp_ibcctrl_a, val); | |
2509 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
2510 | /* clear the linkinit cmds */ | |
2511 | ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd); | |
2512 | ||
f931551b RC |
2513 | /* be paranoid against later code motion, etc. */ |
2514 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | |
2515 | ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable); | |
2516 | qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); | |
2517 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | |
2518 | ||
2519 | /* Also enable IBSTATUSCHG interrupt. */ | |
2520 | val = qib_read_kreg_port(ppd, krp_errmask); | |
2521 | qib_write_kreg_port(ppd, krp_errmask, | |
2522 | val | ERR_MASK_N(IBStatusChanged)); | |
2523 | ||
2524 | /* Always zero until we start messing with SerDes for real */ | |
2525 | return ret; | |
2526 | } | |
2527 | ||
2528 | /** | |
2529 | * qib_7322_mini_quiet_serdes - set serdes to txidle | |
2530 | * @ppd: physical port on the qlogic_ib device | |
2531 | * Called when driver is being unloaded | |
2532 | */ | |
2533 | static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd) | |
2534 | { | |
2535 | u64 val; | |
2536 | unsigned long flags; | |
2537 | ||
2538 | qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | |
2539 | ||
2540 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
2541 | ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; | |
2542 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
2543 | wake_up(&ppd->cpspec->autoneg_wait); | |
f0626710 | 2544 | cancel_delayed_work_sync(&ppd->cpspec->autoneg_work); |
f931551b | 2545 | if (ppd->dd->cspec->r1) |
f0626710 | 2546 | cancel_delayed_work_sync(&ppd->cpspec->ipg_work); |
f931551b RC |
2547 | |
2548 | ppd->cpspec->chase_end = 0; | |
2549 | if (ppd->cpspec->chase_timer.data) /* if initted */ | |
2550 | del_timer_sync(&ppd->cpspec->chase_timer); | |
2551 | ||
2552 | /* | |
2553 | * Despite the name, actually disables IBC as well. Do it when | |
2554 | * we are as sure as possible that no more packets can be | |
2555 | * received, following the down and the PCS reset. | |
2556 | * The actual disabling happens in qib_7322_mini_pcs_reset(), | |
2557 | * along with the PCS being reset. | |
2558 | */ | |
2559 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn); | |
2560 | qib_7322_mini_pcs_reset(ppd); | |
2561 | ||
2562 | /* | |
2563 | * Update the adjusted counters so the adjustment persists | |
2564 | * across driver reload. | |
2565 | */ | |
2566 | if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta || | |
2567 | ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) { | |
2568 | struct qib_devdata *dd = ppd->dd; | |
2569 | u64 diagc; | |
2570 | ||
2571 | /* enable counter writes */ | |
2572 | diagc = qib_read_kreg64(dd, kr_hwdiagctrl); | |
2573 | qib_write_kreg(dd, kr_hwdiagctrl, | |
2574 | diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable)); | |
2575 | ||
2576 | if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) { | |
2577 | val = read_7322_creg32_port(ppd, crp_ibsymbolerr); | |
2578 | if (ppd->cpspec->ibdeltainprog) | |
2579 | val -= val - ppd->cpspec->ibsymsnap; | |
2580 | val -= ppd->cpspec->ibsymdelta; | |
2581 | write_7322_creg_port(ppd, crp_ibsymbolerr, val); | |
2582 | } | |
2583 | if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) { | |
2584 | val = read_7322_creg32_port(ppd, crp_iblinkerrrecov); | |
2585 | if (ppd->cpspec->ibdeltainprog) | |
2586 | val -= val - ppd->cpspec->iblnkerrsnap; | |
2587 | val -= ppd->cpspec->iblnkerrdelta; | |
2588 | write_7322_creg_port(ppd, crp_iblinkerrrecov, val); | |
2589 | } | |
2590 | if (ppd->cpspec->iblnkdowndelta) { | |
2591 | val = read_7322_creg32_port(ppd, crp_iblinkdown); | |
2592 | val += ppd->cpspec->iblnkdowndelta; | |
2593 | write_7322_creg_port(ppd, crp_iblinkdown, val); | |
2594 | } | |
2595 | /* | |
2596 | * No need to save ibmalfdelta since IB perfcounters | |
2597 | * are cleared on driver reload. | |
2598 | */ | |
2599 | ||
2600 | /* and disable counter writes */ | |
2601 | qib_write_kreg(dd, kr_hwdiagctrl, diagc); | |
2602 | } | |
2603 | } | |
2604 | ||
2605 | /** | |
2606 | * qib_setup_7322_setextled - set the state of the two external LEDs | |
2607 | * @ppd: physical port on the qlogic_ib device | |
2608 | * @on: whether the link is up or not | |
2609 | * | |
2610 | * The exact combo of LEDs if on is true is determined by looking | |
2611 | * at the ibcstatus. | |
2612 | * | |
2613 | * These LEDs indicate the physical and logical state of IB link. | |
2614 | * For this chip (at least with recommended board pinouts), LED1 | |
2615 | * is Yellow (logical state) and LED2 is Green (physical state), | |
2616 |  * is Yellow (logical state) and LED2 is Green (physical state). | 
2617 | * Note: We try to match the Mellanox HCA LED behavior as best | |
2618 | * we can. Green indicates physical link state is OK (something is | |
2619 | * plugged in, and we can train). | |
2620 | * Amber indicates the link is logically up (ACTIVE). | |
2621 | * Mellanox further blinks the amber LED to indicate data packet | |
2622 | * activity, but we have no hardware support for that, so it would | |
2623 | * require waking up every 10-20 msecs and checking the counters | |
2624 | * on the chip, and then turning the LED off if appropriate. That's | |
2625 | * visible overhead, so not something we will do. | |
2626 | */ | |
2627 | static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on) | |
2628 | { | |
2629 | struct qib_devdata *dd = ppd->dd; | |
2630 | u64 extctl, ledblink = 0, val; | |
2631 | unsigned long flags; | |
2632 | int yel, grn; | |
2633 | ||
2634 | /* | |
2635 | * The diags use the LED to indicate diag info, so we leave | |
2636 | * the external LED alone when the diags are running. | |
2637 | */ | |
2638 | if (dd->diag_client) | |
2639 | return; | |
2640 | ||
2641 | /* Allow override of LED display, e.g. for locating a system in a rack */ | 
2642 | if (ppd->led_override) { | |
2643 | grn = (ppd->led_override & QIB_LED_PHYS); | |
2644 | yel = (ppd->led_override & QIB_LED_LOG); | |
2645 | } else if (on) { | |
2646 | val = qib_read_kreg_port(ppd, krp_ibcstatus_a); | |
2647 | grn = qib_7322_phys_portstate(val) == | |
2648 | IB_PHYSPORTSTATE_LINKUP; | |
2649 | yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE; | |
2650 | } else { | |
2651 | grn = 0; | |
2652 | yel = 0; | |
2653 | } | |
2654 | ||
2655 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | |
2656 | extctl = dd->cspec->extctrl & (ppd->port == 1 ? | |
2657 | ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK); | |
2658 | if (grn) { | |
2659 | extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN; | |
2660 | /* | |
2661 | * Counts are in chip clock (4ns) periods. | |
2662 |  * This is roughly 1/15 sec (66.6 ms) on, | 
2663 |  * 3/16 sec (187.5 ms) off, with packets rcvd. | 
2664 | */ | |
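/*
 * Worked example of the conversion: 66.6 ms is 66,600,000 ns, so at
 * 4 ns per period the ON field below is 66600 * 1000 / 4 =
 * 16,650,000 periods; likewise 187.5 ms gives 46,875,000 periods
 * for the OFF field.
 */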
2665 | ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) | | |
2666 | ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT); | |
2667 | } | |
2668 | if (yel) | |
2669 | extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL; | |
2670 | dd->cspec->extctrl = extctl; | |
2671 | qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); | |
2672 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | |
2673 | ||
2674 | if (ledblink) /* blink the LED on packet receive */ | |
2675 | qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink); | |
2676 | } | |
2677 | ||
2678 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
2679 | ||
2680 | static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event) | |
2681 | { | |
2682 | switch (event) { | |
2683 | case DCA_PROVIDER_ADD: | |
2684 | if (dd->flags & QIB_DCA_ENABLED) | |
2685 | break; | |
2686 | if (!dca_add_requester(&dd->pcidev->dev)) { | |
2687 | qib_devinfo(dd->pcidev, "DCA enabled\n"); | |
2688 | dd->flags |= QIB_DCA_ENABLED; | |
2689 | qib_setup_dca(dd); | |
2690 | } | |
2691 | break; | |
2692 | case DCA_PROVIDER_REMOVE: | |
2693 | if (dd->flags & QIB_DCA_ENABLED) { | |
2694 | dca_remove_requester(&dd->pcidev->dev); | |
2695 | dd->flags &= ~QIB_DCA_ENABLED; | |
2696 | dd->cspec->dca_ctrl = 0; | |
2697 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), | |
2698 | dd->cspec->dca_ctrl); | |
2699 | } | |
2700 | break; | |
2701 | } | |
2702 | return 0; | |
2703 | } | |
2704 | ||
2705 | static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu) | |
2706 | { | |
2707 | struct qib_devdata *dd = rcd->dd; | |
2708 | struct qib_chip_specific *cspec = dd->cspec; | |
2709 | ||
2710 | if (!(dd->flags & QIB_DCA_ENABLED)) | |
2711 | return; | |
2712 | if (cspec->rhdr_cpu[rcd->ctxt] != cpu) { | |
2713 | const struct dca_reg_map *rmp; | |
2714 | ||
2715 | cspec->rhdr_cpu[rcd->ctxt] = cpu; | |
2716 | rmp = &dca_rcvhdr_reg_map[rcd->ctxt]; | |
2717 | cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask; | |
2718 | cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |= | |
2719 | (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb; | |
2720 | qib_devinfo(dd->pcidev, | |
2721 | "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu, | |
2722 | (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]); | |
2723 | qib_write_kreg(dd, rmp->regno, | |
2724 | cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]); | |
2725 | cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable); | |
2726 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); | |
2727 | } | |
2728 | } | |
2729 | ||
2730 | static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu) | |
2731 | { | |
2732 | struct qib_devdata *dd = ppd->dd; | |
2733 | struct qib_chip_specific *cspec = dd->cspec; | |
2734 | unsigned pidx = ppd->port - 1; | |
2735 | ||
2736 | if (!(dd->flags & QIB_DCA_ENABLED)) | |
2737 | return; | |
2738 | if (cspec->sdma_cpu[pidx] != cpu) { | |
2739 | cspec->sdma_cpu[pidx] = cpu; | |
2740 | cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ? | |
2741 | SYM_MASK(DCACtrlF, SendDma1DCAOPH) : | |
2742 | SYM_MASK(DCACtrlF, SendDma0DCAOPH)); | |
2743 | cspec->dca_rcvhdr_ctrl[4] |= | |
2744 | (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << | |
2745 | (ppd->hw_pidx ? | |
2746 | SYM_LSB(DCACtrlF, SendDma1DCAOPH) : | |
2747 | SYM_LSB(DCACtrlF, SendDma0DCAOPH)); | |
2748 | qib_devinfo(dd->pcidev, | |
2749 | "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu, | |
2750 | (long long) cspec->dca_rcvhdr_ctrl[4]); | |
2751 | qib_write_kreg(dd, KREG_IDX(DCACtrlF), | |
2752 | cspec->dca_rcvhdr_ctrl[4]); | |
2753 | cspec->dca_ctrl |= ppd->hw_pidx ? | |
2754 | SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) : | |
2755 | SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable); | |
2756 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); | |
2757 | } | |
2758 | } | |
2759 | ||
2760 | static void qib_setup_dca(struct qib_devdata *dd) | |
2761 | { | |
2762 | struct qib_chip_specific *cspec = dd->cspec; | |
2763 | int i; | |
2764 | ||
2765 | for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++) | |
2766 | cspec->rhdr_cpu[i] = -1; | |
2767 | for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) | |
2768 | cspec->sdma_cpu[i] = -1; | |
2769 | cspec->dca_rcvhdr_ctrl[0] = | |
2770 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) | | |
2771 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) | | |
2772 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) | | |
2773 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt)); | |
2774 | cspec->dca_rcvhdr_ctrl[1] = | |
2775 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) | | |
2776 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) | | |
2777 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) | | |
2778 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt)); | |
2779 | cspec->dca_rcvhdr_ctrl[2] = | |
2780 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) | | |
2781 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) | | |
2782 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) | | |
2783 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt)); | |
2784 | cspec->dca_rcvhdr_ctrl[3] = | |
2785 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) | | |
2786 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) | | |
2787 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) | | |
2788 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt)); | |
2789 | cspec->dca_rcvhdr_ctrl[4] = | |
2790 | (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) | | |
2791 | (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt)); | |
2792 | for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) | |
2793 | qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i, | |
2794 | cspec->dca_rcvhdr_ctrl[i]); | |
2795 | for (i = 0; i < cspec->num_msix_entries; i++) | |
2796 | setup_dca_notifier(dd, &cspec->msix_entries[i]); | |
2797 | } | |
2798 | ||
2799 | static void qib_irq_notifier_notify(struct irq_affinity_notify *notify, | |
2800 | const cpumask_t *mask) | |
2801 | { | |
2802 | struct qib_irq_notify *n = | |
2803 | container_of(notify, struct qib_irq_notify, notify); | |
2804 | int cpu = cpumask_first(mask); | |
2805 | ||
2806 | if (n->rcv) { | |
2807 | struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; | |
da12c1f6 | 2808 | |
2809 | qib_update_rhdrq_dca(rcd, cpu); |
2810 | } else { | |
2811 | struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; | |
da12c1f6 | 2812 | |
2813 | qib_update_sdma_dca(ppd, cpu); |
2814 | } | |
2815 | } | |
2816 | ||
2817 | static void qib_irq_notifier_release(struct kref *ref) | |
2818 | { | |
2819 | struct qib_irq_notify *n = | |
2820 | container_of(ref, struct qib_irq_notify, notify.kref); | |
2821 | struct qib_devdata *dd; | |
2822 | ||
2823 | if (n->rcv) { | |
2824 | struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; | |
da12c1f6 | 2825 | |
2826 | dd = rcd->dd; |
2827 | } else { | |
2828 | struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; | |
da12c1f6 | 2829 | |
2830 | dd = ppd->dd; |
2831 | } | |
2832 | qib_devinfo(dd->pcidev, | |
2833 | "release on HCA notify 0x%p n 0x%p\n", ref, n); | |
2834 | kfree(n); | |
2835 | } | |
2836 | #endif | |
2837 | ||
2838 | /* |
2839 | * Disable MSIx interrupt if enabled, call generic MSIx code | |
2840 | * to cleanup, and clear pending MSIx interrupts. | |
2841 | * Used for fallback to INTx, after reset, and when MSIx setup fails. | |
2842 | */ | |
2843 | static void qib_7322_nomsix(struct qib_devdata *dd) | |
2844 | { | |
2845 | u64 intgranted; | |
2846 | int n; | |
2847 | ||
2848 | dd->cspec->main_int_mask = ~0ULL; | |
2849 | n = dd->cspec->num_msix_entries; | |
2850 | if (n) { | |
2851 | int i; | |
2852 | ||
2853 | dd->cspec->num_msix_entries = 0; | |
a778f3fd | 2854 | for (i = 0; i < n; i++) { |
2855 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
2856 | reset_dca_notifier(dd, &dd->cspec->msix_entries[i]); | |
2857 | #endif | |
2858 | irq_set_affinity_hint( |
2859 | dd->cspec->msix_entries[i].msix.vector, NULL); | |
2860 | free_cpumask_var(dd->cspec->msix_entries[i].mask); | |
2861 | free_irq(dd->cspec->msix_entries[i].msix.vector, | |
2862 | dd->cspec->msix_entries[i].arg); | |
2863 | } | |
2864 | qib_nomsix(dd); |
2865 | } | |
2866 | /* make sure no MSIx interrupts are left pending */ | |
2867 | intgranted = qib_read_kreg64(dd, kr_intgranted); | |
2868 | if (intgranted) | |
2869 | qib_write_kreg(dd, kr_intgranted, intgranted); | |
2870 | } | |
2871 | ||
2872 | static void qib_7322_free_irq(struct qib_devdata *dd) | |
2873 | { | |
2874 | if (dd->cspec->irq) { | |
2875 | free_irq(dd->cspec->irq, dd); | |
2876 | dd->cspec->irq = 0; | |
2877 | } | |
2878 | qib_7322_nomsix(dd); | |
2879 | } | |
2880 | ||
2881 | static void qib_setup_7322_cleanup(struct qib_devdata *dd) | |
2882 | { | |
2883 | int i; | |
2884 | ||
2885 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
2886 | if (dd->flags & QIB_DCA_ENABLED) { | |
2887 | dca_remove_requester(&dd->pcidev->dev); | |
2888 | dd->flags &= ~QIB_DCA_ENABLED; | |
2889 | dd->cspec->dca_ctrl = 0; | |
2890 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl); | |
2891 | } | |
2892 | #endif | |
2893 | ||
2894 | qib_7322_free_irq(dd); |
2895 | kfree(dd->cspec->cntrs); | |
2896 | kfree(dd->cspec->sendchkenable); | |
2897 | kfree(dd->cspec->sendgrhchk); | |
2898 | kfree(dd->cspec->sendibchk); | |
2899 | kfree(dd->cspec->msix_entries); | |
2900 | for (i = 0; i < dd->num_pports; i++) { |
2901 | unsigned long flags; | |
2902 | u32 mask = QSFP_GPIO_MOD_PRS_N | | |
2903 | (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT); | |
2904 | ||
2905 | kfree(dd->pport[i].cpspec->portcntrs); | |
2906 | if (dd->flags & QIB_HAS_QSFP) { | |
2907 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | |
2908 | dd->cspec->gpio_mask &= ~mask; | |
2909 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | |
2910 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | |
2911 | qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data); | |
2912 | } | |
2913 | if (dd->pport[i].ibport_data.smi_ah) | |
2914 | ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah); | |
2915 | } | |
2916 | } | |
2917 | ||
2918 | /* handle SDMA interrupts */ | |
2919 | static void sdma_7322_intr(struct qib_devdata *dd, u64 istat) | |
2920 | { | |
2921 | struct qib_pportdata *ppd0 = &dd->pport[0]; | |
2922 | struct qib_pportdata *ppd1 = &dd->pport[1]; | |
2923 | u64 intr0 = istat & (INT_MASK_P(SDma, 0) | | |
2924 | INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0)); | |
2925 | u64 intr1 = istat & (INT_MASK_P(SDma, 1) | | |
2926 | INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1)); | |
2927 | ||
2928 | if (intr0) | |
2929 | qib_sdma_intr(ppd0); | |
2930 | if (intr1) | |
2931 | qib_sdma_intr(ppd1); | |
2932 | ||
2933 | if (istat & INT_MASK_PM(SDmaCleanupDone, 0)) | |
2934 | qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started); | |
2935 | if (istat & INT_MASK_PM(SDmaCleanupDone, 1)) | |
2936 | qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started); | |
2937 | } | |
2938 | ||
2939 | /* | |
2940 | * Set or clear the Send buffer available interrupt enable bit. | |
2941 | */ | |
2942 | static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint) | |
2943 | { | |
2944 | unsigned long flags; | |
2945 | ||
2946 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | |
2947 | if (needint) | |
2948 | dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail); | |
2949 | else | |
2950 | dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail); | |
2951 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | |
2952 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
2953 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
2954 | } | |
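/*
 * Hypothetical caller sketch (not actual driver code): a sender that
 * cannot get a PIO buffer would arm the interrupt with
 * qib_wantpiobuf_7322_intr(dd, 1), sleep until qib_ib_piobufavail()
 * wakes it from the qib_7322bufavail() handler below, then disarm
 * again with qib_wantpiobuf_7322_intr(dd, 0).
 */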
2955 | ||
2956 | /* | |
2957 | * Somehow got an interrupt with reserved bits set in interrupt status. | |
2958 | * Print a message so we know it happened, then clear them. | |
2959 | * keep mainline interrupt handler cache-friendly | |
2960 | */ | |
2961 | static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat) | |
2962 | { | |
2963 | u64 kills; | |
2965 | ||
2966 | kills = istat & ~QIB_I_BITSEXTANT; | 
2967 | qib_dev_err(dd, | 
2968 | "Clearing reserved interrupt(s) 0x%016llx\n", | 
2969 | (unsigned long long) kills); | 
2970 | qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills)); |
2971 | } | |
2972 | ||
2973 | /* keep mainline interrupt handler cache-friendly */ | |
2974 | static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd) | |
2975 | { | |
2976 | u32 gpiostatus; | |
2977 | int handled = 0; | |
2978 | int pidx; | |
2979 | ||
2980 | /* | |
2981 | * Boards for this chip currently don't use GPIO interrupts, | |
2982 | * so clear by writing GPIOstatus to GPIOclear, and complain | |
2983 | * to developer. To avoid endless repeats, clear | |
2984 | * the bits in the mask, since there is some kind of | |
2985 | * programming error or chip problem. | |
2986 | */ | |
2987 | gpiostatus = qib_read_kreg32(dd, kr_gpio_status); | |
2988 | /* | |
2989 | * In theory, writing GPIOstatus to GPIOclear could | |
2990 | * have a bad side-effect on some diagnostic that wanted | |
2991 | * to poll for a status-change, but the various shadows | |
2992 | * make that problematic at best. Diags will just suppress | |
2993 | * all GPIO interrupts during such tests. | |
2994 | */ | |
2995 | qib_write_kreg(dd, kr_gpio_clear, gpiostatus); | |
2996 | /* | |
2997 | * Check for QSFP MOD_PRS changes | |
2998 | * only works for single port if IB1 != pidx1 | |
2999 | */ | |
3000 | for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP); | |
3001 | ++pidx) { | |
3002 | struct qib_pportdata *ppd; | |
3003 | struct qib_qsfp_data *qd; | |
3004 | u32 mask; | |
da12c1f6 | 3005 | |
3006 | if (!dd->pport[pidx].link_speed_supported) |
3007 | continue; | |
3008 | mask = QSFP_GPIO_MOD_PRS_N; | |
3009 | ppd = dd->pport + pidx; | |
3010 | mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); | |
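/*
 * hw_pidx selects the per-port copy of the pin: port 1 (hw_pidx 0)
 * keeps the base MOD_PRS_N bit, port 2 shifts it up by
 * QSFP_GPIO_PORT2_SHIFT.
 */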
3011 | if (gpiostatus & dd->cspec->gpio_mask & mask) { | |
3012 | u64 pins; | |
da12c1f6 | 3013 | |
3014 | qd = &ppd->cpspec->qsfp_data; |
3015 | gpiostatus &= ~mask; | |
3016 | pins = qib_read_kreg64(dd, kr_extstatus); | |
3017 | pins >>= SYM_LSB(EXTStatus, GPIOIn); | |
3018 | if (!(pins & mask)) { | |
3019 | ++handled; | |
8482d5d1 | 3020 | qd->t_insert = jiffies; |
f0626710 | 3021 | queue_work(ib_wq, &qd->work); |
3022 | } |
3023 | } | |
3024 | } | |
3025 | ||
3026 | if (gpiostatus && !handled) { | |
3027 | const u32 mask = qib_read_kreg32(dd, kr_gpio_mask); | |
3028 | u32 gpio_irq = mask & gpiostatus; | |
3029 | ||
3030 | /* | |
3031 | * Clear any troublemakers, and update chip from shadow | |
3032 | */ | |
3033 | dd->cspec->gpio_mask &= ~gpio_irq; | |
3034 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | |
3035 | } | |
3036 | } | |
3037 | ||
3038 | /* | |
3039 | * Handle errors and unusual events first, separate function | |
3040 | * to improve cache hits for fast path interrupt handling. | |
3041 | */ | |
3042 | static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat) | |
3043 | { | |
3044 | if (istat & ~QIB_I_BITSEXTANT) | |
3045 | unknown_7322_ibits(dd, istat); | |
3046 | if (istat & QIB_I_GPIO) | |
3047 | unknown_7322_gpio_intr(dd); | |
3048 | if (istat & QIB_I_C_ERROR) { |
3049 | qib_write_kreg(dd, kr_errmask, 0ULL); | |
3050 | tasklet_schedule(&dd->error_tasklet); | |
3051 | } | |
3052 | if (istat & INT_MASK_P(Err, 0) && dd->rcd[0]) |
3053 | handle_7322_p_errors(dd->rcd[0]->ppd); | |
3054 | if (istat & INT_MASK_P(Err, 1) && dd->rcd[1]) | |
3055 | handle_7322_p_errors(dd->rcd[1]->ppd); | |
3056 | } | |
3057 | ||
3058 | /* | |
3059 | * Dynamically adjust the rcv int timeout for a context based on incoming | |
3060 | * packet rate. | |
3061 | */ | |
3062 | static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts) | |
3063 | { | |
3064 | struct qib_devdata *dd = rcd->dd; | |
3065 | u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt]; | |
3066 | ||
3067 | /* | |
3068 | * Dynamically adjust idle timeout on chip | |
3069 | * based on number of packets processed. | |
3070 | */ | |
3071 | if (npkts < rcv_int_count && timeout > 2) | |
3072 | timeout >>= 1; | |
3073 | else if (npkts >= rcv_int_count && timeout < rcv_int_timeout) | |
3074 | timeout = min(timeout << 1, rcv_int_timeout); | |
3075 | else | |
3076 | return; | |
3077 | ||
3078 | dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout; | |
3079 | qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout); | |
3080 | } | |
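/*
 * Illustration of the policy above: a context draining fewer than
 * rcv_int_count packets per interrupt has its timeout halved (while
 * it is still above 2), a busier context has it doubled up to the
 * rcv_int_timeout cap, and anything already at a limit is left alone.
 */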
3081 | ||
3082 | /* | |
3083 | * This is the main interrupt handler. | |
3084 | * It will normally only be used for low frequency interrupts but may | |
3085 | * have to handle all interrupts if INTx is enabled or fewer than normal | |
3086 | * MSIx interrupts were allocated. | |
3087 | * This routine should ignore the interrupt bits for any of the | |
3088 | * dedicated MSIx handlers. | |
3089 | */ | |
3090 | static irqreturn_t qib_7322intr(int irq, void *data) | |
3091 | { | |
3092 | struct qib_devdata *dd = data; | |
3093 | irqreturn_t ret; | |
3094 | u64 istat; | |
3095 | u64 ctxtrbits; | |
3096 | u64 rmask; | |
3097 | unsigned i; | |
3098 | u32 npkts; | |
3099 | ||
3100 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) { | |
3101 | /* | |
3102 | * This return value is not great, but we do not want the | |
3103 | * interrupt core code to remove our interrupt handler | |
3104 | * because we don't appear to be handling an interrupt | |
3105 | * during a chip reset. | |
3106 | */ | |
3107 | ret = IRQ_HANDLED; | |
3108 | goto bail; | |
3109 | } | |
3110 | ||
3111 | istat = qib_read_kreg64(dd, kr_intstatus); | |
3112 | ||
3113 | if (unlikely(istat == ~0ULL)) { | |
3114 | qib_bad_intrstatus(dd); | |
3115 | qib_dev_err(dd, "Interrupt status all f's, skipping\n"); | |
3116 | /* don't know if it was our interrupt or not */ | |
3117 | ret = IRQ_NONE; | |
3118 | goto bail; | |
3119 | } | |
3120 | ||
3121 | istat &= dd->cspec->main_int_mask; | |
3122 | if (unlikely(!istat)) { | |
3123 | /* already handled, or shared and not us */ | |
3124 | ret = IRQ_NONE; | |
3125 | goto bail; | |
3126 | } | |
3127 | ||
1ed88dd7 | 3128 | this_cpu_inc(*dd->int_counter); |
3129 | |
3130 | /* handle "errors" of various kinds first, device ahead of port */ | |
3131 | if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO | | |
3132 | QIB_I_C_ERROR | INT_MASK_P(Err, 0) | | |
3133 | INT_MASK_P(Err, 1)))) | |
3134 | unlikely_7322_intr(dd, istat); | |
3135 | ||
3136 | /* | |
3137 | * Clear the interrupt bits we found set, relatively early, so we | |
3138 |  * "know" the chip will have seen this by the time we process | 
3139 | * the queue, and will re-interrupt if necessary. The processor | |
3140 | * itself won't take the interrupt again until we return. | |
3141 | */ | |
3142 | qib_write_kreg(dd, kr_intclear, istat); | |
3143 | ||
3144 | /* | |
3145 | * Handle kernel receive queues before checking for pio buffers | |
3146 | * available since receives can overflow; piobuf waiters can afford | |
3147 | * a few extra cycles, since they were waiting anyway. | |
3148 | */ | |
3149 | ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK); | |
3150 | if (ctxtrbits) { | |
3151 | rmask = (1ULL << QIB_I_RCVAVAIL_LSB) | | |
3152 | (1ULL << QIB_I_RCVURG_LSB); | |
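/*
 * rmask starts as the RcvAvail + RcvUrg bit pair for context 0;
 * shifting it left once per iteration walks the same two bits for
 * each successive kernel context.
 */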
3153 | for (i = 0; i < dd->first_user_ctxt; i++) { | |
3154 | if (ctxtrbits & rmask) { | |
3155 | ctxtrbits &= ~rmask; | |
44d75d3d | 3156 | if (dd->rcd[i]) |
f931551b | 3157 | qib_kreceive(dd->rcd[i], NULL, &npkts); |
3158 | } |
3159 | rmask <<= 1; | |
3160 | } | |
3161 | if (ctxtrbits) { | |
3162 | ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) | | |
3163 | (ctxtrbits >> QIB_I_RCVURG_LSB); | |
3164 | qib_handle_urcv(dd, ctxtrbits); | |
3165 | } | |
3166 | } | |
3167 | ||
3168 | if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1))) | |
3169 | sdma_7322_intr(dd, istat); | |
3170 | ||
3171 | if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED)) | |
3172 | qib_ib_piobufavail(dd); | |
3173 | ||
3174 | ret = IRQ_HANDLED; | |
3175 | bail: | |
3176 | return ret; | |
3177 | } | |
3178 | ||
3179 | /* | |
3180 | * Dedicated receive packet available interrupt handler. | |
3181 | */ | |
3182 | static irqreturn_t qib_7322pintr(int irq, void *data) | |
3183 | { | |
3184 | struct qib_ctxtdata *rcd = data; | |
3185 | struct qib_devdata *dd = rcd->dd; | |
3186 | u32 npkts; | |
3187 | ||
3188 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | |
3189 | /* | |
3190 | * This return value is not great, but we do not want the | |
3191 | * interrupt core code to remove our interrupt handler | |
3192 | * because we don't appear to be handling an interrupt | |
3193 | * during a chip reset. | |
3194 | */ | |
3195 | return IRQ_HANDLED; | |
3196 | ||
1ed88dd7 | 3197 | this_cpu_inc(*dd->int_counter); |
f931551b | 3198 | |
3199 | /* Clear the interrupt bit we expect to be set. */ |
3200 | qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | | |
3201 | (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); | |
3202 | ||
3203 | qib_kreceive(rcd, NULL, &npkts); | |
3204 | |
3205 | return IRQ_HANDLED; | |
3206 | } | |
3207 | ||
3208 | /* | |
3209 | * Dedicated Send buffer available interrupt handler. | |
3210 | */ | |
3211 | static irqreturn_t qib_7322bufavail(int irq, void *data) | |
3212 | { | |
3213 | struct qib_devdata *dd = data; | |
3214 | ||
3215 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | |
3216 | /* | |
3217 | * This return value is not great, but we do not want the | |
3218 | * interrupt core code to remove our interrupt handler | |
3219 | * because we don't appear to be handling an interrupt | |
3220 | * during a chip reset. | |
3221 | */ | |
3222 | return IRQ_HANDLED; | |
3223 | ||
1ed88dd7 | 3224 | this_cpu_inc(*dd->int_counter); |
3225 | |
3226 | /* Clear the interrupt bit we expect to be set. */ | |
3227 | qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL); | |
3228 | ||
3229 | /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */ | |
3230 | if (dd->flags & QIB_INITTED) | |
3231 | qib_ib_piobufavail(dd); | |
3232 | else | |
3233 | qib_wantpiobuf_7322_intr(dd, 0); | |
3234 | ||
3235 | return IRQ_HANDLED; | |
3236 | } | |
3237 | ||
3238 | /* | |
3239 | * Dedicated Send DMA interrupt handler. | |
3240 | */ | |
3241 | static irqreturn_t sdma_intr(int irq, void *data) | |
3242 | { | |
3243 | struct qib_pportdata *ppd = data; | |
3244 | struct qib_devdata *dd = ppd->dd; | |
3245 | ||
3246 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | |
3247 | /* | |
3248 | * This return value is not great, but we do not want the | |
3249 | * interrupt core code to remove our interrupt handler | |
3250 | * because we don't appear to be handling an interrupt | |
3251 | * during a chip reset. | |
3252 | */ | |
3253 | return IRQ_HANDLED; | |
3254 | ||
1ed88dd7 | 3255 | this_cpu_inc(*dd->int_counter); |
f931551b | 3256 | |
3257 | /* Clear the interrupt bit we expect to be set. */ |
3258 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | |
3259 | INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0)); | |
3260 | qib_sdma_intr(ppd); | |
3261 | ||
3262 | return IRQ_HANDLED; | |
3263 | } | |
3264 | ||
3265 | /* | |
3266 | * Dedicated Send DMA idle interrupt handler. | |
3267 | */ | |
3268 | static irqreturn_t sdma_idle_intr(int irq, void *data) | |
3269 | { | |
3270 | struct qib_pportdata *ppd = data; | |
3271 | struct qib_devdata *dd = ppd->dd; | |
3272 | ||
3273 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | |
3274 | /* | |
3275 | * This return value is not great, but we do not want the | |
3276 | * interrupt core code to remove our interrupt handler | |
3277 | * because we don't appear to be handling an interrupt | |
3278 | * during a chip reset. | |
3279 | */ | |
3280 | return IRQ_HANDLED; | |
3281 | ||
1ed88dd7 | 3282 | this_cpu_inc(*dd->int_counter); |
f931551b | 3283 | |
3284 | /* Clear the interrupt bit we expect to be set. */ |
3285 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | |
3286 | INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0)); | |
3287 | qib_sdma_intr(ppd); | |
3288 | ||
3289 | return IRQ_HANDLED; | |
3290 | } | |
3291 | ||
3292 | /* | |
3293 | * Dedicated Send DMA progress interrupt handler. | |
3294 | */ | |
3295 | static irqreturn_t sdma_progress_intr(int irq, void *data) | |
3296 | { | |
3297 | struct qib_pportdata *ppd = data; | |
3298 | struct qib_devdata *dd = ppd->dd; | |
3299 | ||
3300 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | |
3301 | /* | |
3302 | * This return value is not great, but we do not want the | |
3303 | * interrupt core code to remove our interrupt handler | |
3304 | * because we don't appear to be handling an interrupt | |
3305 | * during a chip reset. | |
3306 | */ | |
3307 | return IRQ_HANDLED; | |
3308 | ||
1ed88dd7 | 3309 | this_cpu_inc(*dd->int_counter); |
f931551b | 3310 | |
3311 | /* Clear the interrupt bit we expect to be set. */ |
3312 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | |
3313 | INT_MASK_P(SDmaProgress, 1) : | |
3314 | INT_MASK_P(SDmaProgress, 0)); | |
3315 | qib_sdma_intr(ppd); | |
3316 | ||
3317 | return IRQ_HANDLED; | |
3318 | } | |
3319 | ||
3320 | /* | |
3321 | * Dedicated Send DMA cleanup interrupt handler. | |
3322 | */ | |
3323 | static irqreturn_t sdma_cleanup_intr(int irq, void *data) | |
3324 | { | |
3325 | struct qib_pportdata *ppd = data; | |
3326 | struct qib_devdata *dd = ppd->dd; | |
3327 | ||
3328 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | |
3329 | /* | |
3330 | * This return value is not great, but we do not want the | |
3331 | * interrupt core code to remove our interrupt handler | |
3332 | * because we don't appear to be handling an interrupt | |
3333 | * during a chip reset. | |
3334 | */ | |
3335 | return IRQ_HANDLED; | |
3336 | ||
1ed88dd7 | 3337 | this_cpu_inc(*dd->int_counter); |
f931551b | 3338 | |
3339 | /* Clear the interrupt bit we expect to be set. */ |
3340 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | |
3341 | INT_MASK_PM(SDmaCleanupDone, 1) : | |
3342 | INT_MASK_PM(SDmaCleanupDone, 0)); | |
3343 | qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started); | |
3344 | ||
3345 | return IRQ_HANDLED; | |
3346 | } | |
3347 | ||
3348 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
3349 | ||
3350 | static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m) | |
3351 | { | |
3352 | if (!m->dca) | |
3353 | return; | |
3354 | qib_devinfo(dd->pcidev, | |
3355 | "Disabling notifier on HCA %d irq %d\n", | |
3356 | dd->unit, | |
3357 | m->msix.vector); | |
3358 | irq_set_affinity_notifier( | |
3359 | m->msix.vector, | |
3360 | NULL); | |
3361 | m->notifier = NULL; | |
3362 | } | |
3363 | ||
3364 | static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m) | |
3365 | { | |
3366 | struct qib_irq_notify *n; | |
3367 | ||
3368 | if (!m->dca) | |
3369 | return; | |
3370 | n = kzalloc(sizeof(*n), GFP_KERNEL); | |
3371 | if (n) { | |
3372 | int ret; | |
3373 | ||
3374 | m->notifier = n; | |
3375 | n->notify.irq = m->msix.vector; | |
3376 | n->notify.notify = qib_irq_notifier_notify; | |
3377 | n->notify.release = qib_irq_notifier_release; | |
3378 | n->arg = m->arg; | |
3379 | n->rcv = m->rcv; | |
3380 | qib_devinfo(dd->pcidev, | |
3381 | "set notifier irq %d rcv %d notify %p\n", | |
3382 | n->notify.irq, n->rcv, &n->notify); | |
3383 | ret = irq_set_affinity_notifier( | |
3384 | n->notify.irq, | |
3385 | &n->notify); | |
3386 | if (ret) { | |
3387 | m->notifier = NULL; | |
3388 | kfree(n); | |
3389 | } | |
3390 | } | |
3391 | } | |
3392 | ||
3393 | #endif | |
3394 | ||
3395 | /* |
3396 | * Set up our chip-specific interrupt handler. | |
3397 | * The interrupt type has already been setup, so | |
3398 | * we just need to do the registration and error checking. | |
3399 | * If we are using MSIx interrupts, we may fall back to | |
3400 | * INTx later, if the interrupt handler doesn't get called | |
3401 | * within 1/2 second (see verify_interrupt()). | |
3402 | */ | |
3403 | static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend) | |
3404 | { | |
3405 | int ret, i, msixnum; | |
3406 | u64 redirect[6]; | |
3407 | u64 mask; | |
3408 | const struct cpumask *local_mask; |
3409 | int firstcpu, secondcpu = 0, currrcvcpu = 0; | |
3410 | |
3411 | if (!dd->num_pports) | |
3412 | return; | |
3413 | ||
3414 | if (clearpend) { | |
3415 | /* | |
3416 | * if not switching interrupt types, be sure interrupts are | |
3417 | * disabled, and then clear anything pending at this point, | |
3418 | * because we are starting clean. | |
3419 | */ | |
3420 | qib_7322_set_intr_state(dd, 0); | |
3421 | ||
3422 | /* clear the reset error, init error/hwerror mask */ | |
3423 | qib_7322_init_hwerrors(dd); | |
3424 | ||
3425 | /* clear any interrupt bits that might be set */ | |
3426 | qib_write_kreg(dd, kr_intclear, ~0ULL); | |
3427 | ||
3428 | /* make sure no pending MSIx intr, and clear diag reg */ | |
3429 | qib_write_kreg(dd, kr_intgranted, ~0ULL); | |
3430 | qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL); | |
3431 | } | |
3432 | ||
3433 | if (!dd->cspec->num_msix_entries) { | |
3434 | /* Try to get INTx interrupt */ | |
3435 | try_intx: | |
3436 | if (!dd->pcidev->irq) { | |
3437 | qib_dev_err(dd, |
3438 | "irq is 0, BIOS error? Interrupts won't work\n"); | |
3439 | goto bail; |
3440 | } | |
3441 | ret = request_irq(dd->pcidev->irq, qib_7322intr, | |
3442 | IRQF_SHARED, QIB_DRV_NAME, dd); | |
3443 | if (ret) { | |
3444 | qib_dev_err(dd, |
3445 | "Couldn't setup INTx interrupt (irq=%d): %d\n", | |
3446 | dd->pcidev->irq, ret); | |
3447 | goto bail; |
3448 | } | |
3449 | dd->cspec->irq = dd->pcidev->irq; | |
3450 | dd->cspec->main_int_mask = ~0ULL; | |
3451 | goto bail; | |
3452 | } | |
3453 | ||
3454 | /* Try to get MSIx interrupts */ | |
041af0bb | 3455 | memset(redirect, 0, sizeof(redirect)); |
3456 | mask = ~0ULL; |
3457 | msixnum = 0; | |
3458 | local_mask = cpumask_of_pcibus(dd->pcidev->bus); |
3459 | firstcpu = cpumask_first(local_mask); | |
3460 | if (firstcpu >= nr_cpu_ids || | |
3461 | cpumask_weight(local_mask) == num_online_cpus()) { | |
3462 | local_mask = topology_core_cpumask(0); | |
3463 | firstcpu = cpumask_first(local_mask); | |
3464 | } | |
3465 | if (firstcpu < nr_cpu_ids) { | |
3466 | secondcpu = cpumask_next(firstcpu, local_mask); | |
3467 | if (secondcpu >= nr_cpu_ids) | |
3468 | secondcpu = firstcpu; | |
3469 | currrcvcpu = secondcpu; | |
3470 | } | |
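/*
 * Example of the resulting spread: with the HCA local to CPUs 8-15,
 * firstcpu = 8 takes the non-receive vectors, while receive vectors
 * are assigned round-robin starting at secondcpu = 9 and wrapping
 * back to it when the local mask is exhausted.
 */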
3471 | for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) { |
3472 | irq_handler_t handler; | |
3473 | void *arg; |
3474 | u64 val; | |
3475 | int lsb, reg, sh; | |
3476 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
3477 | int dca = 0; | |
3478 | #endif | |
f931551b | 3479 | |
3480 | dd->cspec->msix_entries[msixnum]. |
3481 | name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1] | |
3482 | = '\0'; | |
3483 | if (i < ARRAY_SIZE(irq_table)) { |
3484 | if (irq_table[i].port) { | |
3485 | /* skip if for a non-configured port */ | |
3486 | if (irq_table[i].port > dd->num_pports) | |
3487 | continue; | |
3488 | arg = dd->pport + irq_table[i].port - 1; | |
3489 | } else | |
3490 | arg = dd; | |
3491 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
3492 | dca = irq_table[i].dca; | |
3493 | #endif | |
3494 | lsb = irq_table[i].lsb; |
3495 | handler = irq_table[i].handler; | |
3496 | snprintf(dd->cspec->msix_entries[msixnum].name, |
3497 | sizeof(dd->cspec->msix_entries[msixnum].name) | |
3498 | - 1, | |
3499 | QIB_DRV_NAME "%d%s", dd->unit, | |
3500 | irq_table[i].name); | |
3501 | } else { |
3502 | unsigned ctxt; | |
3503 | ||
3504 | ctxt = i - ARRAY_SIZE(irq_table); | |
3505 | /* per krcvq context receive interrupt */ | |
3506 | arg = dd->rcd[ctxt]; | |
3507 | if (!arg) | |
3508 | continue; | |
3509 | if (qib_krcvq01_no_msi && ctxt < 2) |
3510 | continue; | |
3511 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
3512 | dca = 1; | |
3513 | #endif | |
3514 | lsb = QIB_I_RCVAVAIL_LSB + ctxt; |
3515 | handler = qib_7322pintr; | |
3516 | snprintf(dd->cspec->msix_entries[msixnum].name, |
3517 | sizeof(dd->cspec->msix_entries[msixnum].name) | |
3518 | - 1, | |
3519 | QIB_DRV_NAME "%d (kctx)", dd->unit); | |
f931551b | 3520 | } |
3521 | ret = request_irq( |
3522 | dd->cspec->msix_entries[msixnum].msix.vector, | |
3523 | handler, 0, dd->cspec->msix_entries[msixnum].name, | |
3524 | arg); | |
3525 | if (ret) { |
3526 | /* | |
3527 | * Shouldn't happen since the enable said we could | |
3528 | * have as many as we are trying to setup here. | |
3529 | */ | |
3530 | qib_dev_err(dd, |
3531 | "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n", | |
3532 | msixnum, | |
3533 | dd->cspec->msix_entries[msixnum].msix.vector, |
3534 | ret); | |
3535 | qib_7322_nomsix(dd); |
3536 | goto try_intx; | |
3537 | } | |
a778f3fd | 3538 | dd->cspec->msix_entries[msixnum].arg = arg; |
3539 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
3540 | dd->cspec->msix_entries[msixnum].dca = dca; | |
3541 | dd->cspec->msix_entries[msixnum].rcv = | |
3542 | handler == qib_7322pintr; | |
3543 | #endif | |
3544 | if (lsb >= 0) { |
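/*
 * Each IntRedirect register packs IBA7322_REDIRECT_VEC_PER_REG
 * vector fields, so interrupt bit "lsb" lands in redirect register
 * lsb / IBA7322_REDIRECT_VEC_PER_REG at field position
 * lsb % IBA7322_REDIRECT_VEC_PER_REG, steering that source to
 * MSIx vector msixnum.
 */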
3545 | reg = lsb / IBA7322_REDIRECT_VEC_PER_REG; | |
3546 | sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) * | |
3547 | SYM_LSB(IntRedirect0, vec1); | |
3548 | mask &= ~(1ULL << lsb); | |
3549 | redirect[reg] |= ((u64) msixnum) << sh; | |
3550 | } | |
3551 | val = qib_read_kreg64(dd, 2 * msixnum + 1 + | |
3552 | (QIB_7322_MsixTable_OFFS / sizeof(u64))); | |
3553 | if (firstcpu < nr_cpu_ids && |
3554 | zalloc_cpumask_var( | |
3555 | &dd->cspec->msix_entries[msixnum].mask, | |
3556 | GFP_KERNEL)) { | |
3557 | if (handler == qib_7322pintr) { | |
3558 | cpumask_set_cpu(currrcvcpu, | |
3559 | dd->cspec->msix_entries[msixnum].mask); | |
3560 | currrcvcpu = cpumask_next(currrcvcpu, | |
3561 | local_mask); | |
3562 | if (currrcvcpu >= nr_cpu_ids) | |
3563 | currrcvcpu = secondcpu; | |
3564 | } else { | |
3565 | cpumask_set_cpu(firstcpu, | |
3566 | dd->cspec->msix_entries[msixnum].mask); | |
3567 | } | |
3568 | irq_set_affinity_hint( | |
3569 | dd->cspec->msix_entries[msixnum].msix.vector, | |
3570 | dd->cspec->msix_entries[msixnum].mask); | |
3571 | } | |
3572 | msixnum++; |
3573 | } | |
3574 | /* Initialize the vector mapping */ | |
3575 | for (i = 0; i < ARRAY_SIZE(redirect); i++) | |
3576 | qib_write_kreg(dd, kr_intredirect + i, redirect[i]); | |
3577 | dd->cspec->main_int_mask = mask; | |
3578 | tasklet_init(&dd->error_tasklet, qib_error_tasklet, |
3579 | (unsigned long)dd); | |
3580 | bail:; |
3581 | } | |
3582 | ||
3583 | /** | |
3584 | * qib_7322_boardname - fill in the board name and note features | |
3585 | * @dd: the qlogic_ib device | |
3586 | * | |
3587 | * info will be based on the board revision register | |
3588 | */ | |
3589 | static unsigned qib_7322_boardname(struct qib_devdata *dd) | |
3590 | { | |
3591 | /* Will need enumeration of board-types here */ | |
3592 | char *n; | |
3593 | u32 boardid, namelen; | |
3594 | unsigned features = DUAL_PORT_CAP; | |
3595 | ||
3596 | boardid = SYM_FIELD(dd->revision, Revision, BoardID); | |
3597 | ||
3598 | switch (boardid) { | |
3599 | case 0: | |
3600 | n = "InfiniPath_QLE7342_Emulation"; | |
3601 | break; | |
3602 | case 1: | |
3603 | n = "InfiniPath_QLE7340"; | |
3604 | dd->flags |= QIB_HAS_QSFP; | |
3605 | features = PORT_SPD_CAP; | |
3606 | break; | |
3607 | case 2: | |
3608 | n = "InfiniPath_QLE7342"; | |
3609 | dd->flags |= QIB_HAS_QSFP; | |
3610 | break; | |
3611 | case 3: | |
3612 | n = "InfiniPath_QMI7342"; | |
3613 | break; | |
3614 | case 4: | |
3615 | n = "InfiniPath_Unsupported7342"; | |
3616 | qib_dev_err(dd, "Unsupported version of QMH7342\n"); | |
3617 | features = 0; | |
3618 | break; | |
3619 | case BOARD_QMH7342: | |
3620 | n = "InfiniPath_QMH7342"; | |
3621 | features = 0x24; | |
3622 | break; | |
3623 | case BOARD_QME7342: | |
3624 | n = "InfiniPath_QME7342"; | |
3625 | break; | |
3626 | case 8: |
3627 | n = "InfiniPath_QME7362"; | |
3628 | dd->flags |= QIB_HAS_QSFP; | |
3629 | break; | |
3630 | case BOARD_QMH7360: |
3631 | n = "Intel IB QDR 1P FLR-QSFP Adptr"; | |
3632 | dd->flags |= QIB_HAS_QSFP; | |
3633 | break; | |
3634 | case 15: |
3635 | n = "InfiniPath_QLE7342_TEST"; | |
3636 | dd->flags |= QIB_HAS_QSFP; | |
3637 | break; | |
3638 | default: | |
3639 | n = "InfiniPath_QLE73xy_UNKNOWN"; | |
3640 | qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid); | |
3641 | break; | |
3642 | } | |
3643 | dd->board_atten = 1; /* index into txdds_Xdr */ | |
3644 | ||
3645 | namelen = strlen(n) + 1; | |
3646 | dd->boardname = kmalloc(namelen, GFP_KERNEL); | |
3647 | if (!dd->boardname) | |
3648 | qib_dev_err(dd, "Failed allocation for board name: %s\n", n); | |
3649 | else | |
3650 | snprintf(dd->boardname, namelen, "%s", n); | |
3651 | ||
3652 | snprintf(dd->boardversion, sizeof(dd->boardversion), | |
3653 | "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", | |
3654 | QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname, | |
3655 | (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch), | |
3656 | dd->majrev, dd->minrev, | |
3657 | (unsigned)SYM_FIELD(dd->revision, Revision_R, SW)); | |
3658 | ||
3659 | if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) { | |
3660 | qib_devinfo(dd->pcidev, |
3661 | "IB%u: Forced to single port mode by module parameter\n", | |
3662 | dd->unit); | |
3663 | features &= PORT_SPD_CAP; |
3664 | } | |
3665 | ||
3666 | return features; | |
3667 | } | |
3668 | ||
3669 | /* | |
3670 | * This routine sleeps, so it can only be called from user context, not | |
3671 | * from interrupt context. | |
3672 | */ | |
3673 | static int qib_do_7322_reset(struct qib_devdata *dd) | |
3674 | { | |
3675 | u64 val; | |
3676 | u64 *msix_vecsave; | |
3677 | int i, msix_entries, ret = 1; | |
3678 | u16 cmdval; | |
3679 | u8 int_line, clinesz; | |
3680 | unsigned long flags; | |
3681 | ||
3682 | /* Use dev_err so it shows up in logs, etc. */ | |
3683 | qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit); | |
3684 | ||
3685 | qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz); | |
3686 | ||
3687 | msix_entries = dd->cspec->num_msix_entries; | |
3688 | ||
3689 | /* no interrupts till re-initted */ | |
3690 | qib_7322_set_intr_state(dd, 0); | |
3691 | ||
3692 | if (msix_entries) { | |
3693 | qib_7322_nomsix(dd); | |
3694 | /* can be up to 512 bytes, too big for stack */ | |
3695 | msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries * | |
3696 | sizeof(u64), GFP_KERNEL); | |
3697 | if (!msix_vecsave) | |
3698 | qib_dev_err(dd, "No mem to save MSIx data\n"); | |
3699 | } else | |
3700 | msix_vecsave = NULL; | |
3701 | ||
3702 | /* | |
3703 | * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector | |
3704 | * info that is set up by the BIOS, so we have to save and restore | |
3705 | * it ourselves. There is some risk something could change it, | |
3706 | * after we save it, but since we have disabled the MSIx, it | |
3707 | * shouldn't be touched... | |
3708 | */ | |
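/*
 * Layout assumed by the loop below: the MSIx table exposes two u64
 * slots per vector, the address at index 2*i and the data at
 * 2*i + 1, which is why 2 * num_msix_entries u64s were allocated
 * above.
 */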
3709 | for (i = 0; i < msix_entries; i++) { | |
3710 | u64 vecaddr, vecdata; | |
da12c1f6 | 3711 | |
3712 | vecaddr = qib_read_kreg64(dd, 2 * i + |
3713 | (QIB_7322_MsixTable_OFFS / sizeof(u64))); | |
3714 | vecdata = qib_read_kreg64(dd, 1 + 2 * i + | |
3715 | (QIB_7322_MsixTable_OFFS / sizeof(u64))); | |
3716 | if (msix_vecsave) { | |
3717 | msix_vecsave[2 * i] = vecaddr; | |
3718 | /* save it without the masked bit set */ | |
3719 | msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL; | |
3720 | } | |
3721 | } | |
3722 | ||
3723 | dd->pport->cpspec->ibdeltainprog = 0; | |
3724 | dd->pport->cpspec->ibsymdelta = 0; | |
3725 | dd->pport->cpspec->iblnkerrdelta = 0; | |
3726 | dd->pport->cpspec->ibmalfdelta = 0; | |
3727 | /* so we check interrupts work again */ |
3728 | dd->z_int_counter = qib_int_counter(dd); | |
3729 | |
3730 | /* | |
3731 | * Keep chip from being accessed until we are ready. Use | |
3732 | * writeq() directly, to allow the write even though QIB_PRESENT | |
e9c54999 | 3733 | * isn't set. |
3734 | */ |
3735 | dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR); | |
3736 | dd->flags |= QIB_DOING_RESET; | |
3737 | val = dd->control | QLOGIC_IB_C_RESET; | |
3738 | writeq(val, &dd->kregbase[kr_control]); | |
3739 | ||
3740 | for (i = 1; i <= 5; i++) { | |
3741 | /* | |
3742 | * Allow MBIST, etc. to complete; longer on each retry. | |
3743 | * We sometimes get machine checks from bus timeout if no | |
3744 | * response, so for now, make it *really* long. | |
3745 | */ | |
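/*
 * Backoff example: pass 1 sleeps 1000 + 2*3000 = 7000 ms, growing
 * by 3 seconds per retry up to 19 seconds on the fifth and final
 * attempt.
 */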
3746 | msleep(1000 + (1 + i) * 3000); | |
3747 | ||
3748 | qib_pcie_reenable(dd, cmdval, int_line, clinesz); | |
3749 | ||
3750 | /* | |
3751 | * Use readq directly, so we don't need to mark it as PRESENT | |
3752 | * until we get a successful indication that all is well. | |
3753 | */ | |
3754 | val = readq(&dd->kregbase[kr_revision]); | |
3755 | if (val == dd->revision) | |
3756 | break; | |
3757 | if (i == 5) { | |
3758 | qib_dev_err(dd, |
3759 | "Failed to initialize after reset, unusable\n"); | |
3760 | ret = 0; |
3761 | goto bail; | |
3762 | } | |
3763 | } | |
3764 | ||
3765 | dd->flags |= QIB_PRESENT; /* it's back */ | |
3766 | ||
3767 | if (msix_entries) { | |
3768 | /* restore the MSIx vector address and data if saved above */ | |
3769 | for (i = 0; i < msix_entries; i++) { | |
a778f3fd | 3770 | dd->cspec->msix_entries[i].msix.entry = i; |
3771 | if (!msix_vecsave || !msix_vecsave[2 * i]) |
3772 | continue; | |
3773 | qib_write_kreg(dd, 2 * i + | |
3774 | (QIB_7322_MsixTable_OFFS / sizeof(u64)), | |
3775 | msix_vecsave[2 * i]); | |
3776 | qib_write_kreg(dd, 1 + 2 * i + | |
3777 | (QIB_7322_MsixTable_OFFS / sizeof(u64)), | |
3778 | msix_vecsave[1 + 2 * i]); | |
3779 | } | |
3780 | } | |
3781 | ||
3782 | /* initialize the remaining registers. */ | |
3783 | for (i = 0; i < dd->num_pports; ++i) | |
3784 | write_7322_init_portregs(&dd->pport[i]); | |
3785 | write_7322_initregs(dd); | |
3786 | ||
3787 | if (qib_pcie_params(dd, dd->lbus_width, | |
3788 | &dd->cspec->num_msix_entries, | |
3789 | dd->cspec->msix_entries)) | |
3790 | qib_dev_err(dd, |
3791 | "Reset failed to setup PCIe or interrupts; continuing anyway\n"); | |
3792 | |
3793 | qib_setup_7322_interrupt(dd, 1); | |
3794 | ||
3795 | for (i = 0; i < dd->num_pports; ++i) { | |
3796 | struct qib_pportdata *ppd = &dd->pport[i]; | |
3797 | ||
3798 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
3799 | ppd->lflags |= QIBL_IB_FORCE_NOTIFY; | |
3800 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | |
3801 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
3802 | } | |
3803 | ||
3804 | bail: | |
3805 | dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */ | |
3806 | kfree(msix_vecsave); | |
3807 | return ret; | |
3808 | } | |
3809 | ||
3810 | /** | |
3811 | * qib_7322_put_tid - write a TID to the chip | |
3812 | * @dd: the qlogic_ib device | |
3813 | * @tidptr: pointer to the expected TID (in chip) to update | |
3814 |  * @type: 0 for eager, 1 for expected | 
3815 | * @pa: physical address of in memory buffer; tidinvalid if freeing | |
3816 | */ | |
3817 | static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, | |
3818 | u32 type, unsigned long pa) | |
3819 | { | |
3820 | if (!(dd->flags & QIB_PRESENT)) | |
3821 | return; | |
3822 | if (pa != dd->tidinvalid) { | |
3823 | u64 chippa = pa >> IBA7322_TID_PA_SHIFT; | |
3824 | ||
3825 | /* paranoia checks */ | |
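/*
 * Example: with 2KB alignment the low 11 bits of pa must be zero;
 * any set low bits are lost in the right-shift above, so the
 * reconstruction compare below catches a misaligned address.
 */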
3826 | if (pa != (chippa << IBA7322_TID_PA_SHIFT)) { | |
3827 | qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n", | |
3828 | pa); | |
3829 | return; | |
3830 | } | |
3831 | if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) { | |
3832 | qib_dev_err(dd, |
3833 | "Physical page address 0x%lx larger than supported\n", | |
3834 | pa); | |
3835 | return; |
3836 | } | |
3837 | ||
3838 | if (type == RCVHQ_RCV_TYPE_EAGER) | |
3839 | chippa |= dd->tidtemplate; | |
3840 | else /* for now, always full 4KB page */ | |
3841 | chippa |= IBA7322_TID_SZ_4K; | |
3842 | pa = chippa; | |
3843 | } | |
3844 | writeq(pa, tidptr); | |
3845 | mmiowb(); | |
3846 | } | |
3847 | ||
3848 | /** | |
3849 | * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager | |
3850 | * @dd: the qlogic_ib device | |
3851 |  * @rcd: the ctxt data to clear | 
3852 | * | |
3853 | * clear all TID entries for a ctxt, expected and eager. | |
3854 | * Used from qib_close(). | |
3855 | */ | |
3856 | static void qib_7322_clear_tids(struct qib_devdata *dd, | |
3857 | struct qib_ctxtdata *rcd) | |
3858 | { | |
3859 | u64 __iomem *tidbase; | |
3860 | unsigned long tidinv; | |
3861 | u32 ctxt; | |
3862 | int i; | |
3863 | ||
3864 | if (!dd->kregbase || !rcd) | |
3865 | return; | |
3866 | ||
3867 | ctxt = rcd->ctxt; | |
3868 | ||
3869 | tidinv = dd->tidinvalid; | |
3870 | tidbase = (u64 __iomem *) | |
3871 | ((char __iomem *) dd->kregbase + | |
3872 | dd->rcvtidbase + | |
3873 | ctxt * dd->rcvtidcnt * sizeof(*tidbase)); | |
3874 | ||
3875 | for (i = 0; i < dd->rcvtidcnt; i++) | |
3876 | qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, | |
3877 | tidinv); | |
3878 | ||
3879 | tidbase = (u64 __iomem *) | |
3880 | ((char __iomem *) dd->kregbase + | |
3881 | dd->rcvegrbase + | |
3882 | rcd->rcvegr_tid_base * sizeof(*tidbase)); | |
3883 | ||
3884 | for (i = 0; i < rcd->rcvegrcnt; i++) | |
3885 | qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER, | |
3886 | tidinv); | |
3887 | } | |
3888 | ||
3889 | /** | |
3890 | * qib_7322_tidtemplate - setup constants for TID updates | |
3891 | * @dd: the qlogic_ib device | |
3892 | * | |
3893 | * We setup stuff that we use a lot, to avoid calculating each time | |
3894 | */ | |
3895 | static void qib_7322_tidtemplate(struct qib_devdata *dd) | |
3896 | { | |
3897 | /* | |
3898 | * For now, we always allocate 4KB buffers (at init) so we can | |
3899 | * receive max size packets. We may want a module parameter to | |
3900 | * specify 2KB or 4KB and/or make it per port instead of per device | |
3901 | * for those who want to reduce memory footprint. Note that the | |
3902 | * rcvhdrentsize size must be large enough to hold the largest | |
3903 | * IB header (currently 96 bytes) that we expect to handle (plus of | |
3904 | * course the 2 dwords of RHF). | |
3905 | */ | |
3906 | if (dd->rcvegrbufsize == 2048) | |
3907 | dd->tidtemplate = IBA7322_TID_SZ_2K; | |
3908 | else if (dd->rcvegrbufsize == 4096) | |
3909 | dd->tidtemplate = IBA7322_TID_SZ_4K; | |
3910 | dd->tidinvalid = 0; | |
3911 | } | |
3912 | ||
3913 | /** | |
3914 |  * qib_7322_get_base_info - set chip-specific flags for user code | 
3915 |  * @rcd: the qlogic_ib ctxt | 
3916 |  * @kinfo: qib_base_info pointer | 
3917 | * | |
3918 | * We set the PCIE flag because the lower bandwidth on PCIe vs | |
3919 |  * HyperTransport can affect some user packet algorithms. | 
3920 | */ | |
3921 | ||
3922 | static int qib_7322_get_base_info(struct qib_ctxtdata *rcd, | |
3923 | struct qib_base_info *kinfo) | |
3924 | { | |
3925 | kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP | | |
3926 | QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL | | |
3927 | QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA; | |
3928 | if (rcd->dd->cspec->r1) | |
3929 | kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK; | |
3930 | if (rcd->dd->flags & QIB_USE_SPCL_TRIG) | |
3931 | kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER; | |
3932 | ||
3933 | return 0; | |
3934 | } | |
3935 | ||
3936 | static struct qib_message_header * | |
3937 | qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr) | |
3938 | { | |
3939 | u32 offset = qib_hdrget_offset(rhf_addr); | |
3940 | ||
3941 | return (struct qib_message_header *) | |
3942 | (rhf_addr - dd->rhf_offset + offset); | |
3943 | } | |
3944 | ||
3945 | /* | |
3946 | * Configure number of contexts. | |
3947 | */ | |
3948 | static void qib_7322_config_ctxts(struct qib_devdata *dd) | |
3949 | { | |
3950 | unsigned long flags; | |
3951 | u32 nchipctxts; | |
3952 | ||
3953 | nchipctxts = qib_read_kreg32(dd, kr_contextcnt); | |
3954 | dd->cspec->numctxts = nchipctxts; | |
3955 | if (qib_n_krcv_queues > 1 && dd->num_pports) { | |
3956 | dd->first_user_ctxt = NUM_IB_PORTS + |
3957 | (qib_n_krcv_queues - 1) * dd->num_pports; | |
3958 | if (dd->first_user_ctxt > nchipctxts) | |
3959 | dd->first_user_ctxt = nchipctxts; | |
3960 | dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports; | |
3961 | } else { | |
3962 | dd->first_user_ctxt = NUM_IB_PORTS; | |
3963 | dd->n_krcv_queues = 1; | |
3964 | } | |
3965 | ||
3966 | if (!qib_cfgctxts) { | |
3967 | int nctxts = dd->first_user_ctxt + num_online_cpus(); | |
3968 | ||
3969 | if (nctxts <= 6) | |
3970 | dd->ctxtcnt = 6; | |
3971 | else if (nctxts <= 10) | |
3972 | dd->ctxtcnt = 10; | |
3973 | else if (nctxts <= nchipctxts) | |
3974 | dd->ctxtcnt = nchipctxts; | |
3975 | } else if (qib_cfgctxts < dd->num_pports) | |
3976 | dd->ctxtcnt = dd->num_pports; | |
3977 | else if (qib_cfgctxts <= nchipctxts) | |
3978 | dd->ctxtcnt = qib_cfgctxts; | |
3979 | if (!dd->ctxtcnt) /* none of the above, set to max */ | |
3980 | dd->ctxtcnt = nchipctxts; | |
3981 | ||
3982 | /* | |
3983 | * Chip can be configured for 6, 10, or 18 ctxts, and choice | |
3984 | * affects number of eager TIDs per ctxt (1K, 2K, 4K). | |
3985 | * Lock to be paranoid about later motion, etc. | |
3986 | */ | |
3987 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | |
3988 | if (dd->ctxtcnt > 10) | |
3989 | dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg); | |
3990 | else if (dd->ctxtcnt > 6) | |
3991 | dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg); | |
3992 | /* else configure for default 6 receive ctxts */ | |
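/*
 * ContextCfg encoding used above: the field is left at its default
 * for 6 contexts, set to 1 for 10, or 2 for 18, matching the
 * 6/10/18 choices noted in the comment above; eager TIDs per
 * context shrink as the context count grows.
 */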
3993 | ||
3994 | /* The XRC opcode is 5. */ | |
3995 | dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode); | |
3996 | ||
3997 | /* | |
3998 | * RcvCtrl *must* be written here so that the | |
3999 | * chip understands how to change rcvegrcnt below. | |
4000 | */ | |
4001 | qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); | |
4002 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | |
4003 | ||
4004 | /* kr_rcvegrcnt changes based on the number of contexts enabled */ | |
4005 | dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); | |
4006 | if (qib_rcvhdrcnt) |
4007 | dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt); | |
4008 | else | |
8d4548f2 | 4009 | dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt, |
0a43e117 | 4010 | dd->num_pports > 1 ? 1024U : 2048U); |
4011 | } |
4012 | ||
4013 | static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which) | |
4014 | { | |
4015 | ||
4016 | int lsb, ret = 0; | |
4017 | u64 maskr; /* right-justified mask */ | |
4018 | ||
4019 | switch (which) { | |
4020 | ||
4021 | case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */ | |
4022 | ret = ppd->link_width_enabled; | |
4023 | goto done; | |
4024 | ||
4025 | case QIB_IB_CFG_LWID: /* Get currently active Link-width */ | |
4026 | ret = ppd->link_width_active; | |
4027 | goto done; | |
4028 | ||
4029 | case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */ | |
4030 | ret = ppd->link_speed_enabled; | |
4031 | goto done; | |
4032 | ||
4033 | case QIB_IB_CFG_SPD: /* Get current Link spd */ | |
4034 | ret = ppd->link_speed_active; | |
4035 | goto done; | |
4036 | ||
4037 | case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */ | |
4038 | lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP); | |
4039 | maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP); | |
4040 | break; | |
4041 | ||
4042 | case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */ | |
4043 | lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); | |
4044 | maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); | |
4045 | break; | |
4046 | ||
4047 | case QIB_IB_CFG_LINKLATENCY: | |
4048 | ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) & | |
4049 | SYM_MASK(IBCStatusB_0, LinkRoundTripLatency); | |
4050 | goto done; | |
4051 | ||
4052 | case QIB_IB_CFG_OP_VLS: | |
4053 | ret = ppd->vls_operational; | |
4054 | goto done; | |
4055 | ||
4056 | case QIB_IB_CFG_VL_HIGH_CAP: | |
4057 | ret = 16; | |
4058 | goto done; | |
4059 | ||
4060 | case QIB_IB_CFG_VL_LOW_CAP: | |
4061 | ret = 16; | |
4062 | goto done; | |
4063 | ||
4064 | case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ | |
4065 | ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, | |
4066 | OverrunThreshold); | |
4067 | goto done; | |
4068 | ||
4069 | case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ | |
4070 | ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, | |
4071 | PhyerrThreshold); | |
4072 | goto done; | |
4073 | ||
4074 | case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ | |
4075 | /* will only take effect when the link state changes */ | |
4076 | ret = (ppd->cpspec->ibcctrl_a & | |
4077 | SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ? | |
4078 | IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL; | |
4079 | goto done; | |
4080 | ||
4081 | case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */ | |
4082 | lsb = IBA7322_IBC_HRTBT_LSB; | |
4083 | maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */ | |
4084 | break; | |
4085 | ||
4086 | case QIB_IB_CFG_PMA_TICKS: | |
4087 | /* | |
		 * 0x00 = 10x link transfer rate or 4 nsec for 2.5Gb/s.
		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
4090 | */ | |
4091 | if (ppd->link_speed_active == QIB_IB_QDR) | |
4092 | ret = 3; | |
4093 | else if (ppd->link_speed_active == QIB_IB_DDR) | |
4094 | ret = 1; | |
4095 | else | |
4096 | ret = 0; | |
4097 | goto done; | |
4098 | ||
4099 | default: | |
4100 | ret = -EINVAL; | |
4101 | goto done; | |
4102 | } | |
4103 | ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr); | |
4104 | done: | |
4105 | return ret; | |
4106 | } | |
4107 | ||
4108 | /* | |
4109 | * Below again cribbed liberally from older version. Do not lean | |
4110 | * heavily on it. | |
4111 | */ | |
4112 | #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB | |
4113 | #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \ | |
4114 | | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16)) | |
4115 | ||
4116 | static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val) | |
4117 | { | |
4118 | struct qib_devdata *dd = ppd->dd; | |
4119 | u64 maskr; /* right-justified mask */ | |
4120 | int lsb, ret = 0; | |
4121 | u16 lcmd, licmd; | |
4122 | unsigned long flags; | |
4123 | ||
4124 | switch (which) { | |
4125 | case QIB_IB_CFG_LIDLMC: | |
4126 | /* | |
		 * Set LID and LMC. Combined to avoid a possible hazard;
		 * caller puts LMC in 16 MSbits, DLID in 16 LSbits of val.
4129 | */ | |
4130 | lsb = IBA7322_IBC_DLIDLMC_SHIFT; | |
4131 | maskr = IBA7322_IBC_DLIDLMC_MASK; | |
4132 | /* | |
4133 | * For header-checking, the SLID in the packet will | |
4134 | * be masked with SendIBSLMCMask, and compared | |
4135 | * with SendIBSLIDAssignMask. Make sure we do not | |
4136 | * set any bits not covered by the mask, or we get | |
4137 | * false-positives. | |
4138 | */ | |
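		/*
		 * Per the comment above, the caller supplies an LMC-derived
		 * mask in the upper 16 bits; ANDing it with the LID clears
		 * the low bits that vary across the LMC range, leaving only
		 * the base SLID bits for header checking.
		 */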
4139 | qib_write_kreg_port(ppd, krp_sendslid, | |
4140 | val & (val >> 16) & SendIBSLIDAssignMask); | |
4141 | qib_write_kreg_port(ppd, krp_sendslidmask, | |
4142 | (val >> 16) & SendIBSLMCMask); | |
4143 | break; | |
4144 | ||
4145 | case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */ | |
4146 | ppd->link_width_enabled = val; | |
4147 | /* convert IB value to chip register value */ | |
4148 | if (val == IB_WIDTH_1X) | |
4149 | val = 0; | |
4150 | else if (val == IB_WIDTH_4X) | |
4151 | val = 1; | |
4152 | else | |
4153 | val = 3; | |
4154 | maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS); | |
4155 | lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS); | |
4156 | break; | |
4157 | ||
4158 | case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */ | |
4159 | /* | |
4160 | * As with width, only write the actual register if the | |
4161 | * link is currently down, otherwise takes effect on next | |
		 * link change. Since the setting is being explicitly requested
4163 | * (via MAD or sysfs), clear autoneg failure status if speed |
4164 | * autoneg is enabled. | |
4165 | */ | |
4166 | ppd->link_speed_enabled = val; | |
4167 | val <<= IBA7322_IBC_SPEED_LSB; | |
4168 | maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK | | |
4169 | IBA7322_IBC_MAX_SPEED_MASK; | |
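		/* val & (val - 1) is nonzero iff more than one speed bit is set */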
4170 | if (val & (val - 1)) { | |
			/* Multiple speeds enabled */
4172 | val |= IBA7322_IBC_IBTA_1_2_MASK | | |
4173 | IBA7322_IBC_MAX_SPEED_MASK; | |
4174 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
4175 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | |
4176 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
4177 | } else if (val & IBA7322_IBC_SPEED_QDR) | |
4178 | val |= IBA7322_IBC_IBTA_1_2_MASK; | |
4179 | /* IBTA 1.2 mode + min/max + speed bits are contiguous */ | |
4180 | lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE); | |
4181 | break; | |
4182 | ||
4183 | case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */ | |
4184 | lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP); | |
4185 | maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP); | |
4186 | break; | |
4187 | ||
4188 | case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */ | |
4189 | lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); | |
4190 | maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); | |
4191 | break; | |
4192 | ||
4193 | case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ | |
4194 | maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, | |
4195 | OverrunThreshold); | |
4196 | if (maskr != val) { | |
4197 | ppd->cpspec->ibcctrl_a &= | |
4198 | ~SYM_MASK(IBCCtrlA_0, OverrunThreshold); | |
4199 | ppd->cpspec->ibcctrl_a |= (u64) val << | |
4200 | SYM_LSB(IBCCtrlA_0, OverrunThreshold); | |
4201 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | |
4202 | ppd->cpspec->ibcctrl_a); | |
4203 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
4204 | } | |
4205 | goto bail; | |
4206 | ||
4207 | case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ | |
4208 | maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, | |
4209 | PhyerrThreshold); | |
4210 | if (maskr != val) { | |
4211 | ppd->cpspec->ibcctrl_a &= | |
4212 | ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold); | |
4213 | ppd->cpspec->ibcctrl_a |= (u64) val << | |
4214 | SYM_LSB(IBCCtrlA_0, PhyerrThreshold); | |
4215 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | |
4216 | ppd->cpspec->ibcctrl_a); | |
4217 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
4218 | } | |
4219 | goto bail; | |
4220 | ||
4221 | case QIB_IB_CFG_PKEYS: /* update pkeys */ | |
4222 | maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) | | |
4223 | ((u64) ppd->pkeys[2] << 32) | | |
4224 | ((u64) ppd->pkeys[3] << 48); | |
4225 | qib_write_kreg_port(ppd, krp_partitionkey, maskr); | |
4226 | goto bail; | |
4227 | ||
4228 | case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ | |
4229 | /* will only take effect when the link state changes */ | |
4230 | if (val == IB_LINKINITCMD_POLL) | |
4231 | ppd->cpspec->ibcctrl_a &= | |
4232 | ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState); | |
4233 | else /* SLEEP */ | |
4234 | ppd->cpspec->ibcctrl_a |= | |
4235 | SYM_MASK(IBCCtrlA_0, LinkDownDefaultState); | |
4236 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | |
4237 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
4238 | goto bail; | |
4239 | ||
4240 | case QIB_IB_CFG_MTU: /* update the MTU in IBC */ | |
4241 | /* | |
4242 | * Update our housekeeping variables, and set IBC max | |
4243 | * size, same as init code; max IBC is max we allow in | |
		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
		 * Set even if it's unchanged; print debug message only
4246 | * on changes. | |
4247 | */ | |
4248 | val = (ppd->ibmaxlen >> 2) + 1; | |
4249 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen); | |
4250 | ppd->cpspec->ibcctrl_a |= (u64)val << | |
4251 | SYM_LSB(IBCCtrlA_0, MaxPktLen); | |
4252 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | |
4253 | ppd->cpspec->ibcctrl_a); | |
4254 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
4255 | goto bail; | |
4256 | ||
4257 | case QIB_IB_CFG_LSTATE: /* set the IB link state */ | |
4258 | switch (val & 0xffff0000) { | |
4259 | case IB_LINKCMD_DOWN: | |
4260 | lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN; | |
4261 | ppd->cpspec->ibmalfusesnap = 1; | |
4262 | ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd, | |
4263 | crp_errlink); | |
4264 | if (!ppd->cpspec->ibdeltainprog && | |
4265 | qib_compat_ddr_negotiate) { | |
4266 | ppd->cpspec->ibdeltainprog = 1; | |
4267 | ppd->cpspec->ibsymsnap = | |
4268 | read_7322_creg32_port(ppd, | |
4269 | crp_ibsymbolerr); | |
4270 | ppd->cpspec->iblnkerrsnap = | |
4271 | read_7322_creg32_port(ppd, | |
4272 | crp_iblinkerrrecov); | |
4273 | } | |
4274 | break; | |
4275 | ||
4276 | case IB_LINKCMD_ARMED: | |
4277 | lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED; | |
4278 | if (ppd->cpspec->ibmalfusesnap) { | |
4279 | ppd->cpspec->ibmalfusesnap = 0; | |
4280 | ppd->cpspec->ibmalfdelta += | |
4281 | read_7322_creg32_port(ppd, | |
4282 | crp_errlink) - | |
4283 | ppd->cpspec->ibmalfsnap; | |
4284 | } | |
4285 | break; | |
4286 | ||
4287 | case IB_LINKCMD_ACTIVE: | |
4288 | lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE; | |
4289 | break; | |
4290 | ||
4291 | default: | |
4292 | ret = -EINVAL; | |
4293 | qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16); | |
4294 | goto bail; | |
4295 | } | |
4296 | switch (val & 0xffff) { | |
4297 | case IB_LINKINITCMD_NOP: | |
4298 | licmd = 0; | |
4299 | break; | |
4300 | ||
4301 | case IB_LINKINITCMD_POLL: | |
4302 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL; | |
4303 | break; | |
4304 | ||
4305 | case IB_LINKINITCMD_SLEEP: | |
4306 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP; | |
4307 | break; | |
4308 | ||
4309 | case IB_LINKINITCMD_DISABLE: | |
4310 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE; | |
4311 | ppd->cpspec->chase_end = 0; | |
4312 | /* | |
4313 | * stop state chase counter and timer, if running. | |
			 * wait for pending timer, but don't clear .data (ppd)!
4315 | */ | |
4316 | if (ppd->cpspec->chase_timer.expires) { | |
4317 | del_timer_sync(&ppd->cpspec->chase_timer); | |
4318 | ppd->cpspec->chase_timer.expires = 0; | |
4319 | } | |
4320 | break; | |
4321 | ||
4322 | default: | |
4323 | ret = -EINVAL; | |
4324 | qib_dev_err(dd, "bad linkinitcmd req 0x%x\n", | |
4325 | val & 0xffff); | |
4326 | goto bail; | |
4327 | } | |
4328 | qib_set_ib_7322_lstate(ppd, lcmd, licmd); | |
4329 | goto bail; | |
4330 | ||
4331 | case QIB_IB_CFG_OP_VLS: | |
4332 | if (ppd->vls_operational != val) { | |
4333 | ppd->vls_operational = val; | |
4334 | set_vls(ppd); | |
4335 | } | |
4336 | goto bail; | |
4337 | ||
4338 | case QIB_IB_CFG_VL_HIGH_LIMIT: | |
4339 | qib_write_kreg_port(ppd, krp_highprio_limit, val); | |
4340 | goto bail; | |
4341 | ||
4342 | case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */ | |
4343 | if (val > 3) { | |
4344 | ret = -EINVAL; | |
4345 | goto bail; | |
4346 | } | |
4347 | lsb = IBA7322_IBC_HRTBT_LSB; | |
4348 | maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */ | |
4349 | break; | |
4350 | ||
4351 | case QIB_IB_CFG_PORT: | |
4352 | /* val is the port number of the switch we are connected to. */ | |
4353 | if (ppd->dd->cspec->r1) { | |
4354 | cancel_delayed_work(&ppd->cpspec->ipg_work); | |
4355 | ppd->cpspec->ipg_tries = 0; | |
4356 | } | |
4357 | goto bail; | |
4358 | ||
4359 | default: | |
4360 | ret = -EINVAL; | |
4361 | goto bail; | |
4362 | } | |
4363 | ppd->cpspec->ibcctrl_b &= ~(maskr << lsb); | |
4364 | ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb); | |
4365 | qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b); | |
4366 | qib_write_kreg(dd, kr_scratch, 0); | |
4367 | bail: | |
4368 | return ret; | |
4369 | } | |
4370 | ||
4371 | static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what) | |
4372 | { | |
4373 | int ret = 0; | |
4374 | u64 val, ctrlb; | |
4375 | ||
4376 | /* only IBC loopback, may add serdes and xgxs loopbacks later */ | |
4377 | if (!strncmp(what, "ibc", 3)) { | |
4378 | ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, | |
4379 | Loopback); | |
4380 | val = 0; /* disable heart beat, so link will come up */ | |
4381 | qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", | |
4382 | ppd->dd->unit, ppd->port); | |
4383 | } else if (!strncmp(what, "off", 3)) { | |
4384 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, | |
4385 | Loopback); | |
4386 | /* enable heart beat again */ | |
4387 | val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB; | |
4388 | qib_devinfo(ppd->dd->pcidev, |
4389 | "Disabling IB%u:%u IBC loopback (normal)\n", | |
4390 | ppd->dd->unit, ppd->port); | |
4391 | } else |
4392 | ret = -EINVAL; | |
4393 | if (!ret) { | |
4394 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | |
4395 | ppd->cpspec->ibcctrl_a); | |
4396 | ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK | |
4397 | << IBA7322_IBC_HRTBT_LSB); | |
4398 | ppd->cpspec->ibcctrl_b = ctrlb | val; | |
4399 | qib_write_kreg_port(ppd, krp_ibcctrl_b, | |
4400 | ppd->cpspec->ibcctrl_b); | |
4401 | qib_write_kreg(ppd->dd, kr_scratch, 0); | |
4402 | } | |
4403 | return ret; | |
4404 | } | |
4405 | ||
4406 | static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno, | |
4407 | struct ib_vl_weight_elem *vl) | |
4408 | { | |
4409 | unsigned i; | |
4410 | ||
4411 | for (i = 0; i < 16; i++, regno++, vl++) { | |
4412 | u32 val = qib_read_kreg_port(ppd, regno); | |
4413 | ||
4414 | vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) & | |
4415 | SYM_RMASK(LowPriority0_0, VirtualLane); | |
4416 | vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) & | |
4417 | SYM_RMASK(LowPriority0_0, Weight); | |
4418 | } | |
4419 | } | |
4420 | ||
4421 | static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno, | |
4422 | struct ib_vl_weight_elem *vl) | |
4423 | { | |
4424 | unsigned i; | |
4425 | ||
4426 | for (i = 0; i < 16; i++, regno++, vl++) { | |
4427 | u64 val; | |
4428 | ||
4429 | val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) << | |
4430 | SYM_LSB(LowPriority0_0, VirtualLane)) | | |
4431 | ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) << | |
4432 | SYM_LSB(LowPriority0_0, Weight)); | |
4433 | qib_write_kreg_port(ppd, regno, val); | |
4434 | } | |
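	/* make sure the VL arbiter is enabled so the new weights take effect */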
4435 | if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) { | |
4436 | struct qib_devdata *dd = ppd->dd; | |
4437 | unsigned long flags; | |
4438 | ||
4439 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | |
4440 | ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn); | |
4441 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | |
4442 | qib_write_kreg(dd, kr_scratch, 0); | |
4443 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
4444 | } | |
4445 | } | |
4446 | ||
4447 | static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t) | |
4448 | { | |
4449 | switch (which) { | |
4450 | case QIB_IB_TBL_VL_HIGH_ARB: | |
4451 | get_vl_weights(ppd, krp_highprio_0, t); | |
4452 | break; | |
4453 | ||
4454 | case QIB_IB_TBL_VL_LOW_ARB: | |
4455 | get_vl_weights(ppd, krp_lowprio_0, t); | |
4456 | break; | |
4457 | ||
4458 | default: | |
4459 | return -EINVAL; | |
4460 | } | |
4461 | return 0; | |
4462 | } | |
4463 | ||
4464 | static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t) | |
4465 | { | |
4466 | switch (which) { | |
4467 | case QIB_IB_TBL_VL_HIGH_ARB: | |
4468 | set_vl_weights(ppd, krp_highprio_0, t); | |
4469 | break; | |
4470 | ||
4471 | case QIB_IB_TBL_VL_LOW_ARB: | |
4472 | set_vl_weights(ppd, krp_lowprio_0, t); | |
4473 | break; | |
4474 | ||
4475 | default: | |
4476 | return -EINVAL; | |
4477 | } | |
4478 | return 0; | |
4479 | } | |
4480 | ||
static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
				    u32 updegr, u32 egrhd, u32 npkts)
{
	/*
	 * Need to write timeout register before updating rcvhdrhead to
	 * ensure that the timer is enabled on reception of a packet.
	 */
	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
		adjust_rcv_timeout(rcd, npkts);
	if (updegr)
		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
	mmiowb();
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	mmiowb();
}
4497 | ||
4498 | static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd) | |
4499 | { | |
4500 | u32 head, tail; | |
4501 | ||
4502 | head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); | |
4503 | if (rcd->rcvhdrtail_kvaddr) | |
4504 | tail = qib_get_rcvhdrtail(rcd); | |
4505 | else | |
4506 | tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); | |
4507 | return head == tail; | |
4508 | } | |
4509 | ||
4510 | #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \ | |
4511 | QIB_RCVCTRL_CTXT_DIS | \ | |
4512 | QIB_RCVCTRL_TIDFLOW_ENB | \ | |
4513 | QIB_RCVCTRL_TIDFLOW_DIS | \ | |
4514 | QIB_RCVCTRL_TAILUPD_ENB | \ | |
4515 | QIB_RCVCTRL_TAILUPD_DIS | \ | |
4516 | QIB_RCVCTRL_INTRAVAIL_ENB | \ | |
4517 | QIB_RCVCTRL_INTRAVAIL_DIS | \ | |
4518 | QIB_RCVCTRL_BP_ENB | \ | |
4519 | QIB_RCVCTRL_BP_DIS) | |
4520 | ||
4521 | #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \ | |
4522 | QIB_RCVCTRL_CTXT_DIS | \ | |
4523 | QIB_RCVCTRL_PKEY_DIS | \ | |
4524 | QIB_RCVCTRL_PKEY_ENB) | |
4525 | ||
4526 | /* | |
 * Modify the RCVCTRL register in a chip-specific way. This
 * is a function because bit positions and (future) register
 * locations are chip-specific, but the needed operations are
4530 | * generic. <op> is a bit-mask because we often want to | |
4531 | * do multiple modifications. | |
4532 | */ | |
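/*
 * Illustrative (hypothetical) call, enabling a context along with its
 * "interrupt on packet available" behavior in a single operation:
 *
 *	rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
 *			 QIB_RCVCTRL_INTRAVAIL_ENB, ctxt);
 */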
4533 | static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op, | |
4534 | int ctxt) | |
4535 | { | |
4536 | struct qib_devdata *dd = ppd->dd; | |
4537 | struct qib_ctxtdata *rcd; | |
4538 | u64 mask, val; | |
4539 | unsigned long flags; | |
4540 | ||
4541 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | |
4542 | ||
4543 | if (op & QIB_RCVCTRL_TIDFLOW_ENB) | |
4544 | dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable); | |
4545 | if (op & QIB_RCVCTRL_TIDFLOW_DIS) | |
4546 | dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable); | |
4547 | if (op & QIB_RCVCTRL_TAILUPD_ENB) | |
4548 | dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); | |
4549 | if (op & QIB_RCVCTRL_TAILUPD_DIS) | |
4550 | dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd); | |
4551 | if (op & QIB_RCVCTRL_PKEY_ENB) | |
4552 | ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable); | |
4553 | if (op & QIB_RCVCTRL_PKEY_DIS) | |
4554 | ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable); | |
4555 | if (ctxt < 0) { | |
4556 | mask = (1ULL << dd->ctxtcnt) - 1; | |
4557 | rcd = NULL; | |
4558 | } else { | |
4559 | mask = (1ULL << ctxt); | |
4560 | rcd = dd->rcd[ctxt]; | |
4561 | } | |
4562 | if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) { | |
4563 | ppd->p_rcvctrl |= | |
4564 | (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel)); | |
4565 | if (!(dd->flags & QIB_NODMA_RTAIL)) { | |
4566 | op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */ | |
4567 | dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); | |
4568 | } | |
4569 | /* Write these registers before the context is enabled. */ | |
4570 | qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, | |
4571 | rcd->rcvhdrqtailaddr_phys); | |
4572 | qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, | |
4573 | rcd->rcvhdrq_phys); | |
4574 | rcd->seq_cnt = 1; | |
4575 | } |
4576 | if (op & QIB_RCVCTRL_CTXT_DIS) | |
4577 | ppd->p_rcvctrl &= | |
4578 | ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel)); | |
4579 | if (op & QIB_RCVCTRL_BP_ENB) | |
4580 | dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull); | |
4581 | if (op & QIB_RCVCTRL_BP_DIS) | |
4582 | dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull)); | |
4583 | if (op & QIB_RCVCTRL_INTRAVAIL_ENB) | |
4584 | dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail)); | |
4585 | if (op & QIB_RCVCTRL_INTRAVAIL_DIS) | |
4586 | dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail)); | |
4587 | /* | |
4588 | * Decide which registers to write depending on the ops enabled. | |
4589 | * Special case is "flush" (no bits set at all) | |
4590 | * which needs to write both. | |
4591 | */ | |
4592 | if (op == 0 || (op & RCVCTRL_COMMON_MODS)) | |
4593 | qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); | |
4594 | if (op == 0 || (op & RCVCTRL_PORT_MODS)) | |
4595 | qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); | |
4596 | if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) { | |
4597 | /* | |
4598 | * Init the context registers also; if we were | |
4599 | * disabled, tail and head should both be zero | |
4600 | * already from the enable, but since we don't | |
		 * know, we have to do it explicitly.
4602 | */ |
4603 | val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); | |
4604 | qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); | |
4605 | ||
4606 | /* be sure enabling write seen; hd/tl should be 0 */ | |
4607 | (void) qib_read_kreg32(dd, kr_scratch); | |
4608 | val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); | |
4609 | dd->rcd[ctxt]->head = val; | |
4610 | /* If kctxt, interrupt on next receive. */ | |
4611 | if (ctxt < dd->first_user_ctxt) | |
4612 | val |= dd->rhdrhead_intr_off; | |
4613 | qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); | |
4614 | } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && | |
4615 | dd->rcd[ctxt] && dd->rhdrhead_intr_off) { | |
4616 | /* arm rcv interrupt */ | |
4617 | val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off; | |
4618 | qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); | |
4619 | } | |
4620 | if (op & QIB_RCVCTRL_CTXT_DIS) { | |
4621 | unsigned f; | |
4622 | ||
4623 | /* Now that the context is disabled, clear these registers. */ | |
4624 | if (ctxt >= 0) { | |
4625 | qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0); | |
4626 | qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0); | |
4627 | for (f = 0; f < NUM_TIDFLOWS_CTXT; f++) | |
4628 | qib_write_ureg(dd, ur_rcvflowtable + f, | |
4629 | TIDFLOW_ERRBITS, ctxt); | |
4630 | } else { | |
4631 | unsigned i; | |
4632 | ||
4633 | for (i = 0; i < dd->cfgctxts; i++) { | |
4634 | qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, | |
4635 | i, 0); | |
4636 | qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0); | |
4637 | for (f = 0; f < NUM_TIDFLOWS_CTXT; f++) | |
4638 | qib_write_ureg(dd, ur_rcvflowtable + f, | |
4639 | TIDFLOW_ERRBITS, i); | |
4640 | } | |
4641 | } | |
4642 | } | |
4643 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | |
4644 | } | |
4645 | ||
4646 | /* | |
 * Modify the SENDCTRL register in a chip-specific way. This
 * is a function because there are multiple such registers with
4649 | * slightly different layouts. | |
4650 | * The chip doesn't allow back-to-back sendctrl writes, so write | |
4651 | * the scratch register after writing sendctrl. | |
4652 | * | |
4653 | * Which register is written depends on the operation. | |
4654 | * Most operate on the common register, while | |
4655 | * SEND_ENB and SEND_DIS operate on the per-port ones. | |
4656 | * SEND_ENB is included in common because it can change SPCL_TRIG | |
4657 | */ | |
4658 | #define SENDCTRL_COMMON_MODS (\ | |
4659 | QIB_SENDCTRL_CLEAR | \ | |
4660 | QIB_SENDCTRL_AVAIL_DIS | \ | |
4661 | QIB_SENDCTRL_AVAIL_ENB | \ | |
4662 | QIB_SENDCTRL_AVAIL_BLIP | \ | |
4663 | QIB_SENDCTRL_DISARM | \ | |
4664 | QIB_SENDCTRL_DISARM_ALL | \ | |
4665 | QIB_SENDCTRL_SEND_ENB) | |
4666 | ||
4667 | #define SENDCTRL_PORT_MODS (\ | |
4668 | QIB_SENDCTRL_CLEAR | \ | |
4669 | QIB_SENDCTRL_SEND_ENB | \ | |
4670 | QIB_SENDCTRL_SEND_DIS | \ | |
4671 | QIB_SENDCTRL_FLUSH) | |
4672 | ||
4673 | static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op) | |
4674 | { | |
4675 | struct qib_devdata *dd = ppd->dd; | |
4676 | u64 tmp_dd_sendctrl; | |
4677 | unsigned long flags; | |
4678 | ||
4679 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | |
4680 | ||
4681 | /* First the dd ones that are "sticky", saved in shadow */ | |
4682 | if (op & QIB_SENDCTRL_CLEAR) | |
4683 | dd->sendctrl = 0; | |
4684 | if (op & QIB_SENDCTRL_AVAIL_DIS) | |
4685 | dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | |
4686 | else if (op & QIB_SENDCTRL_AVAIL_ENB) { | |
4687 | dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd); | |
4688 | if (dd->flags & QIB_USE_SPCL_TRIG) | |
4689 | dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn); | |
4690 | } | |
4691 | ||
4692 | /* Then the ppd ones that are "sticky", saved in shadow */ | |
4693 | if (op & QIB_SENDCTRL_SEND_DIS) | |
4694 | ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable); | |
4695 | else if (op & QIB_SENDCTRL_SEND_ENB) | |
4696 | ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable); | |
4697 | ||
4698 | if (op & QIB_SENDCTRL_DISARM_ALL) { | |
4699 | u32 i, last; | |
4700 | ||
4701 | tmp_dd_sendctrl = dd->sendctrl; | |
4702 | last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; | |
4703 | /* | |
4704 | * Disarm any buffers that are not yet launched, | |
4705 | * disabling updates until done. | |
4706 | */ | |
4707 | tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | |
4708 | for (i = 0; i < last; i++) { | |
4709 | qib_write_kreg(dd, kr_sendctrl, | |
4710 | tmp_dd_sendctrl | | |
4711 | SYM_MASK(SendCtrl, Disarm) | i); | |
4712 | qib_write_kreg(dd, kr_scratch, 0); | |
4713 | } | |
4714 | } | |
4715 | ||
4716 | if (op & QIB_SENDCTRL_FLUSH) { | |
4717 | u64 tmp_ppd_sendctrl = ppd->p_sendctrl; | |
4718 | ||
4719 | /* | |
4720 | * Now drain all the fifos. The Abort bit should never be | |
4721 | * needed, so for now, at least, we don't use it. | |
4722 | */ | |
4723 | tmp_ppd_sendctrl |= | |
4724 | SYM_MASK(SendCtrl_0, TxeDrainRmFifo) | | |
4725 | SYM_MASK(SendCtrl_0, TxeDrainLaFifo) | | |
4726 | SYM_MASK(SendCtrl_0, TxeBypassIbc); | |
4727 | qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl); | |
4728 | qib_write_kreg(dd, kr_scratch, 0); | |
4729 | } | |
4730 | ||
4731 | tmp_dd_sendctrl = dd->sendctrl; | |
4732 | ||
4733 | if (op & QIB_SENDCTRL_DISARM) | |
4734 | tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) | | |
4735 | ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) << | |
4736 | SYM_LSB(SendCtrl, DisarmSendBuf)); | |
4737 | if ((op & QIB_SENDCTRL_AVAIL_BLIP) && | |
4738 | (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) | |
4739 | tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | |
4740 | ||
4741 | if (op == 0 || (op & SENDCTRL_COMMON_MODS)) { | |
4742 | qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl); | |
4743 | qib_write_kreg(dd, kr_scratch, 0); | |
4744 | } | |
4745 | ||
4746 | if (op == 0 || (op & SENDCTRL_PORT_MODS)) { | |
4747 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | |
4748 | qib_write_kreg(dd, kr_scratch, 0); | |
4749 | } | |
4750 | ||
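	/*
	 * For a "blip", SendBufAvailUpd was masked off in the write above;
	 * rewriting the shadow value turns the updates back on.
	 */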
4751 | if (op & QIB_SENDCTRL_AVAIL_BLIP) { | |
4752 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | |
4753 | qib_write_kreg(dd, kr_scratch, 0); | |
4754 | } | |
4755 | ||
4756 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
4757 | ||
4758 | if (op & QIB_SENDCTRL_FLUSH) { | |
4759 | u32 v; | |
4760 | /* | |
4761 | * ensure writes have hit chip, then do a few | |
4762 | * more reads, to allow DMA of pioavail registers | |
4763 | * to occur, so in-memory copy is in sync with | |
4764 | * the chip. Not always safe to sleep. | |
4765 | */ | |
4766 | v = qib_read_kreg32(dd, kr_scratch); | |
4767 | qib_write_kreg(dd, kr_scratch, v); | |
4768 | v = qib_read_kreg32(dd, kr_scratch); | |
4769 | qib_write_kreg(dd, kr_scratch, v); | |
4770 | qib_read_kreg32(dd, kr_scratch); | |
4771 | } | |
4772 | } | |
4773 | ||
4774 | #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */ | |
4775 | #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */ | |
4776 | #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */ | |
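/*
 * Entries in the counter tables below are 15-bit register indices with
 * the flags above ORed in; _PORT_CNTR_IDXMASK recovers the bare index.
 */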
4777 | ||
4778 | /** | |
4779 | * qib_portcntr_7322 - read a per-port chip counter | |
4780 | * @ppd: the qlogic_ib pport | |
 * @reg: the counter to read (not a chip offset)
4782 | */ | |
4783 | static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg) | |
4784 | { | |
4785 | struct qib_devdata *dd = ppd->dd; | |
4786 | u64 ret = 0ULL; | |
4787 | u16 creg; | |
4788 | /* 0xffff for unimplemented or synthesized counters */ | |
4789 | static const u32 xlator[] = { | |
4790 | [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG, | |
4791 | [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG, | |
4792 | [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount, | |
4793 | [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount, | |
4794 | [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount, | |
4795 | [QIBPORTCNTR_SENDSTALL] = crp_sendstall, | |
4796 | [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG, | |
4797 | [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount, | |
4798 | [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount, | |
4799 | [QIBPORTCNTR_RCVEBP] = crp_rcvebp, | |
4800 | [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl, | |
4801 | [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG, | |
4802 | [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */ | |
4803 | [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr, | |
4804 | [QIBPORTCNTR_RXVLERR] = crp_rxvlerr, | |
4805 | [QIBPORTCNTR_ERRICRC] = crp_erricrc, | |
4806 | [QIBPORTCNTR_ERRVCRC] = crp_errvcrc, | |
4807 | [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc, | |
4808 | [QIBPORTCNTR_BADFORMAT] = crp_badformat, | |
4809 | [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen, | |
4810 | [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr, | |
4811 | [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen, | |
4812 | [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl, | |
4813 | [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl, | |
4814 | [QIBPORTCNTR_ERRLINK] = crp_errlink, | |
4815 | [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown, | |
4816 | [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov, | |
4817 | [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr, | |
4818 | [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt, | |
4819 | [QIBPORTCNTR_ERRPKEY] = crp_errpkey, | |
4820 | /* | |
4821 | * the next 3 aren't really counters, but were implemented | |
4822 | * as counters in older chips, so still get accessed as | |
4823 | * though they were counters from this code. | |
4824 | */ | |
4825 | [QIBPORTCNTR_PSINTERVAL] = krp_psinterval, | |
4826 | [QIBPORTCNTR_PSSTART] = krp_psstart, | |
4827 | [QIBPORTCNTR_PSSTAT] = krp_psstat, | |
4828 | /* pseudo-counter, summed for all ports */ | |
4829 | [QIBPORTCNTR_KHDROVFL] = 0xffff, | |
4830 | }; | |
4831 | ||
4832 | if (reg >= ARRAY_SIZE(xlator)) { | |
4833 | qib_devinfo(ppd->dd->pcidev, | |
4834 | "Unimplemented portcounter %u\n", reg); | |
4835 | goto done; | |
4836 | } | |
4837 | creg = xlator[reg] & _PORT_CNTR_IDXMASK; | |
4838 | ||
4839 | /* handle non-counters and special cases first */ | |
4840 | if (reg == QIBPORTCNTR_KHDROVFL) { | |
4841 | int i; | |
4842 | ||
4843 | /* sum over all kernel contexts (skip if mini_init) */ | |
4844 | for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) { | |
4845 | struct qib_ctxtdata *rcd = dd->rcd[i]; | |
4846 | ||
4847 | if (!rcd || rcd->ppd != ppd) | |
4848 | continue; | |
4849 | ret += read_7322_creg32(dd, cr_base_egrovfl + i); | |
4850 | } | |
4851 | goto done; | |
4852 | } else if (reg == QIBPORTCNTR_RXDROPPKT) { | |
4853 | /* | |
4854 | * Used as part of the synthesis of port_rcv_errors | |
4855 | * in the verbs code for IBTA counters. Not needed for 7322, | |
4856 | * because all the errors are already counted by other cntrs. | |
4857 | */ | |
4858 | goto done; | |
4859 | } else if (reg == QIBPORTCNTR_PSINTERVAL || | |
4860 | reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) { | |
4861 | /* were counters in older chips, now per-port kernel regs */ | |
4862 | ret = qib_read_kreg_port(ppd, creg); | |
4863 | goto done; | |
4864 | } | |
4865 | ||
4866 | /* | |
4867 | * Only fast increment counters are 64 bits; use 32 bit reads to | |
4868 | * avoid two independent reads when on Opteron. | |
4869 | */ | |
4870 | if (xlator[reg] & _PORT_64BIT_FLAG) | |
4871 | ret = read_7322_creg_port(ppd, creg); | |
4872 | else | |
4873 | ret = read_7322_creg32_port(ppd, creg); | |
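	/*
	 * While a delta capture is in progress (ibdeltainprog),
	 * "ret -= ret - snap" pins the reading at the snapshot taken
	 * when the capture began.
	 */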
4874 | if (creg == crp_ibsymbolerr) { | |
4875 | if (ppd->cpspec->ibdeltainprog) | |
4876 | ret -= ret - ppd->cpspec->ibsymsnap; | |
4877 | ret -= ppd->cpspec->ibsymdelta; | |
4878 | } else if (creg == crp_iblinkerrrecov) { | |
4879 | if (ppd->cpspec->ibdeltainprog) | |
4880 | ret -= ret - ppd->cpspec->iblnkerrsnap; | |
4881 | ret -= ppd->cpspec->iblnkerrdelta; | |
4882 | } else if (creg == crp_errlink) | |
4883 | ret -= ppd->cpspec->ibmalfdelta; | |
4884 | else if (creg == crp_iblinkdown) | |
4885 | ret += ppd->cpspec->iblnkdowndelta; | |
4886 | done: | |
4887 | return ret; | |
4888 | } | |
4889 | ||
4890 | /* | |
4891 | * Device counter names (not port-specific), one line per stat, | |
4892 | * single string. Used by utilities like ipathstats to print the stats | |
4893 | * in a way which works for different versions of drivers, without changing | |
4894 | * the utility. Names need to be 12 chars or less (w/o newline), for proper | |
4895 | * display by utility. | |
4896 | * Non-error counters are first. | |
 * Start of "error" counters is indicated by a leading "E " on the first
4898 | * "error" counter, and doesn't count in label length. | |
4899 | * The EgrOvfl list needs to be last so we truncate them at the configured | |
4900 | * context count for the device. | |
4901 | * cntr7322indices contains the corresponding register indices. | |
4902 | */ | |
4903 | static const char cntr7322names[] = | |
4904 | "Interrupts\n" | |
4905 | "HostBusStall\n" | |
4906 | "E RxTIDFull\n" | |
4907 | "RxTIDInvalid\n" | |
4908 | "RxTIDFloDrop\n" /* 7322 only */ | |
4909 | "Ctxt0EgrOvfl\n" | |
4910 | "Ctxt1EgrOvfl\n" | |
4911 | "Ctxt2EgrOvfl\n" | |
4912 | "Ctxt3EgrOvfl\n" | |
4913 | "Ctxt4EgrOvfl\n" | |
4914 | "Ctxt5EgrOvfl\n" | |
4915 | "Ctxt6EgrOvfl\n" | |
4916 | "Ctxt7EgrOvfl\n" | |
4917 | "Ctxt8EgrOvfl\n" | |
4918 | "Ctxt9EgrOvfl\n" | |
4919 | "Ctx10EgrOvfl\n" | |
4920 | "Ctx11EgrOvfl\n" | |
4921 | "Ctx12EgrOvfl\n" | |
4922 | "Ctx13EgrOvfl\n" | |
4923 | "Ctx14EgrOvfl\n" | |
4924 | "Ctx15EgrOvfl\n" | |
4925 | "Ctx16EgrOvfl\n" | |
4926 | "Ctx17EgrOvfl\n" | |
4927 | ; | |
4928 | ||
4929 | static const u32 cntr7322indices[] = { | |
4930 | cr_lbint | _PORT_64BIT_FLAG, | |
4931 | cr_lbstall | _PORT_64BIT_FLAG, | |
4932 | cr_tidfull, | |
4933 | cr_tidinvalid, | |
4934 | cr_rxtidflowdrop, | |
4935 | cr_base_egrovfl + 0, | |
4936 | cr_base_egrovfl + 1, | |
4937 | cr_base_egrovfl + 2, | |
4938 | cr_base_egrovfl + 3, | |
4939 | cr_base_egrovfl + 4, | |
4940 | cr_base_egrovfl + 5, | |
4941 | cr_base_egrovfl + 6, | |
4942 | cr_base_egrovfl + 7, | |
4943 | cr_base_egrovfl + 8, | |
4944 | cr_base_egrovfl + 9, | |
4945 | cr_base_egrovfl + 10, | |
4946 | cr_base_egrovfl + 11, | |
4947 | cr_base_egrovfl + 12, | |
4948 | cr_base_egrovfl + 13, | |
4949 | cr_base_egrovfl + 14, | |
4950 | cr_base_egrovfl + 15, | |
4951 | cr_base_egrovfl + 16, | |
4952 | cr_base_egrovfl + 17, | |
4953 | }; | |
4954 | ||
4955 | /* | |
4956 | * same as cntr7322names and cntr7322indices, but for port-specific counters. | |
4957 | * portcntr7322indices is somewhat complicated by some registers needing | |
4958 | * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG | |
4959 | */ | |
4960 | static const char portcntr7322names[] = | |
4961 | "TxPkt\n" | |
4962 | "TxFlowPkt\n" | |
4963 | "TxWords\n" | |
4964 | "RxPkt\n" | |
4965 | "RxFlowPkt\n" | |
4966 | "RxWords\n" | |
4967 | "TxFlowStall\n" | |
4968 | "TxDmaDesc\n" /* 7220 and 7322-only */ | |
4969 | "E RxDlidFltr\n" /* 7220 and 7322-only */ | |
4970 | "IBStatusChng\n" | |
4971 | "IBLinkDown\n" | |
4972 | "IBLnkRecov\n" | |
4973 | "IBRxLinkErr\n" | |
4974 | "IBSymbolErr\n" | |
4975 | "RxLLIErr\n" | |
4976 | "RxBadFormat\n" | |
4977 | "RxBadLen\n" | |
4978 | "RxBufOvrfl\n" | |
4979 | "RxEBP\n" | |
4980 | "RxFlowCtlErr\n" | |
4981 | "RxICRCerr\n" | |
4982 | "RxLPCRCerr\n" | |
4983 | "RxVCRCerr\n" | |
4984 | "RxInvalLen\n" | |
4985 | "RxInvalPKey\n" | |
4986 | "RxPktDropped\n" | |
4987 | "TxBadLength\n" | |
4988 | "TxDropped\n" | |
4989 | "TxInvalLen\n" | |
4990 | "TxUnderrun\n" | |
4991 | "TxUnsupVL\n" | |
4992 | "RxLclPhyErr\n" /* 7220 and 7322-only from here down */ | |
4993 | "RxVL15Drop\n" | |
4994 | "RxVlErr\n" | |
4995 | "XcessBufOvfl\n" | |
4996 | "RxQPBadCtxt\n" /* 7322-only from here down */ | |
4997 | "TXBadHeader\n" | |
4998 | ; | |
4999 | ||
5000 | static const u32 portcntr7322indices[] = { | |
5001 | QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG, | |
5002 | crp_pktsendflow, | |
5003 | QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG, | |
5004 | QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG, | |
5005 | crp_pktrcvflowctrl, | |
5006 | QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG, | |
5007 | QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG, | |
5008 | crp_txsdmadesc | _PORT_64BIT_FLAG, | |
5009 | crp_rxdlidfltr, | |
5010 | crp_ibstatuschange, | |
5011 | QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG, | |
5012 | QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG, | |
5013 | QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG, | |
5014 | QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG, | |
5015 | QIBPORTCNTR_LLI | _PORT_VIRT_FLAG, | |
5016 | QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG, | |
5017 | QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG, | |
5018 | QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG, | |
5019 | QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG, | |
5020 | crp_rcvflowctrlviol, | |
5021 | QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG, | |
5022 | QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG, | |
5023 | QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG, | |
5024 | QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG, | |
5025 | QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG, | |
5026 | QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG, | |
5027 | crp_txminmaxlenerr, | |
5028 | crp_txdroppedpkt, | |
5029 | crp_txlenerr, | |
5030 | crp_txunderrun, | |
5031 | crp_txunsupvl, | |
5032 | QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG, | |
5033 | QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG, | |
5034 | QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG, | |
5035 | QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG, | |
5036 | crp_rxqpinvalidctxt, | |
5037 | crp_txhdrerr, | |
5038 | }; | |
5039 | ||
5040 | /* do all the setup to make the counter reads efficient later */ | |
5041 | static void init_7322_cntrnames(struct qib_devdata *dd) | |
5042 | { | |
5043 | int i, j = 0; | |
5044 | char *s; | |
5045 | ||
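	/*
	 * Count counter names, stopping once cfgctxts of the trailing
	 * per-context EgrOvfl names have been counted, so contexts that
	 * aren't configured are not reported.
	 */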
5046 | for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts; | |
5047 | i++) { | |
5048 | /* we always have at least one counter before the egrovfl */ | |
5049 | if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12)) | |
5050 | j = 1; | |
5051 | s = strchr(s + 1, '\n'); | |
5052 | if (s && j) | |
5053 | j++; | |
5054 | } | |
5055 | dd->cspec->ncntrs = i; | |
5056 | if (!s) | |
5057 | /* full list; size is without terminating null */ | |
5058 | dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1; | |
5059 | else | |
5060 | dd->cspec->cntrnamelen = 1 + s - cntr7322names; | |
5061 | dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs | |
5062 | * sizeof(u64), GFP_KERNEL); | |
5063 | if (!dd->cspec->cntrs) | |
5064 | qib_dev_err(dd, "Failed allocation for counters\n"); | |
5065 | ||
5066 | for (i = 0, s = (char *)portcntr7322names; s; i++) | |
5067 | s = strchr(s + 1, '\n'); | |
5068 | dd->cspec->nportcntrs = i - 1; | |
5069 | dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1; | |
5070 | for (i = 0; i < dd->num_pports; ++i) { | |
5071 | dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs | |
5072 | * sizeof(u64), GFP_KERNEL); | |
5073 | if (!dd->pport[i].cpspec->portcntrs) | |
5074 | qib_dev_err(dd, |
5075 | "Failed allocation for portcounters\n"); | |
5076 | } |
5077 | } | |
5078 | ||
5079 | static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep, | |
5080 | u64 **cntrp) | |
5081 | { | |
5082 | u32 ret; | |
5083 | ||
5084 | if (namep) { | |
5085 | ret = dd->cspec->cntrnamelen; | |
5086 | if (pos >= ret) | |
5087 | ret = 0; /* final read after getting everything */ | |
5088 | else | |
5089 | *namep = (char *) cntr7322names; | |
5090 | } else { | |
5091 | u64 *cntr = dd->cspec->cntrs; | |
5092 | int i; | |
5093 | ||
5094 | ret = dd->cspec->ncntrs * sizeof(u64); | |
5095 | if (!cntr || pos >= ret) { | |
5096 | /* everything read, or couldn't get memory */ | |
5097 | ret = 0; | |
5098 | goto done; | |
5099 | } | |
5100 | *cntrp = cntr; | |
5101 | for (i = 0; i < dd->cspec->ncntrs; i++) | |
5102 | if (cntr7322indices[i] & _PORT_64BIT_FLAG) | |
5103 | *cntr++ = read_7322_creg(dd, | |
5104 | cntr7322indices[i] & | |
5105 | _PORT_CNTR_IDXMASK); | |
5106 | else | |
5107 | *cntr++ = read_7322_creg32(dd, | |
5108 | cntr7322indices[i]); | |
5109 | } | |
5110 | done: | |
5111 | return ret; | |
5112 | } | |
5113 | ||
5114 | static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port, | |
5115 | char **namep, u64 **cntrp) | |
5116 | { | |
5117 | u32 ret; | |
5118 | ||
5119 | if (namep) { | |
5120 | ret = dd->cspec->portcntrnamelen; | |
5121 | if (pos >= ret) | |
5122 | ret = 0; /* final read after getting everything */ | |
5123 | else | |
5124 | *namep = (char *)portcntr7322names; | |
5125 | } else { | |
5126 | struct qib_pportdata *ppd = &dd->pport[port]; | |
5127 | u64 *cntr = ppd->cpspec->portcntrs; | |
5128 | int i; | |
5129 | ||
5130 | ret = dd->cspec->nportcntrs * sizeof(u64); | |
5131 | if (!cntr || pos >= ret) { | |
5132 | /* everything read, or couldn't get memory */ | |
5133 | ret = 0; | |
5134 | goto done; | |
5135 | } | |
5136 | *cntrp = cntr; | |
5137 | for (i = 0; i < dd->cspec->nportcntrs; i++) { | |
5138 | if (portcntr7322indices[i] & _PORT_VIRT_FLAG) | |
5139 | *cntr++ = qib_portcntr_7322(ppd, | |
5140 | portcntr7322indices[i] & | |
5141 | _PORT_CNTR_IDXMASK); | |
5142 | else if (portcntr7322indices[i] & _PORT_64BIT_FLAG) | |
5143 | *cntr++ = read_7322_creg_port(ppd, | |
5144 | portcntr7322indices[i] & | |
5145 | _PORT_CNTR_IDXMASK); | |
5146 | else | |
5147 | *cntr++ = read_7322_creg32_port(ppd, | |
5148 | portcntr7322indices[i]); | |
5149 | } | |
5150 | } | |
5151 | done: | |
5152 | return ret; | |
5153 | } | |
5154 | ||
5155 | /** | |
5156 | * qib_get_7322_faststats - get word counters from chip before they overflow | |
 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
 *
 * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
 * real purpose of this function is to maintain the notion of
 * "active time", which in turn is only logged into the eeprom,
 * which we don't have, yet, for 7322-based boards.
5163 | * | |
5164 | * called from add_timer | |
5165 | */ | |
5166 | static void qib_get_7322_faststats(unsigned long opaque) | |
5167 | { | |
5168 | struct qib_devdata *dd = (struct qib_devdata *) opaque; | |
5169 | struct qib_pportdata *ppd; | |
5170 | unsigned long flags; | |
5171 | u64 traffic_wds; | |
5172 | int pidx; | |
5173 | ||
5174 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
5175 | ppd = dd->pport + pidx; | |
5176 | ||
		/*
		 * If the port isn't enabled or isn't operational, or if
		 * diags are running (which can cause memory diags to fail),
		 * skip this port this time.
		 */
5182 | if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED) | |
5183 | || dd->diag_client) | |
5184 | continue; | |
5185 | ||
5186 | /* | |
5187 | * Maintain an activity timer, based on traffic | |
5188 | * exceeding a threshold, so we need to check the word-counts | |
5189 | * even if they are 64-bit. | |
5190 | */ | |
5191 | traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) + | |
5192 | qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND); | |
5193 | spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); | |
5194 | traffic_wds -= ppd->dd->traffic_wds; | |
5195 | ppd->dd->traffic_wds += traffic_wds; | |
5196 | spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); |
5197 | if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & | |
5198 | QIB_IB_QDR) && | |
5199 | (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | | |
5200 | QIBL_LINKACTIVE)) && | |
5201 | ppd->cpspec->qdr_dfe_time && | |
		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5203 | ppd->cpspec->qdr_dfe_on = 0; |
5204 | ||
5205 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | |
5206 | ppd->dd->cspec->r1 ? | |
5207 | QDR_STATIC_ADAPT_INIT_R1 : | |
5208 | QDR_STATIC_ADAPT_INIT); | |
5209 | force_h1(ppd); | |
5210 | } | |
5211 | } | |
5212 | mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); | |
5213 | } | |
5214 | ||
5215 | /* | |
 * If we were using MSIx, try to fall back to INTx.
5217 | */ | |
5218 | static int qib_7322_intr_fallback(struct qib_devdata *dd) | |
5219 | { | |
5220 | if (!dd->cspec->num_msix_entries) | |
5221 | return 0; /* already using INTx */ | |
5222 | ||
5223 | qib_devinfo(dd->pcidev, |
5224 | "MSIx interrupt not detected, trying INTx interrupts\n"); | |
5225 | qib_7322_nomsix(dd); |
5226 | qib_enable_intx(dd->pcidev); | |
5227 | qib_setup_7322_interrupt(dd, 0); | |
5228 | return 1; | |
5229 | } | |
5230 | ||
5231 | /* | |
5232 | * Reset the XGXS (between serdes and IBC). Slightly less intrusive | |
5233 | * than resetting the IBC or external link state, and useful in some | |
5234 | * cases to cause some retraining. To do this right, we reset IBC | |
5235 | * as well, then return to previous state (which may be still in reset) | |
5236 | * NOTE: some callers of this "know" this writes the current value | |
 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
5238 | * check all callers. | |
5239 | */ | |
5240 | static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd) | |
5241 | { | |
5242 | u64 val; | |
5243 | struct qib_devdata *dd = ppd->dd; | |
5244 | const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) | | |
5245 | SYM_MASK(IBPCSConfig_0, xcv_treset) | | |
5246 | SYM_MASK(IBPCSConfig_0, tx_rx_reset); | |
5247 | ||
5248 | val = qib_read_kreg_port(ppd, krp_ib_pcsconfig); | |
5249 | qib_write_kreg(dd, kr_hwerrmask, |
5250 | dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop)); | |
5251 | qib_write_kreg_port(ppd, krp_ibcctrl_a, |
5252 | ppd->cpspec->ibcctrl_a & | |
5253 | ~SYM_MASK(IBCCtrlA_0, IBLinkEn)); | |
5254 | ||
5255 | qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits); | |
5256 | qib_read_kreg32(dd, kr_scratch); | |
5257 | qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits); | |
5258 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | |
5259 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
5260 | qib_write_kreg(dd, kr_hwerrclear, |
5261 | SYM_MASK(HwErrClear, statusValidNoEopClear)); | |
5262 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | |
5263 | } |
5264 | ||
5265 | /* | |
5266 | * This code for non-IBTA-compliant IB speed negotiation is only known to | |
5267 | * work for the SDR to DDR transition, and only between an HCA and a switch | |
5268 | * with recent firmware. It is based on observed heuristics, rather than | |
5269 | * actual knowledge of the non-compliant speed negotiation. | |
5270 | * It has a number of hard-coded fields, since the hope is to rewrite this | |
 * when a spec is available on how the negotiation is intended to work.
5272 | */ | |
5273 | static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr, | |
5274 | u32 dcnt, u32 *data) | |
5275 | { | |
5276 | int i; | |
5277 | u64 pbc; | |
5278 | u32 __iomem *piobuf; | |
5279 | u32 pnum, control, len; | |
5280 | struct qib_devdata *dd = ppd->dd; | |
5281 | ||
5282 | i = 0; | |
	len = 7 + dcnt + 1; /* 7 dword header, dcnt data dwords, 1 dword icrc */
5284 | control = qib_7322_setpbc_control(ppd, len, 0, 15); | |
5285 | pbc = ((u64) control << 32) | len; | |
5286 | while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) { | |
5287 | if (i++ > 15) | |
5288 | return; | |
5289 | udelay(2); | |
5290 | } | |
5291 | /* disable header check on this packet, since it can't be valid */ | |
5292 | dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL); | |
5293 | writeq(pbc, piobuf); | |
5294 | qib_flush_wc(); | |
5295 | qib_pio_copy(piobuf + 2, hdr, 7); | |
5296 | qib_pio_copy(piobuf + 9, data, dcnt); | |
5297 | if (dd->flags & QIB_USE_SPCL_TRIG) { | |
5298 | u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023; | |
5299 | ||
5300 | qib_flush_wc(); | |
5301 | __raw_writel(0xaebecede, piobuf + spcl_off); | |
5302 | } | |
5303 | qib_flush_wc(); | |
5304 | qib_sendbuf_done(dd, pnum); | |
5305 | /* and re-enable hdr check */ | |
5306 | dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL); | |
5307 | } | |
5308 | ||
5309 | /* | |
5310 | * _start packet gets sent twice at start, _done gets sent twice at end | |
5311 | */ | |
5312 | static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which) | |
5313 | { | |
5314 | struct qib_devdata *dd = ppd->dd; | |
5315 | static u32 swapped; | |
5316 | u32 dw, i, hcnt, dcnt, *data; | |
5317 | static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba }; | |
5318 | static u32 madpayload_start[0x40] = { | |
5319 | 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, | |
5320 | 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, | |
5321 | 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */ | |
5322 | }; | |
5323 | static u32 madpayload_done[0x40] = { | |
5324 | 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, | |
5325 | 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, | |
5326 | 0x40000001, 0x1388, 0x15e, /* rest 0's */ | |
5327 | }; | |
5328 | ||
5329 | dcnt = ARRAY_SIZE(madpayload_start); | |
5330 | hcnt = ARRAY_SIZE(hdr); | |
5331 | if (!swapped) { | |
5332 | /* for maintainability, do it at runtime */ | |
5333 | for (i = 0; i < hcnt; i++) { | |
5334 | dw = (__force u32) cpu_to_be32(hdr[i]); | |
5335 | hdr[i] = dw; | |
5336 | } | |
5337 | for (i = 0; i < dcnt; i++) { | |
5338 | dw = (__force u32) cpu_to_be32(madpayload_start[i]); | |
5339 | madpayload_start[i] = dw; | |
5340 | dw = (__force u32) cpu_to_be32(madpayload_done[i]); | |
5341 | madpayload_done[i] = dw; | |
5342 | } | |
5343 | swapped = 1; | |
5344 | } | |
5345 | ||
5346 | data = which ? madpayload_done : madpayload_start; | |
5347 | ||
5348 | autoneg_7322_sendpkt(ppd, hdr, dcnt, data); | |
5349 | qib_read_kreg64(dd, kr_scratch); | |
5350 | udelay(2); | |
5351 | autoneg_7322_sendpkt(ppd, hdr, dcnt, data); | |
5352 | qib_read_kreg64(dd, kr_scratch); | |
5353 | udelay(2); | |
5354 | } | |
5355 | ||
5356 | /* | |
5357 | * Do the absolute minimum to cause an IB speed change, and make it | |
5358 | * ready, but don't actually trigger the change. The caller will | |
5359 | * do that when ready (if link is in Polling training state, it will | |
5360 | * happen immediately, otherwise when link next goes down) | |
5361 | * | |
 * This routine should only be used as part of the DDR autonegotiation
5363 | * code for devices that are not compliant with IB 1.2 (or code that | |
5364 | * fixes things up for same). | |
5365 | * | |
 * When the link has gone down with autoneg enabled, or autoneg has
 * failed and we give up until next time, we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed".
5369 | */ | |
5370 | static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed) | |
5371 | { | |
	u64 newctrlb;

5374 | newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK | |
5375 | IBA7322_IBC_IBTA_1_2_MASK | | |
5376 | IBA7322_IBC_MAX_SPEED_MASK); | |
5377 | ||
5378 | if (speed & (speed - 1)) /* multiple speeds */ | |
5379 | newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) | | |
5380 | IBA7322_IBC_IBTA_1_2_MASK | | |
5381 | IBA7322_IBC_MAX_SPEED_MASK; | |
5382 | else | |
5383 | newctrlb |= speed == QIB_IB_QDR ? | |
5384 | IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK : | |
5385 | ((speed == QIB_IB_DDR ? | |
5386 | IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR)); | |
5387 | ||
5388 | if (newctrlb == ppd->cpspec->ibcctrl_b) | |
5389 | return; | |
5390 | ||
5391 | ppd->cpspec->ibcctrl_b = newctrlb; | |
5392 | qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b); | |
5393 | qib_write_kreg(ppd->dd, kr_scratch, 0); | |
5394 | } | |
5395 | ||
5396 | /* | |
5397 | * This routine is only used when we are not talking to another | |
5398 | * IB 1.2-compliant device that we think can do DDR. | |
5399 | * (This includes all existing switch chips as of Oct 2007.) | |
5400 | * 1.2-compliant devices go directly to DDR prior to reaching INIT | |
5401 | */ | |
5402 | static void try_7322_autoneg(struct qib_pportdata *ppd) | |
5403 | { | |
5404 | unsigned long flags; | |
5405 | ||
5406 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
5407 | ppd->lflags |= QIBL_IB_AUTONEG_INPROG; | |
5408 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
5409 | qib_autoneg_7322_send(ppd, 0); | |
5410 | set_7322_ibspeed_fast(ppd, QIB_IB_DDR); | |
5411 | qib_7322_mini_pcs_reset(ppd); | |
5412 | /* 2 msec is minimum length of a poll cycle */ | |
5413 | queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work, |
5414 | msecs_to_jiffies(2)); | |
5415 | } |
5416 | ||
5417 | /* | |
5418 | * Handle the empirically determined mechanism for auto-negotiation | |
5419 | * of DDR speed with switches. | |
5420 | */ | |
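/*
 * A sketch of the sequence implemented below: busy-wait briefly for the
 * link to reach POLLQUIET and force it down; wait ~90 msec and then
 * ~1.7 sec (both waits expected to time out), with a PCS reset after
 * each; drop the advertised speed back to SDR; then allow up to
 * 250 msec for the link to train to INIT.
 */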
5421 | static void autoneg_7322_work(struct work_struct *work) | |
5422 | { | |
5423 | struct qib_pportdata *ppd; | |
5424 | struct qib_devdata *dd; | |
5425 | u64 startms; | |
5426 | u32 i; | |
5427 | unsigned long flags; | |
5428 | ||
5429 | ppd = container_of(work, struct qib_chippport_specific, | |
5430 | autoneg_work.work)->ppd; | |
5431 | dd = ppd->dd; | |
5432 | ||
5433 | startms = jiffies_to_msecs(jiffies); | |
5434 | ||
5435 | /* | |
	 * Busy wait for this first part; it should be at most a
5437 | * few hundred usec, since we scheduled ourselves for 2msec. | |
5438 | */ | |
5439 | for (i = 0; i < 25; i++) { | |
5440 | if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState) | |
5441 | == IB_7322_LT_STATE_POLLQUIET) { | |
5442 | qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE); | |
5443 | break; | |
5444 | } | |
5445 | udelay(100); | |
5446 | } | |
5447 | ||
5448 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | |
5449 | goto done; /* we got there early or told to stop */ | |
5450 | ||
	/* we expect this to time out */
5452 | if (wait_event_timeout(ppd->cpspec->autoneg_wait, | |
5453 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | |
5454 | msecs_to_jiffies(90))) | |
5455 | goto done; | |
5456 | qib_7322_mini_pcs_reset(ppd); | |
5457 | ||
	/* we expect this to time out */
5459 | if (wait_event_timeout(ppd->cpspec->autoneg_wait, | |
5460 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | |
5461 | msecs_to_jiffies(1700))) | |
5462 | goto done; | |
5463 | qib_7322_mini_pcs_reset(ppd); | |
5464 | ||
5465 | set_7322_ibspeed_fast(ppd, QIB_IB_SDR); | |
5466 | ||
5467 | /* | |
5468 | * Wait up to 250 msec for link to train and get to INIT at DDR; | |
5469 | * this should terminate early. | |
5470 | */ | |
5471 | wait_event_timeout(ppd->cpspec->autoneg_wait, | |
5472 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | |
5473 | msecs_to_jiffies(250)); | |
5474 | done: | |
5475 | if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) { | |
5476 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
5477 | ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; | |
5478 | if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) { | |
5479 | ppd->lflags |= QIBL_IB_AUTONEG_FAILED; | |
5480 | ppd->cpspec->autoneg_tries = 0; | |
5481 | } | |
5482 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
5483 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); | |
5484 | } | |
5485 | } | |
5486 | ||
5487 | /* | |
 * This routine is used to request that the IPG (inter-packet gap) be
 * set in the QLogic switch. Only called if r1.
5490 | */ | |
5491 | static void try_7322_ipg(struct qib_pportdata *ppd) | |
5492 | { | |
5493 | struct qib_ibport *ibp = &ppd->ibport_data; | |
5494 | struct ib_mad_send_buf *send_buf; | |
5495 | struct ib_mad_agent *agent; | |
5496 | struct ib_smp *smp; | |
5497 | unsigned delay; | |
5498 | int ret; | |
5499 | ||
	agent = ibp->rvp.send_agent;
5501 | if (!agent) |
5502 | goto retry; | |
5503 | ||
5504 | send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, | |
5505 | IB_MGMT_MAD_DATA, GFP_ATOMIC, |
5506 | IB_MGMT_BASE_VERSION); | |
5507 | if (IS_ERR(send_buf)) |
5508 | goto retry; | |
5509 | ||
5510 | if (!ibp->smi_ah) { | |
5511 | struct ib_ah *ah; |
5512 | ||
		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
		if (IS_ERR(ah))
			ret = PTR_ERR(ah);
5516 | else { |
5517 | send_buf->ah = ah; | |
			ibp->smi_ah = ibah_to_rvtah(ah);
5519 | ret = 0; |
5520 | } | |
5521 | } else { | |
5522 | send_buf->ah = &ibp->smi_ah->ibah; | |
5523 | ret = 0; | |
5524 | } | |
5525 | ||
5526 | smp = send_buf->mad; | |
5527 | smp->base_version = IB_MGMT_BASE_VERSION; | |
5528 | smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE; | |
5529 | smp->class_version = 1; | |
5530 | smp->method = IB_MGMT_METHOD_SEND; | |
5531 | smp->hop_cnt = 1; | |
5532 | smp->attr_id = QIB_VENDOR_IPG; | |
5533 | smp->attr_mod = 0; | |
5534 | ||
5535 | if (!ret) | |
5536 | ret = ib_post_send_mad(send_buf, NULL); | |
5537 | if (ret) | |
5538 | ib_free_send_mad(send_buf); | |
5539 | retry: | |
5540 | delay = 2 << ppd->cpspec->ipg_tries; | |
f0626710 TH |
5541 | queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work, |
5542 | msecs_to_jiffies(delay)); | |
f931551b RC |
5543 | } |
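/*
 * Illustrative sketch, not driver code: the requeue above backs off
 * exponentially (delay = 2 << ipg_tries msec), and ipg_7322_work()
 * below stops after 10 tries, so retries are spaced at
 * 2, 4, 8, ... up to 2048 msec.
 */
static unsigned long ipg_retry_delay_sketch(unsigned tries)
{
	return msecs_to_jiffies(2UL << tries);
}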
5544 | ||
5545 | /* | |
5546 | * Timeout handler for setting IPG. | |
5547 | * Only called if r1. | |
5548 | */ | |
5549 | static void ipg_7322_work(struct work_struct *work) | |
5550 | { | |
5551 | struct qib_pportdata *ppd; | |
5552 | ||
5553 | ppd = container_of(work, struct qib_chippport_specific, | |
5554 | ipg_work.work)->ppd; | |
5555 | if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE)) | |
5556 | && ++ppd->cpspec->ipg_tries <= 10) | |
5557 | try_7322_ipg(ppd); | |
5558 | } | |
5559 | ||
5560 | static u32 qib_7322_iblink_state(u64 ibcs) | |
5561 | { | |
5562 | u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState); | |
5563 | ||
5564 | switch (state) { | |
5565 | case IB_7322_L_STATE_INIT: | |
5566 | state = IB_PORT_INIT; | |
5567 | break; | |
5568 | case IB_7322_L_STATE_ARM: | |
5569 | state = IB_PORT_ARMED; | |
5570 | break; | |
5571 | case IB_7322_L_STATE_ACTIVE: | |
5572 | /* fall through */ | |
5573 | case IB_7322_L_STATE_ACT_DEFER: | |
5574 | state = IB_PORT_ACTIVE; | |
5575 | break; | |
5576 | default: /* fall through */ | |
5577 | case IB_7322_L_STATE_DOWN: | |
5578 | state = IB_PORT_DOWN; | |
5579 | break; | |
5580 | } | |
5581 | return state; | |
5582 | } | |
5583 | ||
5584 | /* returns the IBTA physical port state, mapped from the IBC link training state */ | |
5585 | static u8 qib_7322_phys_portstate(u64 ibcs) | |
5586 | { | |
5587 | u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState); | |
5588 | return qib_7322_physportstate[state]; | |
5589 | } | |
5590 | ||
5591 | static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) | |
5592 | { | |
5593 | int ret = 0, symadj = 0; | |
5594 | unsigned long flags; | |
5595 | int mult; | |
5596 | ||
5597 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
5598 | ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY; | |
5599 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
5600 | ||
5601 | /* Update our picture of width and speed from chip */ | |
5602 | if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) { | |
5603 | ppd->link_speed_active = QIB_IB_QDR; | |
5604 | mult = 4; | |
5605 | } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) { | |
5606 | ppd->link_speed_active = QIB_IB_DDR; | |
5607 | mult = 2; | |
5608 | } else { | |
5609 | ppd->link_speed_active = QIB_IB_SDR; | |
5610 | mult = 1; | |
5611 | } | |
5612 | if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) { | |
5613 | ppd->link_width_active = IB_WIDTH_4X; | |
5614 | mult *= 4; | |
5615 | } else | |
5616 | ppd->link_width_active = IB_WIDTH_1X; | |
5617 | ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)]; | |
5618 | ||
5619 | if (!ibup) { | |
5620 | u64 clr; | |
5621 | ||
5622 | /* Link went down. */ | |
5623 | /* do IPG MAD again after linkdown, even if last time failed */ | |
5624 | ppd->cpspec->ipg_tries = 0; | |
5625 | clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) & | |
5626 | (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) | | |
5627 | SYM_MASK(IBCStatusB_0, heartbeat_crosstalk)); | |
5628 | if (clr) | |
5629 | qib_write_kreg_port(ppd, krp_ibcstatus_b, clr); | |
5630 | if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | | |
5631 | QIBL_IB_AUTONEG_INPROG))) | |
5632 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); | |
5633 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | |
dde05cbd MH |
5634 | struct qib_qsfp_data *qd = |
5635 | &ppd->cpspec->qsfp_data; | |
a77fcf89 RC |
5636 | /* unlock the Tx settings, speed may change */ |
5637 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | |
5638 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
5639 | reset_tx_deemphasis_override)); | |
f931551b | 5640 | qib_cancel_sends(ppd); |
a77fcf89 RC |
5641 | /* on link down, ensure sane pcs state */ |
5642 | qib_7322_mini_pcs_reset(ppd); | |
dde05cbd MH |
5643 | /* schedule the qsfp refresh which should turn the link |
5644 | off */ | |
5645 | if (ppd->dd->flags & QIB_HAS_QSFP) { | |
8482d5d1 | 5646 | qd->t_insert = jiffies; |
042f36e1 | 5647 | queue_work(ib_wq, &qd->work); |
dde05cbd | 5648 | } |
f931551b RC |
5649 | spin_lock_irqsave(&ppd->sdma_lock, flags); |
5650 | if (__qib_sdma_running(ppd)) | |
5651 | __qib_sdma_process_event(ppd, | |
5652 | qib_sdma_event_e70_go_idle); | |
5653 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | |
5654 | } | |
5655 | clr = read_7322_creg32_port(ppd, crp_iblinkdown); | |
5656 | if (clr == ppd->cpspec->iblnkdownsnap) | |
5657 | ppd->cpspec->iblnkdowndelta++; | |
5658 | } else { | |
5659 | if (qib_compat_ddr_negotiate && | |
5660 | !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | | |
5661 | QIBL_IB_AUTONEG_INPROG)) && | |
5662 | ppd->link_speed_active == QIB_IB_SDR && | |
5663 | (ppd->link_speed_enabled & QIB_IB_DDR) | |
5664 | && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) { | |
5665 | /* we are SDR, and auto-negotiation enabled */ | |
5666 | ++ppd->cpspec->autoneg_tries; | |
5667 | if (!ppd->cpspec->ibdeltainprog) { | |
5668 | ppd->cpspec->ibdeltainprog = 1; | |
5669 | ppd->cpspec->ibsymdelta += | |
5670 | read_7322_creg32_port(ppd, | |
5671 | crp_ibsymbolerr) - | |
5672 | ppd->cpspec->ibsymsnap; | |
5673 | ppd->cpspec->iblnkerrdelta += | |
5674 | read_7322_creg32_port(ppd, | |
5675 | crp_iblinkerrrecov) - | |
5676 | ppd->cpspec->iblnkerrsnap; | |
5677 | } | |
5678 | try_7322_autoneg(ppd); | |
5679 | ret = 1; /* no other IB status change processing */ | |
5680 | } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && | |
5681 | ppd->link_speed_active == QIB_IB_SDR) { | |
5682 | qib_autoneg_7322_send(ppd, 1); | |
5683 | set_7322_ibspeed_fast(ppd, QIB_IB_DDR); | |
5684 | qib_7322_mini_pcs_reset(ppd); | |
5685 | udelay(2); | |
5686 | ret = 1; /* no other IB status change processing */ | |
5687 | } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && | |
5688 | (ppd->link_speed_active & QIB_IB_DDR)) { | |
5689 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
5690 | ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG | | |
5691 | QIBL_IB_AUTONEG_FAILED); | |
5692 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
5693 | ppd->cpspec->autoneg_tries = 0; | |
5694 | /* re-enable SDR, for next link down */ | |
5695 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); | |
5696 | wake_up(&ppd->cpspec->autoneg_wait); | |
5697 | symadj = 1; | |
5698 | } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) { | |
5699 | /* | |
5700 | * Clear autoneg failure flag, and do setup | |
5701 | * so we'll try next time link goes down and | |
5702 | * back to INIT (possibly connected to a | |
5703 | * different device). | |
5704 | */ | |
5705 | spin_lock_irqsave(&ppd->lflags_lock, flags); | |
5706 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | |
5707 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
5708 | ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK; | |
5709 | symadj = 1; | |
5710 | } | |
5711 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | |
5712 | symadj = 1; | |
5713 | if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10) | |
5714 | try_7322_ipg(ppd); | |
5715 | if (!ppd->cpspec->recovery_init) | |
5716 | setup_7322_link_recovery(ppd, 0); | |
5717 | ppd->cpspec->qdr_dfe_time = jiffies + | |
5718 | msecs_to_jiffies(QDR_DFE_DISABLE_DELAY); | |
5719 | } | |
5720 | ppd->cpspec->ibmalfusesnap = 0; | |
5721 | ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd, | |
5722 | crp_errlink); | |
5723 | } | |
5724 | if (symadj) { | |
5725 | ppd->cpspec->iblnkdownsnap = | |
5726 | read_7322_creg32_port(ppd, crp_iblinkdown); | |
5727 | if (ppd->cpspec->ibdeltainprog) { | |
5728 | ppd->cpspec->ibdeltainprog = 0; | |
5729 | ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd, | |
5730 | crp_ibsymbolerr) - ppd->cpspec->ibsymsnap; | |
5731 | ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd, | |
5732 | crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap; | |
5733 | } | |
5734 | } else if (!ibup && qib_compat_ddr_negotiate && | |
5735 | !ppd->cpspec->ibdeltainprog && | |
5736 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | |
5737 | ppd->cpspec->ibdeltainprog = 1; | |
5738 | ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, | |
5739 | crp_ibsymbolerr); | |
5740 | ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd, | |
5741 | crp_iblinkerrrecov); | |
5742 | } | |
5743 | ||
5744 | if (!ret) | |
5745 | qib_setup_7322_setextled(ppd, ibup); | |
5746 | return ret; | |
5747 | } | |
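/*
 * Worked example, illustrative only: in the width/speed bookkeeping
 * at the top of qib_7322_ib_updown(), QDR yields mult = 4 and a 4X
 * link multiplies that by 4, so mult = 16, i.e. 16 lane-equivalents
 * of 2.5 Gbps = 40 Gbps; delay_mult is then the ib_rate_to_delay[]
 * entry for the matching IB rate.
 */
static int link_rate_mult_sketch(void)
{
	int mult = 4;	/* QDR: 4 x the SDR signalling rate */

	mult *= 4;	/* 4X link width */
	return mult;	/* 16 -> mult_to_ib_rate() -> 40 Gbps rate */
}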
5748 | ||
5749 | /* | |
5750 | * Does read/modify/write to appropriate registers to | |
5751 | * set output and direction bits selected by mask. | |
5752 | * These are in their canonical positions (e.g. lsb of | |
5753 | * dir will end up in D48 of extctrl on existing chips). | |
5754 | * Returns contents of GP Inputs. | |
5755 | */ | |
5756 | static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask) | |
5757 | { | |
5758 | u64 read_val, new_out; | |
5759 | unsigned long flags; | |
5760 | ||
5761 | if (mask) { | |
5762 | /* some bits being written, lock access to GPIO */ | |
5763 | dir &= mask; | |
5764 | out &= mask; | |
5765 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | |
5766 | dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe)); | |
5767 | dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe)); | |
5768 | new_out = (dd->cspec->gpio_out & ~mask) | out; | |
5769 | ||
5770 | qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); | |
5771 | qib_write_kreg(dd, kr_gpio_out, new_out); | |
5772 | dd->cspec->gpio_out = new_out; | |
5773 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | |
5774 | } | |
5775 | /* | |
5776 | * It is unlikely that a read at this time would get valid | |
5777 | * data on a pin whose direction line was set in the same | |
5778 | * call to this function. We include the read here because | |
5779 | * that allows us to potentially combine a change on one pin with | |
5780 | * a read on another, and because the old code did something like | |
5781 | * this. | |
5782 | */ | |
5783 | read_val = qib_read_kreg64(dd, kr_extstatus); | |
5784 | return SYM_FIELD(read_val, EXTStatus, GPIOIn); | |
5785 | } | |
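/*
 * Hypothetical helper, illustrative only: driving one GPIO bit with
 * the read-modify-write primitive above; dir selects output enable,
 * out carries the level, and mask limits which bits change.
 */
static void gpio_set_bit_sketch(struct qib_devdata *dd, unsigned n, int level)
{
	u32 bit = 1U << n;

	gpio_7322_mod(dd, level ? bit : 0, bit, bit);
}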
5786 | ||
5787 | /* Enable writes to config EEPROM, if possible. Returns previous state */ | |
5788 | static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen) | |
5789 | { | |
5790 | int prev_wen; | |
5791 | u32 mask; | |
5792 | ||
5793 | mask = 1 << QIB_EEPROM_WEN_NUM; | |
5794 | prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM; | |
5795 | gpio_7322_mod(dd, wen ? 0 : mask, mask, mask); | |
5796 | ||
5797 | return prev_wen & 1; | |
5798 | } | |
5799 | ||
5800 | /* | |
5801 | * Read fundamental info we need to use the chip. These are | |
5802 | * the registers that describe chip capabilities, and are | |
5803 | * saved in shadow registers. | |
5804 | */ | |
5805 | static void get_7322_chip_params(struct qib_devdata *dd) | |
5806 | { | |
5807 | u64 val; | |
5808 | u32 piobufs; | |
5809 | int mtu; | |
5810 | ||
5811 | dd->palign = qib_read_kreg32(dd, kr_pagealign); | |
5812 | ||
5813 | dd->uregbase = qib_read_kreg32(dd, kr_userregbase); | |
5814 | ||
5815 | dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt); | |
5816 | dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase); | |
5817 | dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase); | |
5818 | dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase); | |
5819 | dd->pio2k_bufbase = dd->piobufbase & 0xffffffff; | |
5820 | ||
5821 | val = qib_read_kreg64(dd, kr_sendpiobufcnt); | |
5822 | dd->piobcnt2k = val & ~0U; | |
5823 | dd->piobcnt4k = val >> 32; | |
5824 | val = qib_read_kreg64(dd, kr_sendpiosize); | |
5825 | dd->piosize2k = val & ~0U; | |
5826 | dd->piosize4k = val >> 32; | |
5827 | ||
5828 | mtu = ib_mtu_enum_to_int(qib_ibmtu); | |
5829 | if (mtu == -1) | |
5830 | mtu = QIB_DEFAULT_MTU; | |
5831 | dd->pport[0].ibmtu = (u32)mtu; | |
5832 | dd->pport[1].ibmtu = (u32)mtu; | |
5833 | ||
5834 | /* these may be adjusted in init_chip_wc_pat() */ | |
5835 | dd->pio2kbase = (u32 __iomem *) | |
5836 | ((char __iomem *) dd->kregbase + dd->pio2k_bufbase); | |
5837 | dd->pio4kbase = (u32 __iomem *) | |
5838 | ((char __iomem *) dd->kregbase + | |
5839 | (dd->piobufbase >> 32)); | |
5840 | /* | |
5841 | * 4K buffers take 2 pages; we use roundup just to be | |
5842 | * paranoid; we calculate it once here, rather than on | |
5843 | * every buf allocate. | |
5844 | */ | |
5845 | dd->align4k = ALIGN(dd->piosize4k, dd->palign); | |
5846 | ||
5847 | piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS; | |
5848 | ||
5849 | dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / | |
5850 | (sizeof(u64) * BITS_PER_BYTE / 2); | |
5851 | } | |
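/*
 * Worked example, illustrative only: each 64-bit PIO-avail register
 * carries 2 status bits per send buffer, i.e. 32 buffers per
 * register, so the pioavregs expression above is just a ceiling
 * divide (a hypothetical 190 buffers would need 6 registers).
 */
static u32 pioavregs_sketch(u32 piobufs)
{
	const u32 bufs_per_reg = sizeof(u64) * BITS_PER_BYTE / 2; /* 32 */

	return ALIGN(piobufs, bufs_per_reg) / bufs_per_reg;
}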
5852 | ||
5853 | /* | |
5854 | * The chip base addresses in cspec and cpspec have to be set | |
5855 | * after possible init_chip_wc_pat(), rather than in | |
5856 | * get_7322_chip_params(), so this is split out as a separate function. | |
5857 | */ | |
5858 | static void qib_7322_set_baseaddrs(struct qib_devdata *dd) | |
5859 | { | |
5860 | u32 cregbase; | |
da12c1f6 | 5861 | |
f931551b RC |
5862 | cregbase = qib_read_kreg32(dd, kr_counterregbase); |
5863 | ||
5864 | dd->cspec->cregbase = (u64 __iomem *)(cregbase + | |
5865 | (char __iomem *)dd->kregbase); | |
5866 | ||
5867 | dd->egrtidbase = (u64 __iomem *) | |
5868 | ((char __iomem *) dd->kregbase + dd->rcvegrbase); | |
5869 | ||
5870 | /* port registers are defined as relative to base of chip */ | |
5871 | dd->pport[0].cpspec->kpregbase = | |
5872 | (u64 __iomem *)((char __iomem *)dd->kregbase); | |
5873 | dd->pport[1].cpspec->kpregbase = | |
5874 | (u64 __iomem *)(dd->palign + | |
5875 | (char __iomem *)dd->kregbase); | |
5876 | dd->pport[0].cpspec->cpregbase = | |
5877 | (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0], | |
5878 | kr_counterregbase) + (char __iomem *)dd->kregbase); | |
5879 | dd->pport[1].cpspec->cpregbase = | |
5880 | (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1], | |
5881 | kr_counterregbase) + (char __iomem *)dd->kregbase); | |
5882 | } | |
5883 | ||
5884 | /* | |
5885 | * This is a fairly special-purpose observer, so we only support | |
5886 | * the port-specific parts of SendCtrl. | |
5887 | */ | |
5888 | ||
5889 | #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \ | |
5890 | SYM_MASK(SendCtrl_0, SDmaEnable) | \ | |
5891 | SYM_MASK(SendCtrl_0, SDmaIntEnable) | \ | |
5892 | SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \ | |
5893 | SYM_MASK(SendCtrl_0, SDmaHalt) | \ | |
5894 | SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \ | |
5895 | SYM_MASK(SendCtrl_0, ForceCreditUpToDate)) | |
5896 | ||
5897 | static int sendctrl_hook(struct qib_devdata *dd, | |
5898 | const struct diag_observer *op, u32 offs, | |
5899 | u64 *data, u64 mask, int only_32) | |
5900 | { | |
5901 | unsigned long flags; | |
5902 | unsigned idx; | |
5903 | unsigned pidx; | |
5904 | struct qib_pportdata *ppd = NULL; | |
5905 | u64 local_data, all_bits; | |
5906 | ||
5907 | /* | |
5908 | * The fixed correspondence between Physical ports and pports is | |
5909 | * severed. We need to hunt for the ppd that corresponds | |
5910 | * to the offset we got. And we have to do that without admitting | |
5911 | * we know the stride, apparently. | |
5912 | */ | |
5913 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
5914 | u64 __iomem *psptr; | |
5915 | u32 psoffs; | |
5916 | ||
5917 | ppd = dd->pport + pidx; | |
5918 | if (!ppd->cpspec->kpregbase) | |
5919 | continue; | |
5920 | ||
5921 | psptr = ppd->cpspec->kpregbase + krp_sendctrl; | |
5922 | psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr); | |
5923 | if (psoffs == offs) | |
5924 | break; | |
5925 | } | |
5926 | ||
5927 | /* If pport is not being managed by driver, just avoid shadows. */ | |
5928 | if (pidx >= dd->num_pports) | |
5929 | ppd = NULL; | |
5930 | ||
5931 | /* In any case, "idx" is flat index in kreg space */ | |
5932 | idx = offs / sizeof(u64); | |
5933 | ||
5934 | all_bits = ~0ULL; | |
5935 | if (only_32) | |
5936 | all_bits >>= 32; | |
5937 | ||
5938 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | |
5939 | if (!ppd || (mask & all_bits) != all_bits) { | |
5940 | /* | |
5941 | * At least some mask bits are zero, so we need | |
5942 | * to read. The judgment call is whether to read from the | |
5943 | * reg or the shadow. First-cut: read the reg, and complain | |
5944 | * if any bits which should be shadowed are different | |
5945 | * from their shadowed value. | |
5946 | */ | |
5947 | if (only_32) | |
5948 | local_data = (u64)qib_read_kreg32(dd, idx); | |
5949 | else | |
5950 | local_data = qib_read_kreg64(dd, idx); | |
5951 | *data = (local_data & ~mask) | (*data & mask); | |
5952 | } | |
5953 | if (mask) { | |
5954 | /* | |
5955 | * At least some mask bits are one, so we need | |
5956 | * to write, but only shadow some bits. | |
5957 | */ | |
5958 | u64 sval, tval; /* Shadowed, transient */ | |
5959 | ||
5960 | /* | |
5961 | * New shadow val is bits we don't want to touch, | |
5962 | * ORed with bits we do, that are intended for shadow. | |
5963 | */ | |
5964 | if (ppd) { | |
5965 | sval = ppd->p_sendctrl & ~mask; | |
5966 | sval |= *data & SENDCTRL_SHADOWED & mask; | |
5967 | ppd->p_sendctrl = sval; | |
5968 | } else | |
5969 | sval = *data & SENDCTRL_SHADOWED & mask; | |
5970 | tval = sval | (*data & ~SENDCTRL_SHADOWED & mask); | |
5971 | qib_write_kreg(dd, idx, tval); | |
5972 | qib_write_kreg(dd, kr_scratch, 0ULL); | |
5973 | } | |
5974 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
5975 | return only_32 ? 4 : 8; | |
5976 | } | |
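/*
 * Illustrative model, not driver code: the shadow update in
 * sendctrl_hook() keeps untouched bits and folds in only the
 * written bits that fall inside SENDCTRL_SHADOWED; everything else
 * is written through to the register without being remembered.
 */
static u64 sendctrl_shadow_sketch(u64 shadow, u64 data, u64 mask)
{
	return (shadow & ~mask) | (data & SENDCTRL_SHADOWED & mask);
}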
5977 | ||
5978 | static const struct diag_observer sendctrl_0_observer = { | |
5979 | sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64), | |
5980 | KREG_IDX(SendCtrl_0) * sizeof(u64) | |
5981 | }; | |
5982 | ||
5983 | static const struct diag_observer sendctrl_1_observer = { | |
5984 | sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64), | |
5985 | KREG_IDX(SendCtrl_1) * sizeof(u64) | |
5986 | }; | |
5987 | ||
5988 | static ushort sdma_fetch_prio = 8; | |
5989 | module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO); | |
5990 | MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority"); | |
5991 | ||
5992 | /* Besides logging QSFP events, we set appropriate TxDDS values */ | |
5993 | static void init_txdds_table(struct qib_pportdata *ppd, int override); | |
5994 | ||
5995 | static void qsfp_7322_event(struct work_struct *work) | |
5996 | { | |
5997 | struct qib_qsfp_data *qd; | |
5998 | struct qib_pportdata *ppd; | |
8482d5d1 | 5999 | unsigned long pwrup; |
16d99812 | 6000 | unsigned long flags; |
f931551b RC |
6001 | int ret; |
6002 | u32 le2; | |
6003 | ||
6004 | qd = container_of(work, struct qib_qsfp_data, work); | |
6005 | ppd = qd->ppd; | |
dde05cbd MH |
6006 | pwrup = qd->t_insert + |
6007 | msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC); | |
f931551b | 6008 | |
dde05cbd MH |
6009 | /* Delay for 20 msecs to allow ModPrs resistor to setup */ |
6010 | mdelay(QSFP_MODPRS_LAG_MSEC); | |
6011 | ||
16d99812 MH |
6012 | if (!qib_qsfp_mod_present(ppd)) { |
6013 | ppd->cpspec->qsfp_data.modpresent = 0; | |
dde05cbd MH |
6014 | /* Set the physical link to disabled */ |
6015 | qib_set_ib_7322_lstate(ppd, 0, | |
6016 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | |
16d99812 MH |
6017 | spin_lock_irqsave(&ppd->lflags_lock, flags); |
6018 | ppd->lflags &= ~QIBL_LINKV; | |
6019 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
6020 | } else { | |
dde05cbd MH |
6021 | /* |
6022 | * Some QSFPs not only do not respond until the full power-up |
6023 | * time, but may behave badly if we probe them too early. So |
6024 | * hold off responding to insertion. |
6025 | */ | |
6026 | while (1) { | |
8482d5d1 | 6027 | if (time_is_before_jiffies(pwrup)) |
dde05cbd MH |
6028 | break; |
6029 | msleep(20); | |
6030 | } | |
6031 | ||
6032 | ret = qib_refresh_qsfp_cache(ppd, &qd->cache); | |
6033 | ||
6034 | /* | |
6035 | * Need to change LE2 back to defaults if we couldn't | |
6036 | * read the cable type (to handle cable swaps), so do this | |
6037 | * even on failure to read cable information. We don't | |
6038 | * get here for QME, so IS_QME check not needed here. | |
6039 | */ | |
6040 | if (!ret && !ppd->dd->cspec->r1) { | |
6041 | if (QSFP_IS_ACTIVE_FAR(qd->cache.tech)) | |
6042 | le2 = LE2_QME; | |
6043 | else if (qd->cache.atten[1] >= qib_long_atten && | |
6044 | QSFP_IS_CU(qd->cache.tech)) | |
6045 | le2 = LE2_5m; | |
6046 | else | |
6047 | le2 = LE2_DEFAULT; | |
6048 | } else | |
4634b794 | 6049 | le2 = LE2_DEFAULT; |
dde05cbd MH |
6050 | ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7)); |
6051 | /* | |
6052 | * We always change parameters, since we can choose | |
6053 | * values for cables without eeproms, and the cable may have | |
6054 | * changed from a cable with full or partial eeprom content | |
6055 | * to one with partial or no content. | |
6056 | */ | |
6057 | init_txdds_table(ppd, 0); | |
6058 | /* The physical link is re-enabled only when the |
16d99812 MH |
6059 | * previous state was DISABLED and the VALID bit is not |
6060 | * set. This should only happen when the cable has been | |
6061 | * physically pulled. */ | |
6062 | if (!ppd->cpspec->qsfp_data.modpresent && | |
6063 | (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) { | |
6064 | ppd->cpspec->qsfp_data.modpresent = 1; | |
dde05cbd MH |
6065 | qib_set_ib_7322_lstate(ppd, 0, |
6066 | QLOGIC_IB_IBCC_LINKINITCMD_SLEEP); | |
16d99812 MH |
6067 | spin_lock_irqsave(&ppd->lflags_lock, flags); |
6068 | ppd->lflags |= QIBL_LINKV; | |
6069 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
6070 | } | |
dde05cbd | 6071 | } |
f931551b RC |
6072 | } |
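/*
 * Hypothetical helper, illustrative only: the insertion path above
 * sleeps in 20 msec slices until the module power-up deadline
 * (pwrup) has passed before trusting the QSFP EEPROM.
 */
static void qsfp_wait_pwrup_sketch(unsigned long deadline)
{
	while (!time_is_before_jiffies(deadline))
		msleep(20);
}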
6073 | ||
6074 | /* | |
6075 | * There is little we can do but complain to the user if QSFP | |
6076 | * initialization fails. | |
6077 | */ | |
6078 | static void qib_init_7322_qsfp(struct qib_pportdata *ppd) | |
6079 | { | |
6080 | unsigned long flags; | |
6081 | struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data; | |
6082 | struct qib_devdata *dd = ppd->dd; | |
6083 | u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N; | |
6084 | ||
6085 | mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); | |
6086 | qd->ppd = ppd; | |
6087 | qib_qsfp_init(qd, qsfp_7322_event); | |
6088 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | |
6089 | dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert)); | |
6090 | dd->cspec->gpio_mask |= mod_prs_bit; | |
6091 | qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); | |
6092 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | |
6093 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | |
6094 | } | |
6095 | ||
6096 | /* | |
a77fcf89 | 6097 | * Called at device initialization time, and also if the txselect
f931551b RC |
6098 | * module parameter is changed. This is used for cables that don't |
6099 | * have valid QSFP EEPROMs (not present, or attenuation is zero). | |
6100 | * We initialize to the default, then if there is a specific | |
a77fcf89 RC |
6101 | * unit,port match, we use that (and set it immediately, for the |
6102 | * current speed, if the link is at INIT or better). | |
f931551b | 6103 | * String format is "default# unit#,port#=# ... u,p=#", separators must |
a77fcf89 RC |
6104 | * be a SPACE character. A newline terminates. The u,p=# tuples may |
6105 | * optionally have "u,p=#,#", where the final # is the H1 value. | |
f931551b RC |
6106 | * The last specific match is used (actually, all are used, but last |
6107 | * one is the one that winds up set); if none at all, fall back on default. | |
6108 | */ | |
6109 | static void set_no_qsfp_atten(struct qib_devdata *dd, int change) | |
6110 | { | |
6111 | char *nxt, *str; | |
a77fcf89 | 6112 | u32 pidx, unit, port, deflt, h1; |
f931551b | 6113 | unsigned long val; |
a77fcf89 | 6114 | int any = 0, seth1; |
e706203c | 6115 | int txdds_size; |
f931551b | 6116 | |
a77fcf89 | 6117 | str = txselect_list; |
f931551b | 6118 | |
a77fcf89 | 6119 | /* default number is validated in setup_txselect() */ |
f931551b RC |
6120 | deflt = simple_strtoul(str, &nxt, 0); |
6121 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | |
6122 | dd->pport[pidx].cpspec->no_eep = deflt; | |
6123 | ||
e706203c MM |
6124 | txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ; |
6125 | if (IS_QME(dd) || IS_QMH(dd)) | |
6126 | txdds_size += TXDDS_MFG_SZ; | |
6127 | ||
f931551b RC |
6128 | while (*nxt && nxt[1]) { |
6129 | str = ++nxt; | |
6130 | unit = simple_strtoul(str, &nxt, 0); | |
6131 | if (nxt == str || !*nxt || *nxt != ',') { | |
6132 | while (*nxt && *nxt++ != ' ') /* skip to next, if any */ | |
6133 | ; | |
6134 | continue; | |
6135 | } | |
6136 | str = ++nxt; | |
6137 | port = simple_strtoul(str, &nxt, 0); | |
6138 | if (nxt == str || *nxt != '=') { | |
6139 | while (*nxt && *nxt++ != ' ') /* skip to next, if any */ | |
6140 | ; | |
6141 | continue; | |
6142 | } | |
6143 | str = ++nxt; | |
6144 | val = simple_strtoul(str, &nxt, 0); | |
6145 | if (nxt == str) { | |
6146 | while (*nxt && *nxt++ != ' ') /* skip to next, if any */ | |
6147 | ; | |
6148 | continue; | |
6149 | } | |
e706203c | 6150 | if (val >= txdds_size) |
f931551b | 6151 | continue; |
a77fcf89 RC |
6152 | seth1 = 0; |
6153 | h1 = 0; /* gcc thinks it might be used uninitted */ | |
6154 | if (*nxt == ',' && nxt[1]) { | |
6155 | str = ++nxt; | |
6156 | h1 = (u32)simple_strtoul(str, &nxt, 0); | |
6157 | if (nxt == str) | |
6158 | while (*nxt && *nxt++ != ' ') /* skip */ | |
6159 | ; | |
6160 | else | |
6161 | seth1 = 1; | |
6162 | } | |
f931551b RC |
6163 | for (pidx = 0; dd->unit == unit && pidx < dd->num_pports; |
6164 | ++pidx) { | |
a77fcf89 RC |
6165 | struct qib_pportdata *ppd = &dd->pport[pidx]; |
6166 | ||
6167 | if (ppd->port != port || !ppd->link_speed_supported) | |
f931551b | 6168 | continue; |
a77fcf89 | 6169 | ppd->cpspec->no_eep = val; |
7c7a416e RC |
6170 | if (seth1) |
6171 | ppd->cpspec->h1_val = h1; | |
f931551b | 6172 | /* now change the IBC and serdes, overriding generic */ |
a77fcf89 | 6173 | init_txdds_table(ppd, 1); |
d70585f7 | 6174 | /* Re-enable the physical state machine on mezz boards |
dde05cbd MH |
6175 | * now that the correct settings have been set. |
6176 | * QSFP boards are handled by the QSFP event handler */ |
d70585f7 MH |
6177 | if (IS_QMH(dd) || IS_QME(dd)) |
6178 | qib_set_ib_7322_lstate(ppd, 0, | |
6179 | QLOGIC_IB_IBCC_LINKINITCMD_SLEEP); | |
f931551b RC |
6180 | any++; |
6181 | } | |
6182 | if (*nxt == '\n') | |
6183 | break; /* done */ | |
6184 | } | |
6185 | if (change && !any) { | |
6186 | /* no specific setting, use the default. | |
6187 | * Change the IBC and serdes, but since it's | |
6188 | * general, don't override specific settings. | |
6189 | */ | |
a77fcf89 RC |
6190 | for (pidx = 0; pidx < dd->num_pports; ++pidx) |
6191 | if (dd->pport[pidx].link_speed_supported) | |
6192 | init_txdds_table(&dd->pport[pidx], 0); | |
f931551b RC |
6193 | } |
6194 | } | |
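/*
 * Illustrative txselect strings (hypothetical values) for the
 * parser above:
 *   "10"                 every port uses tx serdes table entry 10
 *   "10 0,1=13 0,2=14"   default 10; unit 0 port 1 -> 13, port 2 -> 14
 *   "10 0,1=13,7"        as above, also forcing H1 to 7 on that port
 * Fields are space-separated; a newline ends parsing.
 */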
6195 | ||
a77fcf89 RC |
6196 | /* handle the txselect parameter changing */ |
6197 | static int setup_txselect(const char *str, struct kernel_param *kp) | |
f931551b RC |
6198 | { |
6199 | struct qib_devdata *dd; | |
6200 | unsigned long val; | |
2fadd831 | 6201 | char *n; |
da12c1f6 | 6202 | |
f931551b | 6203 | if (strlen(str) >= MAX_ATTEN_LEN) { |
7fac3301 | 6204 | pr_info("txselect_values string too long\n"); |
f931551b RC |
6205 | return -ENOSPC; |
6206 | } | |
2fadd831 MM |
6207 | val = simple_strtoul(str, &n, 0); |
6208 | if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + | |
e706203c | 6209 | TXDDS_MFG_SZ)) { |
7fac3301 | 6210 | pr_info("txselect_values must start with a number < %d\n", |
e706203c | 6211 | TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ); |
2fadd831 | 6212 | return -EINVAL; |
f931551b | 6213 | } |
7fac3301 | 6214 | strcpy(txselect_list, str); |
2fadd831 | 6215 | |
f931551b | 6216 | list_for_each_entry(dd, &qib_dev_list, list) |
a77fcf89 RC |
6217 | if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) |
6218 | set_no_qsfp_atten(dd, 1); | |
f931551b RC |
6219 | return 0; |
6220 | } | |
6221 | ||
6222 | /* | |
6223 | * Write the final few registers that depend on some of the | |
6224 | * init setup. Done late in init, just before bringing up | |
6225 | * the serdes. | |
6226 | */ | |
6227 | static int qib_late_7322_initreg(struct qib_devdata *dd) | |
6228 | { | |
6229 | int ret = 0, n; | |
6230 | u64 val; | |
6231 | ||
6232 | qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize); | |
6233 | qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize); | |
6234 | qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt); | |
6235 | qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); | |
6236 | val = qib_read_kreg64(dd, kr_sendpioavailaddr); | |
6237 | if (val != dd->pioavailregs_phys) { | |
7fac3301 MM |
6238 | qib_dev_err(dd, |
6239 | "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n", | |
6240 | (unsigned long) dd->pioavailregs_phys, | |
6241 | (unsigned long long) val); | |
f931551b RC |
6242 | ret = -EINVAL; |
6243 | } | |
6244 | ||
6245 | n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; | |
6246 | qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL); | |
6247 | /* driver sends get pkey, lid, etc. checking also, to catch bugs */ | |
6248 | qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL); | |
6249 | ||
6250 | qib_register_observer(dd, &sendctrl_0_observer); | |
6251 | qib_register_observer(dd, &sendctrl_1_observer); | |
6252 | ||
6253 | dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN; | |
6254 | qib_write_kreg(dd, kr_control, dd->control); | |
6255 | /* | |
6256 | * Set SendDmaFetchPriority and init Tx params, including | |
6257 | * QSFP handler on boards that have QSFP. | |
6258 | * First set our default attenuation entry for cables that | |
6259 | * don't have valid attenuation. | |
6260 | */ | |
6261 | set_no_qsfp_atten(dd, 0); | |
6262 | for (n = 0; n < dd->num_pports; ++n) { | |
6263 | struct qib_pportdata *ppd = dd->pport + n; | |
6264 | ||
6265 | qib_write_kreg_port(ppd, krp_senddmaprioritythld, | |
6266 | sdma_fetch_prio & 0xf); | |
6267 | /* Initialize qsfp if present on board. */ | |
6268 | if (dd->flags & QIB_HAS_QSFP) | |
6269 | qib_init_7322_qsfp(ppd); | |
6270 | } | |
6271 | dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN; | |
6272 | qib_write_kreg(dd, kr_control, dd->control); | |
6273 | ||
6274 | return ret; | |
6275 | } | |
6276 | ||
6277 | /* per IB port errors. */ | |
6278 | #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \ | |
6279 | MASK_ACROSS(8, 15)) | |
6280 | #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41)) | |
6281 | #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \ | |
6282 | MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \ | |
6283 | MASK_ACROSS(0, 11)) | |
6284 | ||
6285 | /* | |
6286 | * Write the initialization per-port registers that need to be done at | |
6287 | * driver load and after reset completes (i.e., that aren't done as part | |
6288 | * of other init procedures called from qib_init.c). | |
6289 | * Some of these should be redundant on reset, but play safe. | |
6290 | */ | |
6291 | static void write_7322_init_portregs(struct qib_pportdata *ppd) | |
6292 | { | |
6293 | u64 val; | |
6294 | int i; | |
6295 | ||
6296 | if (!ppd->link_speed_supported) { | |
6297 | /* no buffer credits for this port */ | |
6298 | for (i = 1; i < 8; i++) | |
6299 | qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0); | |
6300 | qib_write_kreg_port(ppd, krp_ibcctrl_b, 0); | |
6301 | qib_write_kreg(ppd->dd, kr_scratch, 0); | |
6302 | return; | |
6303 | } | |
6304 | ||
6305 | /* | |
6306 | * Set the number of supported virtual lanes in IBC, | |
6307 | * for flow control packet handling on unsupported VLs | |
6308 | */ | |
6309 | val = qib_read_kreg_port(ppd, krp_ibsdtestiftx); | |
6310 | val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP); | |
6311 | val |= (u64)(ppd->vls_supported - 1) << | |
6312 | SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP); | |
6313 | qib_write_kreg_port(ppd, krp_ibsdtestiftx, val); | |
6314 | ||
6315 | qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP); | |
6316 | ||
6317 | /* enable tx header checking */ | |
6318 | qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY | | |
6319 | IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID | | |
6320 | IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ); | |
6321 | ||
6322 | qib_write_kreg_port(ppd, krp_ncmodectrl, | |
6323 | SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal)); | |
6324 | ||
6325 | /* | |
6326 | * Unconditionally clear the bufmask bits. If SDMA is | |
6327 | * enabled, we'll set them appropriately later. | |
6328 | */ | |
6329 | qib_write_kreg_port(ppd, krp_senddmabufmask0, 0); | |
6330 | qib_write_kreg_port(ppd, krp_senddmabufmask1, 0); | |
6331 | qib_write_kreg_port(ppd, krp_senddmabufmask2, 0); | |
6332 | if (ppd->dd->cspec->r1) | |
6333 | ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate); | |
6334 | } | |
6335 | ||
6336 | /* | |
6337 | * Write the initialization per-device registers that need to be done at | |
6338 | * driver load and after reset completes (i.e., that aren't done as part | |
6339 | * of other init procedures called from qib_init.c). Also write per-port | |
6340 | * registers that are affected by overall device config, such as QP mapping | |
6341 | * Some of these should be redundant on reset, but play safe. | |
6342 | */ | |
6343 | static void write_7322_initregs(struct qib_devdata *dd) | |
6344 | { | |
6345 | struct qib_pportdata *ppd; | |
6346 | int i, pidx; | |
6347 | u64 val; | |
6348 | ||
6349 | /* Set Multicast QPs received by port 2 to map to context one. */ | |
6350 | qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1); | |
6351 | ||
6352 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | |
6353 | unsigned n, regno; | |
6354 | unsigned long flags; | |
6355 | ||
2528ea60 MM |
6356 | if (dd->n_krcv_queues < 2 || |
6357 | !dd->pport[pidx].link_speed_supported) | |
f931551b RC |
6358 | continue; |
6359 | ||
6360 | ppd = &dd->pport[pidx]; | |
6361 | ||
6362 | /* be paranoid against later code motion, etc. */ | |
6363 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | |
6364 | ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable); | |
6365 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | |
6366 | ||
6367 | /* Initialize QP to context mapping */ | |
6368 | regno = krp_rcvqpmaptable; | |
6369 | val = 0; | |
6370 | if (dd->num_pports > 1) | |
6371 | n = dd->first_user_ctxt / dd->num_pports; | |
6372 | else | |
6373 | n = dd->first_user_ctxt - 1; | |
6374 | for (i = 0; i < 32; ) { | |
6375 | unsigned ctxt; | |
6376 | ||
6377 | if (dd->num_pports > 1) | |
6378 | ctxt = (i % n) * dd->num_pports + pidx; | |
6379 | else if (i % n) | |
6380 | ctxt = (i % n) + 1; | |
6381 | else | |
6382 | ctxt = ppd->hw_pidx; | |
6383 | val |= ctxt << (5 * (i % 6)); | |
6384 | i++; | |
6385 | if (i % 6 == 0) { | |
6386 | qib_write_kreg_port(ppd, regno, val); | |
6387 | val = 0; | |
6388 | regno++; | |
6389 | } | |
6390 | } | |
6391 | qib_write_kreg_port(ppd, regno, val); | |
6392 | } | |
6393 | ||
6394 | /* | |
6395 | * Set up interrupt mitigation for kernel contexts, but | |
6396 | * not user contexts (user contexts use interrupts when | |
6397 | * stalled waiting for any packet, so want those interrupts | |
6398 | * right away). | |
6399 | */ | |
6400 | for (i = 0; i < dd->first_user_ctxt; i++) { | |
6401 | dd->cspec->rcvavail_timeout[i] = rcv_int_timeout; | |
6402 | qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout); | |
6403 | } | |
6404 | ||
6405 | /* | |
6406 | * Initialize as (disabled) rcvflow tables. Application code | |
6407 | * will set up each flow as it uses the flow. | |
6408 | * Doesn't clear any of the error bits that might be set. | |
6409 | */ | |
6410 | val = TIDFLOW_ERRBITS; /* these are W1C */ | |
0502f94c | 6411 | for (i = 0; i < dd->cfgctxts; i++) { |
f931551b | 6412 | int flow; |
da12c1f6 | 6413 | |
f931551b RC |
6414 | for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) |
6415 | qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); | |
6416 | } | |
6417 | ||
6418 | /* | |
6419 | * Dual-port cards init to dual-port recovery, single-port cards | |
6420 | * to the one port. Dual-port cards may later adjust to 1 port, | |
6421 | * and then back to dual port if both ports are connected. | |
6422 | */ | |
6423 | if (dd->num_pports) | |
6424 | setup_7322_link_recovery(dd->pport, dd->num_pports > 1); | |
6425 | } | |
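/*
 * Illustrative model, not driver code: the QP-to-context map above
 * packs 32 five-bit context numbers six to a register; entry i
 * lands at bit offset 5 * (i % 6) of krp_rcvqpmaptable + i / 6.
 * regs[] is assumed zeroed by the caller.
 */
static void qpmap_pack_sketch(u64 regs[6], const u8 ctxt[32])
{
	int i;

	for (i = 0; i < 32; i++)
		regs[i / 6] |= (u64)ctxt[i] << (5 * (i % 6));
}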
6426 | ||
6427 | static int qib_init_7322_variables(struct qib_devdata *dd) | |
6428 | { | |
6429 | struct qib_pportdata *ppd; | |
6430 | unsigned features, pidx, sbufcnt; | |
6431 | int ret, mtu; | |
6432 | u32 sbufs, updthresh; | |
d4988623 | 6433 | resource_size_t vl15off; |
f931551b RC |
6434 | |
6435 | /* pport structs are contiguous, allocated after devdata */ | |
6436 | ppd = (struct qib_pportdata *)(dd + 1); | |
6437 | dd->pport = ppd; | |
6438 | ppd[0].dd = dd; | |
6439 | ppd[1].dd = dd; | |
6440 | ||
6441 | dd->cspec = (struct qib_chip_specific *)(ppd + 2); | |
6442 | ||
6443 | ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1); | |
6444 | ppd[1].cpspec = &ppd[0].cpspec[1]; | |
6445 | ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */ | |
6446 | ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */ | |
6447 | ||
6448 | spin_lock_init(&dd->cspec->rcvmod_lock); | |
6449 | spin_lock_init(&dd->cspec->gpio_lock); | |
6450 | ||
6451 | /* we haven't yet set QIB_PRESENT, so use read directly */ | |
6452 | dd->revision = readq(&dd->kregbase[kr_revision]); | |
6453 | ||
6454 | if ((dd->revision & 0xffffffffU) == 0xffffffffU) { | |
7fac3301 MM |
6455 | qib_dev_err(dd, |
6456 | "Revision register read failure, giving up initialization\n"); | |
f931551b RC |
6457 | ret = -ENODEV; |
6458 | goto bail; | |
6459 | } | |
6460 | dd->flags |= QIB_PRESENT; /* now register routines work */ | |
6461 | ||
6462 | dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor); | |
6463 | dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor); | |
6464 | dd->cspec->r1 = dd->minrev == 1; | |
6465 | ||
6466 | get_7322_chip_params(dd); | |
6467 | features = qib_7322_boardname(dd); | |
6468 | ||
6469 | /* now that piobcnt2k and 4k set, we can allocate these */ | |
6470 | sbufcnt = dd->piobcnt2k + dd->piobcnt4k + | |
6471 | NUM_VL15_BUFS + BITS_PER_LONG - 1; | |
6472 | sbufcnt /= BITS_PER_LONG; | |
6473 | dd->cspec->sendchkenable = kmalloc(sbufcnt * | |
6474 | sizeof(*dd->cspec->sendchkenable), GFP_KERNEL); | |
6475 | dd->cspec->sendgrhchk = kmalloc(sbufcnt * | |
6476 | sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL); | |
6477 | dd->cspec->sendibchk = kmalloc(sbufcnt * | |
6478 | sizeof(*dd->cspec->sendibchk), GFP_KERNEL); | |
6479 | if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk || | |
6480 | !dd->cspec->sendibchk) { | |
6481 | qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n"); | |
6482 | ret = -ENOMEM; | |
6483 | goto bail; | |
6484 | } | |
6485 | ||
6486 | ppd = dd->pport; | |
6487 | ||
6488 | /* | |
6489 | * GPIO bits for TWSI data and clock, | |
6490 | * used for serial EEPROM. | |
6491 | */ | |
6492 | dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; | |
6493 | dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; | |
6494 | dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV; | |
6495 | ||
6496 | dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY | | |
6497 | QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP | | |
6498 | QIB_HAS_THRESH_UPDATE | | |
6499 | (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0); | |
6500 | dd->flags |= qib_special_trigger ? | |
6501 | QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA; | |
6502 | ||
6503 | /* | |
6504 | * Setup initial values. These may change when PAT is enabled, but | |
6505 | * we need these to do initial chip register accesses. | |
6506 | */ | |
6507 | qib_7322_set_baseaddrs(dd); | |
6508 | ||
6509 | mtu = ib_mtu_enum_to_int(qib_ibmtu); | |
6510 | if (mtu == -1) | |
6511 | mtu = QIB_DEFAULT_MTU; | |
6512 | ||
6513 | dd->cspec->int_enable_mask = QIB_I_BITSEXTANT; | |
6514 | /* all hwerrors become interrupts, unless special purposed */ | |
6515 | dd->cspec->hwerrmask = ~0ULL; | |
6516 | /* link_recovery setup causes these errors, so ignore them, | |
6517 | * other than clearing them when they occur */ | |
6518 | dd->cspec->hwerrmask &= | |
6519 | ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) | | |
6520 | SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) | | |
6521 | HWE_MASK(LATriggered)); | |
6522 | ||
6523 | for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) { | |
6524 | struct qib_chippport_specific *cp = ppd->cpspec; | |
da12c1f6 | 6525 | |
f931551b RC |
6526 | ppd->link_speed_supported = features & PORT_SPD_CAP; |
6527 | features >>= PORT_SPD_CAP_SHIFT; | |
6528 | if (!ppd->link_speed_supported) { | |
6529 | /* single port mode (7340, or configured) */ | |
6530 | dd->skip_kctxt_mask |= 1 << pidx; | |
6531 | if (pidx == 0) { | |
6532 | /* Make sure port is disabled. */ | |
6533 | qib_write_kreg_port(ppd, krp_rcvctrl, 0); | |
6534 | qib_write_kreg_port(ppd, krp_ibcctrl_a, 0); | |
6535 | ppd[0] = ppd[1]; | |
6536 | dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, | |
6537 | IBSerdesPClkNotDetectMask_0) | |
6538 | | SYM_MASK(HwErrMask, | |
6539 | SDmaMemReadErrMask_0)); | |
6540 | dd->cspec->int_enable_mask &= ~( | |
6541 | SYM_MASK(IntMask, SDmaCleanupDoneMask_0) | | |
6542 | SYM_MASK(IntMask, SDmaIdleIntMask_0) | | |
6543 | SYM_MASK(IntMask, SDmaProgressIntMask_0) | | |
6544 | SYM_MASK(IntMask, SDmaIntMask_0) | | |
6545 | SYM_MASK(IntMask, ErrIntMask_0) | | |
6546 | SYM_MASK(IntMask, SendDoneIntMask_0)); | |
6547 | } else { | |
6548 | /* Make sure port is disabled. */ | |
6549 | qib_write_kreg_port(ppd, krp_rcvctrl, 0); | |
6550 | qib_write_kreg_port(ppd, krp_ibcctrl_a, 0); | |
6551 | dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, | |
6552 | IBSerdesPClkNotDetectMask_1) | |
6553 | | SYM_MASK(HwErrMask, | |
6554 | SDmaMemReadErrMask_1)); | |
6555 | dd->cspec->int_enable_mask &= ~( | |
6556 | SYM_MASK(IntMask, SDmaCleanupDoneMask_1) | | |
6557 | SYM_MASK(IntMask, SDmaIdleIntMask_1) | | |
6558 | SYM_MASK(IntMask, SDmaProgressIntMask_1) | | |
6559 | SYM_MASK(IntMask, SDmaIntMask_1) | | |
6560 | SYM_MASK(IntMask, ErrIntMask_1) | | |
6561 | SYM_MASK(IntMask, SendDoneIntMask_1)); | |
6562 | } | |
6563 | continue; | |
6564 | } | |
6565 | ||
6566 | dd->num_pports++; | |
7d7632ad MM |
6567 | ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports); |
6568 | if (ret) { | |
6569 | dd->num_pports--; | |
6570 | goto bail; | |
6571 | } | |
f931551b RC |
6572 | |
6573 | ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; | |
6574 | ppd->link_width_enabled = IB_WIDTH_4X; | |
6575 | ppd->link_speed_enabled = ppd->link_speed_supported; | |
6576 | /* | |
6577 | * Set the initial values to reasonable default, will be set | |
6578 | * for real when link is up. | |
6579 | */ | |
6580 | ppd->link_width_active = IB_WIDTH_4X; | |
6581 | ppd->link_speed_active = QIB_IB_SDR; | |
6582 | ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS]; | |
6583 | switch (qib_num_cfg_vls) { | |
6584 | case 1: | |
6585 | ppd->vls_supported = IB_VL_VL0; | |
6586 | break; | |
6587 | case 2: | |
6588 | ppd->vls_supported = IB_VL_VL0_1; | |
6589 | break; | |
6590 | default: | |
6591 | qib_devinfo(dd->pcidev, | |
6592 | "Invalid num_vls %u, using 4 VLs\n", | |
6593 | qib_num_cfg_vls); | |
6594 | qib_num_cfg_vls = 4; | |
6595 | /* fall through */ | |
6596 | case 4: | |
6597 | ppd->vls_supported = IB_VL_VL0_3; | |
6598 | break; | |
6599 | case 8: | |
6600 | if (mtu <= 2048) | |
6601 | ppd->vls_supported = IB_VL_VL0_7; | |
6602 | else { | |
6603 | qib_devinfo(dd->pcidev, | |
a46a2802 | 6604 | "Invalid num_vls %u for MTU %d, using 4 VLs\n", |
f931551b RC |
6605 | qib_num_cfg_vls, mtu); |
6606 | ppd->vls_supported = IB_VL_VL0_3; | |
6607 | qib_num_cfg_vls = 4; | |
6608 | } | |
6609 | break; | |
6610 | } | |
6611 | ppd->vls_operational = ppd->vls_supported; | |
6612 | ||
6613 | init_waitqueue_head(&cp->autoneg_wait); | |
6614 | INIT_DELAYED_WORK(&cp->autoneg_work, | |
6615 | autoneg_7322_work); | |
6616 | if (ppd->dd->cspec->r1) | |
6617 | INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work); | |
6618 | ||
6619 | /* | |
6620 | * For Mez and similar cards, no qsfp info, so do | |
6621 | * the "cable info" setup here. Can be overridden | |
6622 | * in adapter-specific routines. | |
6623 | */ | |
7c7a416e RC |
6624 | if (!(dd->flags & QIB_HAS_QSFP)) { |
6625 | if (!IS_QMH(dd) && !IS_QME(dd)) | |
7fac3301 MM |
6626 | qib_devinfo(dd->pcidev, |
6627 | "IB%u:%u: Unknown mezzanine card type\n", | |
6628 | dd->unit, ppd->port); | |
a77fcf89 | 6629 | cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME; |
f931551b | 6630 | /* |
a77fcf89 RC |
6631 | * Choose center value as default tx serdes setting |
6632 | * until changed through module parameter. | |
f931551b | 6633 | */ |
a77fcf89 RC |
6634 | ppd->cpspec->no_eep = IS_QMH(dd) ? |
6635 | TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4; | |
f931551b RC |
6636 | } else |
6637 | cp->h1_val = H1_FORCE_VAL; | |
6638 | ||
6639 | /* Avoid writes to chip for mini_init */ | |
6640 | if (!qib_mini_init) | |
6641 | write_7322_init_portregs(ppd); | |
6642 | ||
6643 | init_timer(&cp->chase_timer); | |
6644 | cp->chase_timer.function = reenable_chase; | |
6645 | cp->chase_timer.data = (unsigned long)ppd; | |
6646 | ||
6647 | ppd++; | |
6648 | } | |
6649 | ||
0a43e117 MM |
6650 | dd->rcvhdrentsize = qib_rcvhdrentsize ? |
6651 | qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE; | |
6652 | dd->rcvhdrsize = qib_rcvhdrsize ? | |
6653 | qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE; | |
a77fcf89 | 6654 | dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); |
f931551b RC |
6655 | |
6656 | /* we always allocate at least 2048 bytes for eager buffers */ | |
6657 | dd->rcvegrbufsize = max(mtu, 2048); | |
9e1c0e43 MM |
6658 | BUG_ON(!is_power_of_2(dd->rcvegrbufsize)); |
6659 | dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize); | |
f931551b RC |
6660 | |
6661 | qib_7322_tidtemplate(dd); | |
6662 | ||
6663 | /* | |
6664 | * We can request a receive interrupt for 1 or | |
6665 | * more packets from current offset. | |
6666 | */ | |
6667 | dd->rhdrhead_intr_off = | |
6668 | (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT; | |
6669 | ||
6670 | /* setup the stats timer; the add_timer is done at end of init */ | |
6671 | init_timer(&dd->stats_timer); | |
6672 | dd->stats_timer.function = qib_get_7322_faststats; | |
6673 | dd->stats_timer.data = (unsigned long) dd; | |
6674 | ||
6675 | dd->ureg_align = 0x10000; /* 64KB alignment */ | |
6676 | ||
6677 | dd->piosize2kmax_dwords = dd->piosize2k >> 2; | |
6678 | ||
6679 | qib_7322_config_ctxts(dd); | |
6680 | qib_set_ctxtcnt(dd); | |
6681 | ||
d4988623 LR |
6682 | /* |
6683 | * We do not set WC on the VL15 buffers to avoid | |
6684 | * a rare problem with unaligned writes from | |
6685 | * interrupt-flushed store buffers, so we need | |
6686 | * to map those separately here. We can't solve | |
6687 | * this for the rarely used mtrr case. | |
6688 | */ | |
6689 | ret = init_chip_wc_pat(dd, 0); | |
6690 | if (ret) | |
6691 | goto bail; | |
fce24a9d | 6692 | |
d4988623 LR |
6693 | /* vl15 buffers start just after the 4k buffers */ |
6694 | vl15off = dd->physaddr + (dd->piobufbase >> 32) + | |
6695 | dd->piobcnt4k * dd->align4k; | |
6696 | dd->piovl15base = ioremap_nocache(vl15off, | |
6697 | NUM_VL15_BUFS * dd->align4k); | |
6698 | if (!dd->piovl15base) { | |
6699 | ret = -ENOMEM; | |
6700 | goto bail; | |
f931551b | 6701 | } |
d4988623 | 6702 | |
f931551b RC |
6703 | qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ |
6704 | ||
6705 | ret = 0; | |
6706 | if (qib_mini_init) | |
6707 | goto bail; | |
6708 | if (!dd->num_pports) { | |
6709 | qib_dev_err(dd, "No ports enabled, giving up initialization\n"); | |
6710 | goto bail; /* no error, so can still figure out why err */ | |
6711 | } | |
6712 | ||
6713 | write_7322_initregs(dd); | |
6714 | ret = qib_create_ctxts(dd); | |
6715 | init_7322_cntrnames(dd); | |
6716 | ||
6717 | updthresh = 8U; /* update threshold */ | |
6718 | ||
6719 | /* Use all of the 4KB buffers for kernel SDMA; zero if !SDMA. | |
6720 | * Reserve the update threshold amount, or 3, whichever is | |
6721 | * greater, for other kernel use, such as sending SMI, MAD, | |
6722 | * and ACKs, unless SDMA is disabled, in which case the | |
6723 | * kernel gets all the 4k bufs. | |
6724 | * If the reserve were less than the update threshold, we could wait | |
6725 | * a long time for an update. Coded this way because we | |
6726 | * sometimes change the update threshold for various reasons, | |
6727 | * and we want this to remain robust. | |
6728 | */ | |
6729 | if (dd->flags & QIB_HAS_SEND_DMA) { | |
6730 | dd->cspec->sdmabufcnt = dd->piobcnt4k; | |
6731 | sbufs = updthresh > 3 ? updthresh : 3; | |
6732 | } else { | |
6733 | dd->cspec->sdmabufcnt = 0; | |
6734 | sbufs = dd->piobcnt4k; | |
6735 | } | |
6736 | dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k - | |
6737 | dd->cspec->sdmabufcnt; | |
6738 | dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; | |
6739 | dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ | |
bb77a077 | 6740 | dd->last_pio = dd->cspec->lastbuf_for_pio; |
f931551b RC |
6741 | dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ? |
6742 | dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0; | |
6743 | ||
6744 | /* | |
6745 | * If we have 16 user contexts, we will have 7 sbufs | |
6746 | * per context, so reduce the update threshold to match. We | |
6747 | * want to update before we actually run out, at low pbufs/ctxt | |
6748 | * so give ourselves some margin. | |
6749 | */ | |
6750 | if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh) | |
6751 | updthresh = dd->pbufsctxt - 2; | |
6752 | dd->cspec->updthresh_dflt = updthresh; | |
6753 | dd->cspec->updthresh = updthresh; | |
6754 | ||
6755 | /* before full enable, no interrupts, no locking needed */ | |
6756 | dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld)) | |
6757 | << SYM_LSB(SendCtrl, AvailUpdThld)) | | |
6758 | SYM_MASK(SendCtrl, SendBufAvailPad64Byte); | |
6759 | ||
6760 | dd->psxmitwait_supported = 1; | |
6761 | dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE; | |
6762 | bail: | |
6763 | if (!dd->ctxtcnt) | |
6764 | dd->ctxtcnt = 1; /* for other initialization code */ | |
6765 | ||
6766 | return ret; | |
6767 | } | |
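/*
 * Worked example, illustrative only: the sendchk bitmap sizing in
 * qib_init_7322_variables() is one bit per send buffer, rounded up
 * to whole longs before the kmalloc() calls.
 */
static unsigned hdrchk_bitmap_longs_sketch(unsigned nbufs)
{
	return (nbufs + BITS_PER_LONG - 1) / BITS_PER_LONG;
}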
6768 | ||
6769 | static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc, | |
6770 | u32 *pbufnum) | |
6771 | { | |
6772 | u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK; | |
6773 | struct qib_devdata *dd = ppd->dd; | |
6774 | ||
6775 | /* last is same for 2k and 4k, because we use 4k if all 2k busy */ | |
6776 | if (pbc & PBC_7322_VL15_SEND) { | |
6777 | first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx; | |
6778 | last = first; | |
6779 | } else { | |
6780 | if ((plen + 1) > dd->piosize2kmax_dwords) | |
6781 | first = dd->piobcnt2k; | |
6782 | else | |
6783 | first = 0; | |
6784 | last = dd->cspec->lastbuf_for_pio; | |
6785 | } | |
6786 | return qib_getsendbuf_range(dd, pbufnum, first, last); | |
6787 | } | |
6788 | ||
6789 | static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv, | |
6790 | u32 start) | |
6791 | { | |
6792 | qib_write_kreg_port(ppd, krp_psinterval, intv); | |
6793 | qib_write_kreg_port(ppd, krp_psstart, start); | |
6794 | } | |
6795 | ||
6796 | /* | |
6797 | * Must be called with sdma_lock held, or before init finished. | |
6798 | */ | |
6799 | static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt) | |
6800 | { | |
6801 | qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt); | |
6802 | } | |
6803 | ||
0b3ddf38 DL |
6804 | /* |
6805 | * sdma_lock should be acquired before calling this routine | |
6806 | */ | |
6807 | static void dump_sdma_7322_state(struct qib_pportdata *ppd) | |
6808 | { | |
6809 | u64 reg, reg1, reg2; | |
6810 | ||
6811 | reg = qib_read_kreg_port(ppd, krp_senddmastatus); | |
6812 | qib_dev_porterr(ppd->dd, ppd->port, | |
6813 | "SDMA senddmastatus: 0x%016llx\n", reg); | |
6814 | ||
6815 | reg = qib_read_kreg_port(ppd, krp_sendctrl); | |
6816 | qib_dev_porterr(ppd->dd, ppd->port, | |
6817 | "SDMA sendctrl: 0x%016llx\n", reg); | |
6818 | ||
6819 | reg = qib_read_kreg_port(ppd, krp_senddmabase); | |
6820 | qib_dev_porterr(ppd->dd, ppd->port, | |
6821 | "SDMA senddmabase: 0x%016llx\n", reg); | |
6822 | ||
6823 | reg = qib_read_kreg_port(ppd, krp_senddmabufmask0); | |
6824 | reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1); | |
6825 | reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2); | |
6826 | qib_dev_porterr(ppd->dd, ppd->port, | |
6827 | "SDMA senddmabufmask 0:%llx 1:%llx 2:%llx\n", | |
6828 | reg, reg1, reg2); | |
6829 | ||
6830 | /* get bufuse bits, clear them, and print them again if non-zero */ | |
6831 | reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0); | |
6832 | qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg); | |
6833 | reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1); | |
6834 | qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1); | |
6835 | reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2); | |
6836 | qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2); | |
6837 | /* 0 and 1 should always be zero, so print as short form */ | |
6838 | qib_dev_porterr(ppd->dd, ppd->port, | |
6839 | "SDMA current senddmabuf_use 0:%llx 1:%llx 2:%llx\n", | |
6840 | reg, reg1, reg2); | |
6841 | reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0); | |
6842 | reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1); | |
6843 | reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2); | |
6844 | /* 0 and 1 should always be zero, so print as short form */ | |
6845 | qib_dev_porterr(ppd->dd, ppd->port, | |
6846 | "SDMA cleared senddmabuf_use 0:%llx 1:%llx 2:%llx\n", | |
6847 | reg, reg1, reg2); | |
6848 | ||
6849 | reg = qib_read_kreg_port(ppd, krp_senddmatail); | |
6850 | qib_dev_porterr(ppd->dd, ppd->port, | |
6851 | "SDMA senddmatail: 0x%016llx\n", reg); | |
6852 | ||
6853 | reg = qib_read_kreg_port(ppd, krp_senddmahead); | |
6854 | qib_dev_porterr(ppd->dd, ppd->port, | |
6855 | "SDMA senddmahead: 0x%016llx\n", reg); | |
6856 | ||
6857 | reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr); | |
6858 | qib_dev_porterr(ppd->dd, ppd->port, | |
6859 | "SDMA senddmaheadaddr: 0x%016llx\n", reg); | |
6860 | ||
6861 | reg = qib_read_kreg_port(ppd, krp_senddmalengen); | |
6862 | qib_dev_porterr(ppd->dd, ppd->port, | |
6863 | "SDMA senddmalengen: 0x%016llx\n", reg); | |
6864 | ||
6865 | reg = qib_read_kreg_port(ppd, krp_senddmadesccnt); | |
6866 | qib_dev_porterr(ppd->dd, ppd->port, | |
6867 | "SDMA senddmadesccnt: 0x%016llx\n", reg); | |
6868 | ||
6869 | reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt); | |
6870 | qib_dev_porterr(ppd->dd, ppd->port, | |
6871 | "SDMA senddmaidlecnt: 0x%016llx\n", reg); | |
6872 | ||
6873 | reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld); | |
6874 | qib_dev_porterr(ppd->dd, ppd->port, | |
6875 | "SDMA senddmaprioritythld: 0x%016llx\n", reg); | |
6876 | ||
6877 | reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt); | |
6878 | qib_dev_porterr(ppd->dd, ppd->port, | |
6879 | "SDMA senddmareloadcnt: 0x%016llx\n", reg); | |
6880 | ||
6881 | dump_sdma_state(ppd); | |
6882 | } | |
6883 | ||
f931551b RC |
6884 | static struct sdma_set_state_action sdma_7322_action_table[] = { |
6885 | [qib_sdma_state_s00_hw_down] = { | |
6886 | .go_s99_running_tofalse = 1, | |
6887 | .op_enable = 0, | |
6888 | .op_intenable = 0, | |
6889 | .op_halt = 0, | |
6890 | .op_drain = 0, | |
6891 | }, | |
6892 | [qib_sdma_state_s10_hw_start_up_wait] = { | |
6893 | .op_enable = 0, | |
6894 | .op_intenable = 1, | |
6895 | .op_halt = 1, | |
6896 | .op_drain = 0, | |
6897 | }, | |
6898 | [qib_sdma_state_s20_idle] = { | |
6899 | .op_enable = 1, | |
6900 | .op_intenable = 1, | |
6901 | .op_halt = 1, | |
6902 | .op_drain = 0, | |
6903 | }, | |
6904 | [qib_sdma_state_s30_sw_clean_up_wait] = { | |
6905 | .op_enable = 0, | |
6906 | .op_intenable = 1, | |
6907 | .op_halt = 1, | |
6908 | .op_drain = 0, | |
6909 | }, | |
6910 | [qib_sdma_state_s40_hw_clean_up_wait] = { | |
6911 | .op_enable = 1, | |
6912 | .op_intenable = 1, | |
6913 | .op_halt = 1, | |
6914 | .op_drain = 0, | |
6915 | }, | |
6916 | [qib_sdma_state_s50_hw_halt_wait] = { | |
6917 | .op_enable = 1, | |
6918 | .op_intenable = 1, | |
6919 | .op_halt = 1, | |
6920 | .op_drain = 1, | |
6921 | }, | |
6922 | [qib_sdma_state_s99_running] = { | |
6923 | .op_enable = 1, | |
6924 | .op_intenable = 1, | |
6925 | .op_halt = 0, | |
6926 | .op_drain = 0, | |
6927 | .go_s99_running_totrue = 1, | |
6928 | }, | |
6929 | }; | |
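/*
 * Each entry above gives the operation bits the generic SDMA state
 * machine applies (via set_state_action) when it enters the
 * corresponding state; go_s99_running_tofalse/_totrue additionally
 * clear or set the "running" flag on the way down or up.
 */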
6930 | ||
6931 | static void qib_7322_sdma_init_early(struct qib_pportdata *ppd) | |
6932 | { | |
6933 | ppd->sdma_state.set_state_action = sdma_7322_action_table; | |
6934 | } | |
6935 | ||
6936 | static int init_sdma_7322_regs(struct qib_pportdata *ppd) | |
6937 | { | |
6938 | struct qib_devdata *dd = ppd->dd; | |
6939 | unsigned lastbuf, erstbuf; | |
6940 | u64 senddmabufmask[3] = { 0 }; | |
6941 | int n, ret = 0; | |
6942 | ||
6943 | qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys); | |
6944 | qib_sdma_7322_setlengen(ppd); | |
6945 | qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */ | |
6946 | qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt); | |
6947 | qib_write_kreg_port(ppd, krp_senddmadesccnt, 0); | |
6948 | qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys); | |
6949 | ||
6950 | if (dd->num_pports) | |
6951 | n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */ | |
6952 | else | |
6953 | n = dd->cspec->sdmabufcnt; /* failsafe for init */ | |
6954 | erstbuf = (dd->piobcnt2k + dd->piobcnt4k) - | |
6955 | ((dd->num_pports == 1 || ppd->port == 2) ? n : | |
6956 | dd->cspec->sdmabufcnt); | |
6957 | lastbuf = erstbuf + n; | |
6958 | ||
6959 | ppd->sdma_state.first_sendbuf = erstbuf; | |
6960 | ppd->sdma_state.last_sendbuf = lastbuf; | |
6961 | for (; erstbuf < lastbuf; ++erstbuf) { | |
6962 | unsigned word = erstbuf / BITS_PER_LONG; | |
6963 | unsigned bit = erstbuf & (BITS_PER_LONG - 1); | |
6964 | ||
6965 | BUG_ON(word >= 3); | |
6966 | senddmabufmask[word] |= 1ULL << bit; | |
6967 | } | |
6968 | qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]); | |
6969 | qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]); | |
6970 | qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]); | |
6971 | return ret; | |
6972 | } | |
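/*
 * Worked example (hypothetical counts, for illustration only): with
 * piobcnt2k + piobcnt4k = 160, sdmabufcnt = 64 and two ports, each
 * port gets n = 32 buffers at the top of PIO buffer space:
 *   port 1: erstbuf = 160 - 64 = 96,  lastbuf = 128 (bits 96..127)
 *   port 2: erstbuf = 160 - 32 = 128, lastbuf = 160 (bits 128..159)
 * so port 1 sets bits only in senddmabufmask[1] and port 2 only in
 * senddmabufmask[2], matching the three 64-bit mask registers.
 */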
6973 | ||
6974 | /* sdma_lock must be held */ | |
6975 | static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd) | |
6976 | { | |
6977 | struct qib_devdata *dd = ppd->dd; | |
6978 | int sane; | |
6979 | int use_dmahead; | |
6980 | u16 swhead; | |
6981 | u16 swtail; | |
6982 | u16 cnt; | |
6983 | u16 hwhead; | |
6984 | ||
6985 | use_dmahead = __qib_sdma_running(ppd) && | |
6986 | (dd->flags & QIB_HAS_SDMA_TIMEOUT); | |
6987 | retry: | |
6988 | hwhead = use_dmahead ? | |
6989 | (u16) le64_to_cpu(*ppd->sdma_head_dma) : | |
6990 | (u16) qib_read_kreg_port(ppd, krp_senddmahead); | |
6991 | ||
6992 | swhead = ppd->sdma_descq_head; | |
6993 | swtail = ppd->sdma_descq_tail; | |
6994 | cnt = ppd->sdma_descq_cnt; | |
6995 | ||
6996 | if (swhead < swtail) | |
6997 | /* not wrapped */ | |
6998 | sane = (hwhead >= swhead) && (hwhead <= swtail); | |
6999 | else if (swhead > swtail) | |
7000 | /* wrapped around */ | |
7001 | sane = ((hwhead >= swhead) && (hwhead < cnt)) || | |
7002 | (hwhead <= swtail); | |
7003 | else | |
7004 | /* empty */ | |
7005 | sane = (hwhead == swhead); | |
7006 | ||
7007 | if (unlikely(!sane)) { | |
7008 | if (use_dmahead) { | |
7009 | /* try one more time, directly from the register */ | |
7010 | use_dmahead = 0; | |
7011 | goto retry; | |
7012 | } | |
7013 | /* proceed as if no progress */ | |
7014 | hwhead = swhead; | |
7015 | } | |
7016 | ||
7017 | return hwhead; | |
7018 | } | |
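/*
 * Minimal sketch (hypothetical helper, not part of the driver): the
 * head-sanity test above as a pure function. With cnt = 256,
 * swhead = 250 and swtail = 10 (wrapped), hwhead = 252 or hwhead = 3
 * are sane, while hwhead = 100 is not and is treated as no progress.
 */
static inline int sdma_head_is_sane(u16 hwhead, u16 swhead,
				    u16 swtail, u16 cnt)
{
	if (swhead < swtail)	/* not wrapped */
		return hwhead >= swhead && hwhead <= swtail;
	if (swhead > swtail)	/* wrapped around */
		return (hwhead >= swhead && hwhead < cnt) ||
			hwhead <= swtail;
	return hwhead == swhead;	/* empty */
}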
7019 | ||
7020 | static int qib_sdma_7322_busy(struct qib_pportdata *ppd) | |
7021 | { | |
7022 | u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus); | |
7023 | ||
7024 | return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) || | |
7025 | (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) || | |
7026 | !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) || | |
7027 | !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty)); | |
7028 | } | |
7029 | ||
7030 | /* | |
7031 | * Compute the amount of delay before sending the next packet if the | |
7032 | * port's send rate differs from the static rate set for the QP. | |
7033 | * The delay affects the next packet, and the amount of the delay is | |
7034 | * based on the length of this packet. | |
7035 | */ | |
7036 | static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen, | |
7037 | u8 srate, u8 vl) | |
7038 | { | |
7039 | u8 snd_mult = ppd->delay_mult; | |
7040 | u8 rcv_mult = ib_rate_to_delay[srate]; | |
7041 | u32 ret; | |
7042 | ||
7043 | ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0; | |
7044 | ||
7045 | /* Indicate VL15, else set the VL in the control word */ | |
7046 | if (vl == 15) | |
7047 | ret |= PBC_7322_VL15_SEND_CTRL; | |
7048 | else | |
7049 | ret |= vl << PBC_VL_NUM_LSB; | |
7050 | ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB; | |
7051 | ||
7052 | return ret; | |
7053 | } | |
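/*
 * Worked example (hypothetical values): with the QP's static-rate
 * delay multiplier rcv_mult = 8 greater than the port's snd_mult = 4,
 * a packet of plen = 10 dwords gets a PBC delay of
 * ((10 + 1) >> 1) * 4 = 20; when rcv_mult <= snd_mult the delay is 0.
 * VL15 packets get PBC_7322_VL15_SEND_CTRL instead of a VL number,
 * and hw_pidx selects the sending port in the control word.
 */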
7054 | ||
7055 | /* | |
7056 | * Enable the per-port VL15 send buffers for use. | |
7057 | * They follow the rest of the buffers, without a config parameter. | |
7058 | * This was in initregs, but that is done before the shadow | |
7059 | * is set up, and this has to be done after the shadow is | |
7060 | * set up. | |
7061 | */ | |
7062 | static void qib_7322_initvl15_bufs(struct qib_devdata *dd) | |
7063 | { | |
7064 | unsigned vl15bufs; | |
7065 | ||
7066 | vl15bufs = dd->piobcnt2k + dd->piobcnt4k; | |
7067 | qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS, | |
7068 | TXCHK_CHG_TYPE_KERN, NULL); | |
7069 | } | |
7070 | ||
7071 | static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd) | |
7072 | { | |
7073 | if (rcd->ctxt < NUM_IB_PORTS) { | |
7074 | if (rcd->dd->num_pports > 1) { | |
7075 | rcd->rcvegrcnt = KCTXT0_EGRCNT / 2; | |
7076 | rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0; | |
7077 | } else { | |
7078 | rcd->rcvegrcnt = KCTXT0_EGRCNT; | |
7079 | rcd->rcvegr_tid_base = 0; | |
7080 | } | |
7081 | } else { | |
7082 | rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt; | |
7083 | rcd->rcvegr_tid_base = KCTXT0_EGRCNT + | |
7084 | (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt; | |
7085 | } | |
7086 | } | |
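/*
 * Resulting layout for a two-port chip: kernel ctxts 0 and 1 each get
 * KCTXT0_EGRCNT / 2 eager entries (ctxt 0 at TID base 0, ctxt 1 at
 * KCTXT0_EGRCNT / 2), and user ctxts (>= NUM_IB_PORTS) follow at
 * KCTXT0_EGRCNT plus an rcvegrcnt-sized slice per context.
 */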
7087 | ||
7088 | #define QTXSLEEPS 5000 | |
7089 | static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, | |
7090 | u32 len, u32 which, struct qib_ctxtdata *rcd) | |
7091 | { | |
7092 | int i; | |
7093 | const int last = start + len - 1; | |
7094 | const int lastr = last / BITS_PER_LONG; | |
7095 | u32 sleeps = 0; | |
7096 | int wait = rcd != NULL; | |
7097 | unsigned long flags; | |
7098 | ||
7099 | while (wait) { | |
7100 | unsigned long shadow; | |
7101 | int cstart, previ = -1; | |
7102 | ||
7103 | /* | |
7104 | * When flipping from kernel to user, we can't change | |
7105 | * the checking type if the buffer is allocated to the | |
7106 | * driver. It's OK in the other direction, because it's | |
7107 | * from close, and we have just disarmed all the | |
7108 | * buffers. All the kernel-to-kernel changes are also | |
7109 | * OK. | |
7110 | */ | |
7111 | for (cstart = start; cstart <= last; cstart++) { | |
7112 | i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT) | |
7113 | / BITS_PER_LONG; | |
7114 | if (i != previ) { | |
7115 | shadow = (unsigned long) | |
7116 | le64_to_cpu(dd->pioavailregs_dma[i]); | |
7117 | previ = i; | |
7118 | } | |
7119 | if (test_bit(((2 * cstart) + | |
7120 | QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT) | |
7121 | % BITS_PER_LONG, &shadow)) | |
7122 | break; | |
7123 | } | |
7124 | ||
7125 | if (cstart > last) | |
7126 | break; | |
7127 | ||
7128 | if (sleeps == QTXSLEEPS) | |
7129 | break; | |
7130 | /* make sure we see an updated copy next time around */ | |
7131 | sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | |
7132 | sleeps++; | |
a0a234d4 | 7133 | msleep(20); |
f931551b RC |
7134 | } |
7135 | ||
7136 | switch (which) { | |
7137 | case TXCHK_CHG_TYPE_DIS1: | |
7138 | /* | |
7139 | * disable checking on a range; used by diags; just | |
7140 | * one buffer, but still written generically | |
7141 | */ | |
7142 | for (i = start; i <= last; i++) | |
7143 | clear_bit(i, dd->cspec->sendchkenable); | |
7144 | break; | |
7145 | ||
7146 | case TXCHK_CHG_TYPE_ENAB1: | |
7147 | /* | |
7148 | * (re)enable checking on a range; used by diags; just | |
7149 | * one buffer, but still written generically; read | |
7150 | * scratch to be sure buffer actually triggered, not | |
7151 | * just flushed from processor. | |
7152 | */ | |
7153 | qib_read_kreg32(dd, kr_scratch); | |
7154 | for (i = start; i <= last; i++) | |
7155 | set_bit(i, dd->cspec->sendchkenable); | |
7156 | break; | |
7157 | ||
7158 | case TXCHK_CHG_TYPE_KERN: | |
7159 | /* usable by kernel */ | |
7160 | for (i = start; i <= last; i++) { | |
7161 | set_bit(i, dd->cspec->sendibchk); | |
7162 | clear_bit(i, dd->cspec->sendgrhchk); | |
7163 | } | |
7164 | spin_lock_irqsave(&dd->uctxt_lock, flags); | |
7165 | /* see if we need to raise avail update threshold */ | |
7166 | for (i = dd->first_user_ctxt; | |
7167 | dd->cspec->updthresh != dd->cspec->updthresh_dflt | |
7168 | && i < dd->cfgctxts; i++) | |
7169 | if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt && | |
7170 | ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1) | |
7171 | < dd->cspec->updthresh_dflt) | |
7172 | break; | |
7173 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | |
7174 | if (i == dd->cfgctxts) { | |
7175 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | |
7176 | dd->cspec->updthresh = dd->cspec->updthresh_dflt; | |
7177 | dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); | |
7178 | dd->sendctrl |= (dd->cspec->updthresh & | |
7179 | SYM_RMASK(SendCtrl, AvailUpdThld)) << | |
7180 | SYM_LSB(SendCtrl, AvailUpdThld); | |
7181 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
7182 | sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | |
7183 | } | |
7184 | break; | |
7185 | ||
7186 | case TXCHK_CHG_TYPE_USER: | |
7187 | /* for user process */ | |
7188 | for (i = start; i <= last; i++) { | |
7189 | clear_bit(i, dd->cspec->sendibchk); | |
7190 | set_bit(i, dd->cspec->sendgrhchk); | |
7191 | } | |
7192 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | |
7193 | if (rcd && rcd->subctxt_cnt && ((rcd->piocnt | |
7194 | / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) { | |
7195 | dd->cspec->updthresh = (rcd->piocnt / | |
7196 | rcd->subctxt_cnt) - 1; | |
7197 | dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); | |
7198 | dd->sendctrl |= (dd->cspec->updthresh & | |
7199 | SYM_RMASK(SendCtrl, AvailUpdThld)) | |
7200 | << SYM_LSB(SendCtrl, AvailUpdThld); | |
7201 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
7202 | sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | |
7203 | } else | |
7204 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | |
7205 | break; | |
7206 | ||
7207 | default: | |
7208 | break; | |
7209 | } | |
7210 | ||
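	/*
	 * Push the updated shadows to the chip below: the DIS1/ENAB1
	 * cases (which >= 2) rewrite only the send-check enable mask,
	 * while the KERN/USER cases (which < 2) rewrite the GRH-check
	 * and IB-packet masks instead.
	 */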
7211 | for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i) | |
7212 | qib_write_kreg(dd, kr_sendcheckmask + i, | |
7213 | dd->cspec->sendchkenable[i]); | |
7214 | ||
7215 | for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) { | |
7216 | qib_write_kreg(dd, kr_sendgrhcheckmask + i, | |
7217 | dd->cspec->sendgrhchk[i]); | |
7218 | qib_write_kreg(dd, kr_sendibpktmask + i, | |
7219 | dd->cspec->sendibchk[i]); | |
7220 | } | |
7221 | ||
7222 | /* | |
7223 | * Be sure whatever we did was seen by the chip and acted upon, | |
7224 | * before we return. Mostly important for which >= 2. | |
7225 | */ | |
7226 | qib_read_kreg32(dd, kr_scratch); | |
7227 | } | |
7228 | ||
7229 | ||
7230 | /* useful for trigger analyzers, etc. */ | |
7231 | static void writescratch(struct qib_devdata *dd, u32 val) | |
7232 | { | |
7233 | qib_write_kreg(dd, kr_scratch, val); | |
7234 | } | |
7235 | ||
7236 | /* Dummy for now, use chip regs soon */ | |
7237 | static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum) | |
7238 | { | |
7239 | return -ENXIO; | |
7240 | } | |
7241 | ||
7242 | /** | |
7243 | * qib_init_iba7322_funcs - set up the chip-specific function pointers | |
7244 | * @pdev: the pci_dev for the qlogic_ib device | |
7245 | * @ent: pci_device_id struct for this dev | |
7246 | * | |
7247 | * Also allocates, inits, and returns the devdata struct for this | |
7248 | * device instance | |
7249 | * | |
7250 | * This is global, and is called directly at init to set up the | |
7251 | * chip-specific function pointers for later use. | |
7252 | */ | |
7253 | struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, | |
7254 | const struct pci_device_id *ent) | |
7255 | { | |
7256 | struct qib_devdata *dd; | |
7257 | int ret, i; | |
7258 | u32 tabsize, actual_cnt = 0; | |
7259 | ||
7260 | dd = qib_alloc_devdata(pdev, | |
7261 | NUM_IB_PORTS * sizeof(struct qib_pportdata) + | |
7262 | sizeof(struct qib_chip_specific) + | |
7263 | NUM_IB_PORTS * sizeof(struct qib_chippport_specific)); | |
7264 | if (IS_ERR(dd)) | |
7265 | goto bail; | |
7266 | ||
7267 | dd->f_bringup_serdes = qib_7322_bringup_serdes; | |
7268 | dd->f_cleanup = qib_setup_7322_cleanup; | |
7269 | dd->f_clear_tids = qib_7322_clear_tids; | |
7270 | dd->f_free_irq = qib_7322_free_irq; | |
7271 | dd->f_get_base_info = qib_7322_get_base_info; | |
7272 | dd->f_get_msgheader = qib_7322_get_msgheader; | |
7273 | dd->f_getsendbuf = qib_7322_getsendbuf; | |
7274 | dd->f_gpio_mod = gpio_7322_mod; | |
7275 | dd->f_eeprom_wen = qib_7322_eeprom_wen; | |
7276 | dd->f_hdrqempty = qib_7322_hdrqempty; | |
7277 | dd->f_ib_updown = qib_7322_ib_updown; | |
7278 | dd->f_init_ctxt = qib_7322_init_ctxt; | |
7279 | dd->f_initvl15_bufs = qib_7322_initvl15_bufs; | |
7280 | dd->f_intr_fallback = qib_7322_intr_fallback; | |
7281 | dd->f_late_initreg = qib_late_7322_initreg; | |
7282 | dd->f_setpbc_control = qib_7322_setpbc_control; | |
7283 | dd->f_portcntr = qib_portcntr_7322; | |
7284 | dd->f_put_tid = qib_7322_put_tid; | |
7285 | dd->f_quiet_serdes = qib_7322_mini_quiet_serdes; | |
7286 | dd->f_rcvctrl = rcvctrl_7322_mod; | |
7287 | dd->f_read_cntrs = qib_read_7322cntrs; | |
7288 | dd->f_read_portcntrs = qib_read_7322portcntrs; | |
7289 | dd->f_reset = qib_do_7322_reset; | |
7290 | dd->f_init_sdma_regs = init_sdma_7322_regs; | |
7291 | dd->f_sdma_busy = qib_sdma_7322_busy; | |
7292 | dd->f_sdma_gethead = qib_sdma_7322_gethead; | |
7293 | dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl; | |
7294 | dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt; | |
7295 | dd->f_sdma_update_tail = qib_sdma_update_7322_tail; | |
7296 | dd->f_sendctrl = sendctrl_7322_mod; | |
7297 | dd->f_set_armlaunch = qib_set_7322_armlaunch; | |
7298 | dd->f_set_cntr_sample = qib_set_cntr_7322_sample; | |
7299 | dd->f_iblink_state = qib_7322_iblink_state; | |
7300 | dd->f_ibphys_portstate = qib_7322_phys_portstate; | |
7301 | dd->f_get_ib_cfg = qib_7322_get_ib_cfg; | |
7302 | dd->f_set_ib_cfg = qib_7322_set_ib_cfg; | |
7303 | dd->f_set_ib_loopback = qib_7322_set_loopback; | |
7304 | dd->f_get_ib_table = qib_7322_get_ib_table; | |
7305 | dd->f_set_ib_table = qib_7322_set_ib_table; | |
7306 | dd->f_set_intr_state = qib_7322_set_intr_state; | |
7307 | dd->f_setextled = qib_setup_7322_setextled; | |
7308 | dd->f_txchk_change = qib_7322_txchk_change; | |
7309 | dd->f_update_usrhead = qib_update_7322_usrhead; | |
7310 | dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr; | |
7311 | dd->f_xgxs_reset = qib_7322_mini_pcs_reset; | |
7312 | dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up; | |
7313 | dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up; | |
7314 | dd->f_sdma_init_early = qib_7322_sdma_init_early; | |
7315 | dd->f_writescratch = writescratch; | |
7316 | dd->f_tempsense_rd = qib_7322_tempsense_rd; | |
8469ba39 MM |
7317 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
7318 | dd->f_notify_dca = qib_7322_notify_dca; | |
7319 | #endif | |
f931551b RC |
7320 | /* |
7321 | * Do remaining PCIe setup and save PCIe values in dd. | |
7322 | * Any error printing is already done by the init code. | |
7323 | * On return, we have the chip mapped, but chip registers | |
7324 | * are not set up until start of qib_init_7322_variables. | |
7325 | */ | |
7326 | ret = qib_pcie_ddinit(dd, pdev, ent); | |
7327 | if (ret < 0) | |
7328 | goto bail_free; | |
7329 | ||
7330 | /* initialize chip-specific variables */ | |
7331 | ret = qib_init_7322_variables(dd); | |
7332 | if (ret) | |
7333 | goto bail_cleanup; | |
7334 | ||
7335 | if (qib_mini_init || !dd->num_pports) | |
7336 | goto bail; | |
7337 | ||
7338 | /* | |
7339 | * Determine number of vectors we want; depends on port count | |
7340 | * and number of configured kernel receive queues actually used. | |
7341 | * Should also depend on whether sdma is enabled or not, but | |
7342 | * that's such a rare testing case it's not worth worrying about. | |
7343 | */ | |
7344 | tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table); | |
7345 | for (i = 0; i < tabsize; i++) | |
7346 | if ((i < ARRAY_SIZE(irq_table) && | |
7347 | irq_table[i].port <= dd->num_pports) || | |
7348 | (i >= ARRAY_SIZE(irq_table) && | |
7349 | dd->rcd[i - ARRAY_SIZE(irq_table)])) | |
7350 | actual_cnt++; | |
e67306a3 MM |
7351 | /* reduce by ctxts < 2 */ |
7352 | if (qib_krcvq01_no_msi) | |
7353 | actual_cnt -= dd->num_pports; | |
7354 | ||
f931551b | 7355 | tabsize = actual_cnt; |
8469ba39 | 7356 | dd->cspec->msix_entries = kzalloc(tabsize * |
a778f3fd MM |
7357 | sizeof(struct qib_msix_entry), GFP_KERNEL); |
7358 | if (!dd->cspec->msix_entries) { | |
f931551b RC |
7359 | qib_dev_err(dd, "No memory for MSIx table\n"); |
7360 | tabsize = 0; | |
7361 | } | |
7362 | for (i = 0; i < tabsize; i++) | |
a778f3fd | 7363 | dd->cspec->msix_entries[i].msix.entry = i; |
f931551b RC |
7364 | |
7365 | if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries)) | |
7fac3301 MM |
7366 | qib_dev_err(dd, |
7367 | "Failed to setup PCIe or interrupts; continuing anyway\n"); | |
f931551b RC |
7368 | /* may be less than we wanted, if not enough available */ |
7369 | dd->cspec->num_msix_entries = tabsize; | |
7370 | ||
7371 | /* setup interrupt handler */ | |
7372 | qib_setup_7322_interrupt(dd, 1); | |
7373 | ||
7374 | /* clear diagctrl register, in case diags were running and crashed */ | |
7375 | qib_write_kreg(dd, kr_hwdiagctrl, 0); | |
8469ba39 MM |
7376 | #ifdef CONFIG_INFINIBAND_QIB_DCA |
7377 | if (!dca_add_requester(&pdev->dev)) { | |
7378 | qib_devinfo(dd->pcidev, "DCA enabled\n"); | |
7379 | dd->flags |= QIB_DCA_ENABLED; | |
7380 | qib_setup_dca(dd); | |
7381 | } | |
7382 | #endif | |
f931551b RC |
7383 | goto bail; |
7384 | ||
7385 | bail_cleanup: | |
7386 | qib_pcie_ddcleanup(dd); | |
7387 | bail_free: | |
7388 | qib_free_devdata(dd); | |
7389 | dd = ERR_PTR(ret); | |
7390 | bail: | |
7391 | return dd; | |
7392 | } | |
7393 | ||
7394 | /* | |
7395 | * Set the table entry at the specified index from the table specified. | |
7396 | * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first | |
7397 | * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR. | |
7398 | * 'ridx' below addresses the correct entry, while its 4 LSBs select | |
7399 | * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table. | |
7400 | */ | |
7401 | #define DDS_ENT_AMP_LSB 14 | |
7402 | #define DDS_ENT_MAIN_LSB 9 | |
7403 | #define DDS_ENT_POST_LSB 5 | |
7404 | #define DDS_ENT_PRE_XTRA_LSB 3 | |
7405 | #define DDS_ENT_PRE_LSB 0 | |
7406 | ||
7407 | /* | |
7408 | * Set one entry in the TxDDS table for the specified port. | |
7409 | * ridx picks one of the entries, while tp points | |
7410 | * to the appropriate table entry. | |
7411 | */ | |
7412 | static void set_txdds(struct qib_pportdata *ppd, int ridx, | |
7413 | const struct txdds_ent *tp) | |
7414 | { | |
7415 | struct qib_devdata *dd = ppd->dd; | |
7416 | u32 pack_ent; | |
7417 | int regidx; | |
7418 | ||
7419 | /* Get correct offset in chip-space, and in source table */ | |
7420 | regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx; | |
7421 | /* | |
7422 | * We do not use qib_write_kreg_port() because it was intended | |
7423 | * only for registers in the lower "port specific" pages. | |
7424 | * So do index calculation by hand. | |
7425 | */ | |
7426 | if (ppd->hw_pidx) | |
7427 | regidx += (dd->palign / sizeof(u64)); | |
7428 | ||
7429 | pack_ent = tp->amp << DDS_ENT_AMP_LSB; | |
7430 | pack_ent |= tp->main << DDS_ENT_MAIN_LSB; | |
7431 | pack_ent |= tp->pre << DDS_ENT_PRE_LSB; | |
7432 | pack_ent |= tp->post << DDS_ENT_POST_LSB; | |
7433 | qib_write_kreg(dd, regidx, pack_ent); | |
7434 | /* Prevent back-to-back writes by hitting scratch */ | |
7435 | qib_write_kreg(ppd->dd, kr_scratch, 0); | |
7436 | } | |
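/*
 * Packing example (using the Amphenol 1m SDR entry below, and the
 * { amp, pre, main, post } field order shown in the txdds tables):
 * amp 10, pre 0, main 0, post 5 packs as
 * (10 << DDS_ENT_AMP_LSB) | (5 << DDS_ENT_POST_LSB) = 0x280a0.
 */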
7437 | ||
7438 | static const struct vendor_txdds_ent vendor_txdds[] = { | |
7439 | { /* Amphenol 1m 30awg NoEq */ | |
7440 | { 0x41, 0x50, 0x48 }, "584470002 ", | |
7441 | { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 }, | |
7442 | }, | |
7443 | { /* Amphenol 3m 28awg NoEq */ | |
7444 | { 0x41, 0x50, 0x48 }, "584470004 ", | |
7445 | { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 }, | |
7446 | }, | |
7447 | { /* Finisar 3m OM2 Optical */ | |
7448 | { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL", | |
7449 | { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 }, | |
7450 | }, | |
7451 | { /* Finisar 30m OM2 Optical */ | |
7452 | { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL", | |
7453 | { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 }, | |
7454 | }, | |
7455 | { /* Finisar Default OM2 Optical */ | |
7456 | { 0x00, 0x90, 0x65 }, NULL, | |
7457 | { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 }, | |
7458 | }, | |
7459 | { /* Gore 1m 30awg NoEq */ | |
7460 | { 0x00, 0x21, 0x77 }, "QSN3300-1 ", | |
7461 | { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 }, | |
7462 | }, | |
7463 | { /* Gore 2m 30awg NoEq */ | |
7464 | { 0x00, 0x21, 0x77 }, "QSN3300-2 ", | |
7465 | { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 }, | |
7466 | }, | |
7467 | { /* Gore 1m 28awg NoEq */ | |
7468 | { 0x00, 0x21, 0x77 }, "QSN3800-1 ", | |
7469 | { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 }, | |
7470 | }, | |
7471 | { /* Gore 3m 28awg NoEq */ | |
7472 | { 0x00, 0x21, 0x77 }, "QSN3800-3 ", | |
7473 | { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 }, | |
7474 | }, | |
7475 | { /* Gore 5m 24awg Eq */ | |
7476 | { 0x00, 0x21, 0x77 }, "QSN7000-5 ", | |
7477 | { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 }, | |
7478 | }, | |
7479 | { /* Gore 7m 24awg Eq */ | |
7480 | { 0x00, 0x21, 0x77 }, "QSN7000-7 ", | |
7481 | { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 }, | |
7482 | }, | |
7483 | { /* Gore 5m 26awg Eq */ | |
7484 | { 0x00, 0x21, 0x77 }, "QSN7600-5 ", | |
7485 | { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 }, | |
7486 | }, | |
7487 | { /* Gore 7m 26awg Eq */ | |
7488 | { 0x00, 0x21, 0x77 }, "QSN7600-7 ", | |
7489 | { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 }, | |
7490 | }, | |
7491 | { /* Intersil 12m 24awg Active */ | |
7492 | { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224", | |
7493 | { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 }, | |
7494 | }, | |
7495 | { /* Intersil 10m 28awg Active */ | |
7496 | { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028", | |
7497 | { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 }, | |
7498 | }, | |
7499 | { /* Intersil 7m 30awg Active */ | |
7500 | { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730", | |
7501 | { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 }, | |
7502 | }, | |
7503 | { /* Intersil 5m 32awg Active */ | |
7504 | { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532", | |
7505 | { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 }, | |
7506 | }, | |
7507 | { /* Intersil Default Active */ | |
7508 | { 0x00, 0x30, 0xB4 }, NULL, | |
7509 | { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 }, | |
7510 | }, | |
7511 | { /* Luxtera 20m Active Optical */ | |
7512 | { 0x00, 0x25, 0x63 }, NULL, | |
7513 | { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 }, | |
7514 | }, | |
7515 | { /* Molex 1M Cu loopback */ | |
7516 | { 0x00, 0x09, 0x3A }, "74763-0025 ", | |
7517 | { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, | |
7518 | }, | |
7519 | { /* Molex 2m 28awg NoEq */ | |
7520 | { 0x00, 0x09, 0x3A }, "74757-2201 ", | |
7521 | { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 }, | |
7522 | }, | |
7523 | }; | |
7524 | ||
7525 | static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = { | |
7526 | /* amp, pre, main, post */ | |
7527 | { 2, 2, 15, 6 }, /* Loopback */ | |
7528 | { 0, 0, 0, 1 }, /* 2 dB */ | |
7529 | { 0, 0, 0, 2 }, /* 3 dB */ | |
7530 | { 0, 0, 0, 3 }, /* 4 dB */ | |
7531 | { 0, 0, 0, 4 }, /* 5 dB */ | |
7532 | { 0, 0, 0, 5 }, /* 6 dB */ | |
7533 | { 0, 0, 0, 6 }, /* 7 dB */ | |
7534 | { 0, 0, 0, 7 }, /* 8 dB */ | |
7535 | { 0, 0, 0, 8 }, /* 9 dB */ | |
7536 | { 0, 0, 0, 9 }, /* 10 dB */ | |
7537 | { 0, 0, 0, 10 }, /* 11 dB */ | |
7538 | { 0, 0, 0, 11 }, /* 12 dB */ | |
7539 | { 0, 0, 0, 12 }, /* 13 dB */ | |
7540 | { 0, 0, 0, 13 }, /* 14 dB */ | |
7541 | { 0, 0, 0, 14 }, /* 15 dB */ | |
7542 | { 0, 0, 0, 15 }, /* 16 dB */ | |
7543 | }; | |
7544 | ||
7545 | static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = { | |
7546 | /* amp, pre, main, post */ | |
7547 | { 2, 2, 15, 6 }, /* Loopback */ | |
7548 | { 0, 0, 0, 8 }, /* 2 dB */ | |
7549 | { 0, 0, 0, 8 }, /* 3 dB */ | |
7550 | { 0, 0, 0, 9 }, /* 4 dB */ | |
7551 | { 0, 0, 0, 9 }, /* 5 dB */ | |
7552 | { 0, 0, 0, 10 }, /* 6 dB */ | |
7553 | { 0, 0, 0, 10 }, /* 7 dB */ | |
7554 | { 0, 0, 0, 11 }, /* 8 dB */ | |
7555 | { 0, 0, 0, 11 }, /* 9 dB */ | |
7556 | { 0, 0, 0, 12 }, /* 10 dB */ | |
7557 | { 0, 0, 0, 12 }, /* 11 dB */ | |
7558 | { 0, 0, 0, 13 }, /* 12 dB */ | |
7559 | { 0, 0, 0, 13 }, /* 13 dB */ | |
7560 | { 0, 0, 0, 14 }, /* 14 dB */ | |
7561 | { 0, 0, 0, 14 }, /* 15 dB */ | |
7562 | { 0, 0, 0, 15 }, /* 16 dB */ | |
7563 | }; | |
7564 | ||
7565 | static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = { | |
7566 | /* amp, pre, main, post */ | |
7567 | { 2, 2, 15, 6 }, /* Loopback */ | |
a77fcf89 RC |
7568 | { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */ |
7569 | { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */ | |
f931551b RC |
7570 | { 0, 1, 0, 11 }, /* 4 dB */ |
7571 | { 0, 1, 0, 13 }, /* 5 dB */ | |
7572 | { 0, 1, 0, 15 }, /* 6 dB */ | |
7573 | { 0, 1, 3, 15 }, /* 7 dB */ | |
7574 | { 0, 1, 7, 15 }, /* 8 dB */ | |
7575 | { 0, 1, 7, 15 }, /* 9 dB */ | |
7576 | { 0, 1, 8, 15 }, /* 10 dB */ | |
7577 | { 0, 1, 9, 15 }, /* 11 dB */ | |
7578 | { 0, 1, 10, 15 }, /* 12 dB */ | |
7579 | { 0, 2, 6, 15 }, /* 13 dB */ | |
7580 | { 0, 2, 7, 15 }, /* 14 dB */ | |
7581 | { 0, 2, 8, 15 }, /* 15 dB */ | |
7582 | { 0, 2, 9, 15 }, /* 16 dB */ | |
7583 | }; | |
7584 | ||
a77fcf89 RC |
7585 | /* |
7586 | * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ. | |
7587 | * These are mostly used for mez cards going through connectors | |
7588 | * and backplane traces, but can be used to add other "unusual" | |
7589 | * table values as well. | |
7590 | */ | |
7591 | static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = { | |
7592 | /* amp, pre, main, post */ | |
7593 | { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ | |
7594 | { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ | |
7595 | { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ | |
7596 | { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ | |
7c7a416e RC |
7597 | { 0, 0, 0, 3 }, /* QMH7342 backplane settings */ |
7598 | { 0, 0, 0, 4 }, /* QMH7342 backplane settings */ | |
22baa407 MH |
7599 | { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */ |
7600 | { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */ | |
7601 | { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */ | |
7602 | { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */ | |
7603 | { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */ | |
7604 | { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */ | |
7605 | { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */ | |
7606 | { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */ | |
7607 | { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */ | |
7608 | { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */ | |
7609 | { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */ | |
7610 | { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */ | |
a77fcf89 RC |
7611 | }; |
7612 | ||
7613 | static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = { | |
7614 | /* amp, pre, main, post */ | |
7615 | { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ | |
7616 | { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ | |
7617 | { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ | |
7618 | { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ | |
7c7a416e RC |
7619 | { 0, 0, 0, 9 }, /* QMH7342 backplane settings */ |
7620 | { 0, 0, 0, 10 }, /* QMH7342 backplane settings */ | |
22baa407 MH |
7621 | { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */ |
7622 | { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */ | |
7623 | { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */ | |
7624 | { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */ | |
7625 | { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */ | |
7626 | { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */ | |
7627 | { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */ | |
7628 | { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */ | |
7629 | { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */ | |
7630 | { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */ | |
7631 | { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */ | |
7632 | { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */ | |
a77fcf89 RC |
7633 | }; |
7634 | ||
7635 | static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { | |
7636 | /* amp, pre, main, post */ | |
7637 | { 0, 1, 0, 4 }, /* QMH7342 backplane settings */ | |
7638 | { 0, 1, 0, 5 }, /* QMH7342 backplane settings */ | |
7639 | { 0, 1, 0, 6 }, /* QMH7342 backplane settings */ | |
7640 | { 0, 1, 0, 8 }, /* QMH7342 backplane settings */ | |
7c7a416e RC |
7641 | { 0, 1, 0, 10 }, /* QMH7342 backplane settings */ |
7642 | { 0, 1, 0, 12 }, /* QMH7342 backplane settings */ | |
22baa407 MH |
7643 | { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */ |
7644 | { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */ | |
7645 | { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */ | |
7646 | { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */ | |
7647 | { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */ | |
7648 | { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */ | |
7649 | { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */ | |
7650 | { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */ | |
7651 | { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */ | |
7652 | { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */ | |
7653 | { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */ | |
7654 | { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */ | |
a77fcf89 RC |
7655 | }; |
7656 | ||
e706203c MM |
7657 | static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = { |
7658 | /* amp, pre, main, post */ | |
7659 | { 0, 0, 0, 0 }, /* QME7342 mfg settings */ | |
7660 | { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */ | |
7661 | }; | |
7662 | ||
f931551b RC |
7663 | static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, |
7664 | unsigned atten) | |
7665 | { | |
7666 | /* | |
7667 | * The attenuation table starts at 2dB for entry 1, | |
7668 | * with entry 0 being the loopback entry. | |
7669 | */ | |
7670 | if (atten <= 2) | |
7671 | atten = 1; | |
7672 | else if (atten > TXDDS_TABLE_SZ) | |
7673 | atten = TXDDS_TABLE_SZ - 1; | |
7674 | else | |
7675 | atten--; | |
7676 | return txdds + atten; | |
7677 | } | |
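/*
 * Example: a cable reporting 5 dB maps to index 4, i.e. the
 * { 0, 0, 0, 4 } "5 dB" row of txdds_sdr above; 2 dB or less clamps
 * to entry 1, and anything past the table clamps to the last entry.
 */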
7678 | ||
7679 | /* | |
a77fcf89 | 7680 | * if override is set, the module parameter txselect has a value |
f931551b RC |
7681 | * for this specific port, so use it, rather than our normal mechanism. |
7682 | */ | |
7683 | static void find_best_ent(struct qib_pportdata *ppd, | |
7684 | const struct txdds_ent **sdr_dds, | |
7685 | const struct txdds_ent **ddr_dds, | |
7686 | const struct txdds_ent **qdr_dds, int override) | |
7687 | { | |
7688 | struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache; | |
7689 | int idx; | |
7690 | ||
7691 | /* Search table of known cables */ | |
7692 | for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) { | |
7693 | const struct vendor_txdds_ent *v = vendor_txdds + idx; | |
7694 | ||
7695 | if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) && | |
7696 | (!v->partnum || | |
7697 | !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) { | |
7698 | *sdr_dds = &v->sdr; | |
7699 | *ddr_dds = &v->ddr; | |
7700 | *qdr_dds = &v->qdr; | |
7701 | return; | |
7702 | } | |
7703 | } | |
7704 | ||
dde05cbd MH |
7705 | /* Active cables don't have attenuation so we only set SERDES |
7706 | * settings to account for the attenuation of the board traces. */ | |
f931551b RC |
7707 | if (!override && QSFP_IS_ACTIVE(qd->tech)) { |
7708 | *sdr_dds = txdds_sdr + ppd->dd->board_atten; | |
7709 | *ddr_dds = txdds_ddr + ppd->dd->board_atten; | |
7710 | *qdr_dds = txdds_qdr + ppd->dd->board_atten; | |
7711 | return; | |
7712 | } | |
7713 | ||
7714 | if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] || | |
7715 | qd->atten[1])) { | |
7716 | *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]); | |
7717 | *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]); | |
7718 | *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]); | |
7719 | return; | |
a77fcf89 | 7720 | } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) { |
f931551b RC |
7721 | /* |
7722 | * If we have no (or incomplete) data from the cable | |
a77fcf89 RC |
7723 | * EEPROM, or no QSFP, or override is set, use the |
7724 | * module parameter value to index into the attenuation | |
7725 | * table. | |
f931551b | 7726 | */ |
a77fcf89 RC |
7727 | idx = ppd->cpspec->no_eep; |
7728 | *sdr_dds = &txdds_sdr[idx]; | |
7729 | *ddr_dds = &txdds_ddr[idx]; | |
7730 | *qdr_dds = &txdds_qdr[idx]; | |
7731 | } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { | |
7732 | /* similar to above, but index into the "extra" table. */ | |
7733 | idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ; | |
7734 | *sdr_dds = &txdds_extra_sdr[idx]; | |
7735 | *ddr_dds = &txdds_extra_ddr[idx]; | |
7736 | *qdr_dds = &txdds_extra_qdr[idx]; | |
e706203c MM |
7737 | } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) && |
7738 | ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + | |
7739 | TXDDS_MFG_SZ)) { | |
7740 | idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); | |
7fac3301 | 7741 | pr_info("IB%u:%u use idx %u into txdds_mfg\n", |
e706203c MM |
7742 | ppd->dd->unit, ppd->port, idx); |
7743 | *sdr_dds = &txdds_extra_mfg[idx]; | |
7744 | *ddr_dds = &txdds_extra_mfg[idx]; | |
7745 | *qdr_dds = &txdds_extra_mfg[idx]; | |
a77fcf89 RC |
7746 | } else { |
7747 | /* this shouldn't happen, it's range checked */ | |
7748 | *sdr_dds = txdds_sdr + qib_long_atten; | |
7749 | *ddr_dds = txdds_ddr + qib_long_atten; | |
7750 | *qdr_dds = txdds_qdr + qib_long_atten; | |
f931551b RC |
7751 | } |
7752 | } | |
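/*
 * txselect index ranges, as checked above: 0..TXDDS_TABLE_SZ-1 select
 * the main attenuation tables, the next TXDDS_EXTRA_SZ entries the
 * "extra" backplane tables, and (QME/QMH only) the final TXDDS_MFG_SZ
 * entries the manufacturing table.
 */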
7753 | ||
7754 | static void init_txdds_table(struct qib_pportdata *ppd, int override) | |
7755 | { | |
7756 | const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds; | |
7757 | struct txdds_ent *dds; | |
7758 | int idx; | |
7759 | int single_ent = 0; | |
7760 | ||
a77fcf89 RC |
7761 | find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override); |
7762 | ||
7763 | /* for mez cards or override, use the selected value for all entries */ | |
7764 | if (!(ppd->dd->flags & QIB_HAS_QSFP) || override) | |
f931551b | 7765 | single_ent = 1; |
f931551b RC |
7766 | |
7767 | /* Fill in the first entry with the best entry found. */ | |
7768 | set_txdds(ppd, 0, sdr_dds); | |
7769 | set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds); | |
7770 | set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds); | |
a77fcf89 RC |
7771 | if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | |
7772 | QIBL_LINKACTIVE)) { | |
7773 | dds = (struct txdds_ent *)(ppd->link_speed_active == | |
7774 | QIB_IB_QDR ? qdr_dds : | |
7775 | (ppd->link_speed_active == | |
7776 | QIB_IB_DDR ? ddr_dds : sdr_dds)); | |
7777 | write_tx_serdes_param(ppd, dds); | |
7778 | } | |
f931551b RC |
7779 | |
7780 | /* Fill in the remaining entries with the default table values. */ | |
7781 | for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) { | |
7782 | set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx); | |
7783 | set_txdds(ppd, idx + TXDDS_TABLE_SZ, | |
7784 | single_ent ? ddr_dds : txdds_ddr + idx); | |
7785 | set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ, | |
7786 | single_ent ? qdr_dds : txdds_qdr + idx); | |
7787 | } | |
7788 | } | |
7789 | ||
7790 | #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl) | |
7791 | #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg) | |
7792 | #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy) | |
7793 | #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address) | |
7794 | #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data) | |
7795 | #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read) | |
7796 | #define AHB_TRANS_TRIES 10 | |
7797 | ||
7798 | /* | |
7799 | * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3, | |
7800 | * 5=subsystem, which is why most calls have "chan + (chan >> 1)" | |
7801 | * for the channel argument. | |
7802 | */ | |
7803 | static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr, | |
7804 | u32 data, u32 mask) | |
7805 | { | |
7806 | u32 rd_data, wr_data, sz_mask; | |
7807 | u64 trans, acc, prev_acc; | |
7808 | u32 ret = 0xBAD0BAD; | |
7809 | int tries; | |
7810 | ||
7811 | prev_acc = qib_read_kreg64(dd, KR_AHB_ACC); | |
7812 | /* From this point on, make sure we return access */ | |
7813 | acc = (quad << 1) | 1; | |
7814 | qib_write_kreg(dd, KR_AHB_ACC, acc); | |
7815 | ||
7816 | for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { | |
7817 | trans = qib_read_kreg64(dd, KR_AHB_TRANS); | |
7818 | if (trans & AHB_TRANS_RDY) | |
7819 | break; | |
7820 | } | |
7821 | if (tries >= AHB_TRANS_TRIES) { | |
7822 | qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES); | |
7823 | goto bail; | |
7824 | } | |
7825 | ||
7826 | /* If mask is not all 1s, we need to read, but different SerDes | |
7827 | * entities have different sizes | |
7828 | */ | |
7829 | sz_mask = (1ULL << ((quad == 1) ? 32 : 16)) - 1; | |
7830 | wr_data = data & mask & sz_mask; | |
7831 | if ((~mask & sz_mask) != 0) { | |
7832 | trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1); | |
7833 | qib_write_kreg(dd, KR_AHB_TRANS, trans); | |
7834 | ||
7835 | for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { | |
7836 | trans = qib_read_kreg64(dd, KR_AHB_TRANS); | |
7837 | if (trans & AHB_TRANS_RDY) | |
7838 | break; | |
7839 | } | |
7840 | if (tries >= AHB_TRANS_TRIES) { | |
7841 | qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n", | |
7842 | AHB_TRANS_TRIES); | |
7843 | goto bail; | |
7844 | } | |
7845 | /* Re-read in case host split reads and read data first */ | |
7846 | trans = qib_read_kreg64(dd, KR_AHB_TRANS); | |
7847 | rd_data = (uint32_t)(trans >> AHB_DATA_LSB); | |
7848 | wr_data |= (rd_data & ~mask & sz_mask); | |
7849 | } | |
7850 | ||
7851 | /* If mask is not zero, we need to write. */ | |
7852 | if (mask & sz_mask) { | |
7853 | trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1); | |
7854 | trans |= ((uint64_t)wr_data << AHB_DATA_LSB); | |
7855 | trans |= AHB_WR; | |
7856 | qib_write_kreg(dd, KR_AHB_TRANS, trans); | |
7857 | ||
7858 | for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { | |
7859 | trans = qib_read_kreg64(dd, KR_AHB_TRANS); | |
7860 | if (trans & AHB_TRANS_RDY) | |
7861 | break; | |
7862 | } | |
7863 | if (tries >= AHB_TRANS_TRIES) { | |
7864 | qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n", | |
7865 | AHB_TRANS_TRIES); | |
7866 | goto bail; | |
7867 | } | |
7868 | } | |
7869 | ret = wr_data; | |
7870 | bail: | |
7871 | qib_write_kreg(dd, KR_AHB_ACC, prev_acc); | |
7872 | return ret; | |
7873 | } | |
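/*
 * Usage note: mask selects which bits of data get written after a
 * read-modify-write of the addressed SerDes register. mask == 0 makes
 * this a pure read (the current value is returned), and an all-ones
 * mask skips the read entirely; e.g. the read-backs below call
 * ahb_mod(dd, IBSD(ppd->hw_pidx), chan + (chan >> 1), addr, 0, 0).
 */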
7874 | ||
7875 | static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data, | |
7876 | unsigned mask) | |
7877 | { | |
7878 | struct qib_devdata *dd = ppd->dd; | |
7879 | int chan; | |
7880 | u32 rbc; | |
7881 | ||
7882 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | |
7883 | ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr, | |
7884 | data, mask); | |
7885 | rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
7886 | addr, 0, 0); | |
7887 | } | |
7888 | } | |
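/*
 * The chan + (chan >> 1) mapping skips the PLL slot: SerDes channels
 * 0, 1, 2, 3 become AHB channel arguments 0, 1, 3, 4 (2 being the
 * pll, per the comment above ahb_mod()).
 */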
7889 | ||
a0a234d4 MM |
7890 | static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable) |
7891 | { | |
7892 | u64 data = qib_read_kreg_port(ppd, krp_serdesctrl); | |
31264484 MH |
7893 | u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN); |
7894 | ||
7895 | if (enable && !state) { | |
7fac3301 | 7896 | pr_info("IB%u:%u Turning LOS on\n", |
31264484 | 7897 | ppd->dd->unit, ppd->port); |
a0a234d4 | 7898 | data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN); |
31264484 | 7899 | } else if (!enable && state) { |
7fac3301 | 7900 | pr_info("IB%u:%u Turning LOS off\n", |
31264484 | 7901 | ppd->dd->unit, ppd->port); |
a0a234d4 | 7902 | data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN); |
31264484 | 7903 | } |
a0a234d4 MM |
7904 | qib_write_kreg_port(ppd, krp_serdesctrl, data); |
7905 | } | |
7906 | ||
f931551b RC |
7907 | static int serdes_7322_init(struct qib_pportdata *ppd) |
7908 | { | |
a0a234d4 | 7909 | int ret = 0; |
da12c1f6 | 7910 | |
a0a234d4 MM |
7911 | if (ppd->dd->cspec->r1) |
7912 | ret = serdes_7322_init_old(ppd); | |
7913 | else | |
7914 | ret = serdes_7322_init_new(ppd); | |
7915 | return ret; | |
7916 | } | |
7917 | ||
7918 | static int serdes_7322_init_old(struct qib_pportdata *ppd) | |
7919 | { | |
f931551b RC |
7920 | u32 le_val; |
7921 | ||
7922 | /* | |
7923 | * Initialize the Tx DDS tables. Also done every QSFP event, | |
7924 | * for adapters with QSFP | |
7925 | */ | |
7926 | init_txdds_table(ppd, 0); | |
7927 | ||
a77fcf89 RC |
7928 | /* ensure no tx overrides from earlier driver loads */ |
7929 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | |
7930 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
7931 | reset_tx_deemphasis_override)); | |
7932 | ||
f931551b RC |
7933 | /* Patch some SerDes defaults to "Better for IB" */ |
7934 | /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */ | |
7935 | ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); | |
7936 | ||
7937 | /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */ | |
7938 | ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11)); | |
7939 | /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */ | |
7940 | ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6)); | |
7941 | ||
7942 | /* May be overridden in qsfp_7322_event */ | |
7943 | le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; | |
7944 | ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7)); | |
7945 | ||
7946 | /* enable LE1 adaptation for all but QME, which is disabled */ | |
7947 | le_val = IS_QME(ppd->dd) ? 0 : 1; | |
7948 | ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5)); | |
7949 | ||
7950 | /* Clear cmode-override, may be set from older driver */ | |
7951 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); | |
7952 | ||
7953 | /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */ | |
7954 | ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8)); | |
7955 | ||
7956 | /* setup LoS params; these are subsystem, so chan == 5 */ | |
7957 | /* LoS filter threshold_count on, ch 0-3, set to 8 */ | |
7958 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); | |
7959 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); | |
7960 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); | |
7961 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); | |
7962 | ||
7963 | /* LoS filter threshold_count off, ch 0-3, set to 4 */ | |
7964 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); | |
7965 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); | |
7966 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); | |
7967 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); | |
7968 | ||
7969 | /* LoS filter select enabled */ | |
7970 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); | |
7971 | ||
7972 | /* LoS target data: SDR=4, DDR=2, QDR=1 */ | |
7973 | ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */ | |
7974 | ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ | |
7975 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ | |
7976 | ||
a0a234d4 | 7977 | serdes_7322_los_enable(ppd, 1); |
f931551b RC |
7978 | |
7979 | /* rxbistena; set to 0 to avoid effects of it switching later */ | |
7980 | ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); | |
7981 | ||
7982 | /* Configure 4 DFE taps, and only they adapt */ | |
7983 | ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0)); | |
7984 | ||
7985 | /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */ | |
7986 | le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; | |
7987 | ibsd_wr_allchans(ppd, 21, le_val, 0xfffe); | |
7988 | ||
7989 | /* | |
7990 | * Set receive adaptation mode. SDR and DDR adaptation are | |
7991 | * always on, and QDR is initially enabled; later disabled. | |
7992 | */ | |
7993 | qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL); | |
7994 | qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL); | |
7995 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | |
7996 | ppd->dd->cspec->r1 ? | |
7997 | QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); | |
7998 | ppd->cpspec->qdr_dfe_on = 1; | |
7999 | ||
a77fcf89 | 8000 | /* FLoop LOS gate: PPM filter enabled */ |
f931551b RC |
8001 | ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); |
8002 | ||
8003 | /* rx offset center enabled */ | |
8004 | ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4); | |
8005 | ||
8006 | if (!ppd->dd->cspec->r1) { | |
8007 | ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12); | |
8008 | ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8); | |
8009 | } | |
8010 | ||
8011 | /* Set the frequency loop bandwidth to 15 */ | |
8012 | ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5)); | |
8013 | ||
8014 | return 0; | |
8015 | } | |
8016 | ||
a0a234d4 MM |
8017 | static int serdes_7322_init_new(struct qib_pportdata *ppd) |
8018 | { | |
8482d5d1 | 8019 | unsigned long tend; |
a0a234d4 MM |
8020 | u32 le_val, rxcaldone; |
8021 | int chan, chan_done = (1 << SERDES_CHANS) - 1; | |
8022 | ||
a0a234d4 MM |
8023 | /* Clear cmode-override, may be set from older driver */ |
8024 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); | |
8025 | ||
8026 | /* ensure no tx overrides from earlier driver loads */ | |
8027 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | |
8028 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8029 | reset_tx_deemphasis_override)); | |
8030 | ||
8031 | /* START OF LSI SUGGESTED SERDES BRINGUP */ | |
8032 | /* Reset - Calibration Setup */ | |
8033 | /* Stop DFE adaptation */ | |
8034 | ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1)); | |
8035 | /* Disable LE1 */ | |
8036 | ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5)); | |
8037 | /* Disable autoadapt for LE1 */ | |
8038 | ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15)); | |
8039 | /* Disable LE2 */ | |
8040 | ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6)); | |
8041 | /* Disable VGA */ | |
8042 | ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); | |
8043 | /* Disable AFE Offset Cancel */ | |
8044 | ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12)); | |
8045 | /* Disable Timing Loop */ | |
8046 | ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3)); | |
8047 | /* Disable Frequency Loop */ | |
8048 | ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4)); | |
8049 | /* Disable Baseline Wander Correction */ | |
8050 | ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13)); | |
8051 | /* Disable RX Calibration */ | |
8052 | ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10)); | |
8053 | /* Disable RX Offset Calibration */ | |
8054 | ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4)); | |
8055 | /* Select BB CDR */ | |
8056 | ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15)); | |
8057 | /* CDR Step Size */ | |
8058 | ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8)); | |
8059 | /* Enable phase Calibration */ | |
8060 | ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5)); | |
8061 | /* DFE Bandwidth [2:14-12] */ | |
8062 | ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12)); | |
8063 | /* DFE Config (4 taps only) */ | |
8064 | ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0)); | |
8065 | /* Gain Loop Bandwidth */ | |
8066 | if (!ppd->dd->cspec->r1) { | |
8067 | ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12)); | |
8068 | ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8)); | |
8069 | } else { | |
8070 | ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11)); | |
8071 | } | |
8072 | /* Baseline Wander Correction Gain [13:4-0] (leave as default) */ | |
8073 | /* Baseline Wander Correction Gain [3:7-5] (leave as default) */ | |
8074 | /* Data Rate Select [5:7-6] (leave as default) */ | |
25985edc | 8075 | /* RX Parallel Word Width [3:10-8] (leave as default) */ |
a0a234d4 MM |
8076 | |
8077 | /* RX RESET */ | |
8078 | /* Single- or Multi-channel reset */ | |
8079 | /* RX Analog reset */ | |
8080 | /* RX Digital reset */ | |
8081 | ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13)); | |
8082 | msleep(20); | |
8083 | /* RX Analog reset */ | |
8084 | ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14)); | |
8085 | msleep(20); | |
8086 | /* RX Digital reset */ | |
8087 | ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13)); | |
8088 | msleep(20); | |
8089 | ||
8090 | /* setup LoS params; these are subsystem, so chan == 5 */ | |
8091 | /* LoS filter threshold_count on, ch 0-3, set to 8 */ | |
8092 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); | |
8093 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); | |
8094 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); | |
8095 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); | |
8096 | ||
8097 | /* LoS filter threshold_count off, ch 0-3, set to 4 */ | |
8098 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); | |
8099 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); | |
8100 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); | |
8101 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); | |
8102 | ||
8103 | /* LoS filter select enabled */ | |
8104 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); | |
8105 | ||
8106 | /* LoS target data: SDR=4, DDR=2, QDR=1 */ | |
8107 | ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */ | |
8108 | ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ | |
8109 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ | |
8110 | ||
8111 | /* Turn on LOS on initial SERDES init */ | |
8112 | serdes_7322_los_enable(ppd, 1); | |
8113 | /* FLoop LOS gate: PPM filter enabled */ | |
8114 | ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); | |
8115 | ||
8116 | /* RX LATCH CALIBRATION */ | |
8117 | /* Enable Eyefinder Phase Calibration latch */ | |
8118 | ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0)); | |
8119 | /* Enable RX Offset Calibration latch */ | |
8120 | ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4)); | |
8121 | msleep(20); | |
8122 | /* Start Calibration */ | |
8123 | ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); | |
8482d5d1 MM |
8124 | tend = jiffies + msecs_to_jiffies(500); |
8125 | while (chan_done && !time_is_before_jiffies(tend)) { | |
a0a234d4 MM |
8126 | msleep(20); |
8127 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | |
8128 | rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), | |
8129 | (chan + (chan >> 1)), | |
8130 | 25, 0, 0); | |
8131 | if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 && | |
8132 | (~chan_done & (1 << chan)) == 0) | |
8133 | chan_done &= ~(1 << chan); | |
8134 | } | |
8135 | } | |
8136 | if (chan_done) { | |
7fac3301 | 8137 | pr_info("Serdes %d calibration not done after 0.5 sec: 0x%x\n", |
a0a234d4 MM |
8138 | IBSD(ppd->hw_pidx), chan_done); |
8139 | } else { | |
8140 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | |
8141 | rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), | |
8142 | (chan + (chan >> 1)), | |
8143 | 25, 0, 0); | |
8144 | if ((~rxcaldone & (u32)BMASK(10, 10)) == 0) | |
7fac3301 MM |
8145 | pr_info("Serdes %d chan %d calibration failed\n", |
8146 | IBSD(ppd->hw_pidx), chan); | |
a0a234d4 MM |
8147 | } |
8148 | } | |
8149 | ||
8150 | /* Turn off Calibration */ | |
8151 | ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10)); | |
8152 | msleep(20); | |
8153 | ||
8154 | /* BRING RX UP */ | |
8155 | /* Set LE2 value (May be overridden in qsfp_7322_event) */ | |
8156 | le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; | |
8157 | ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7)); | |
8158 | /* Set LE2 Loop bandwidth */ | |
8159 | ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5)); | |
8160 | /* Enable LE2 */ | |
8161 | ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6)); | |
8162 | msleep(20); | |
8163 | /* Enable H0 only */ | |
8164 | ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1)); | |
8165 | /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */ | |
8166 | le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; | |
8167 | ibsd_wr_allchans(ppd, 21, le_val, 0xfffe); | |
8168 | /* Enable VGA */ | |
8169 | ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); | |
8170 | msleep(20); | |
8171 | /* Set Frequency Loop Bandwidth */ | |
f665acb3 | 8172 | ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5)); |
a0a234d4 MM |
8173 | /* Enable Frequency Loop */ |
8174 | ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4)); | |
8175 | /* Set Timing Loop Bandwidth */ | |
8176 | ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); | |
8177 | /* Enable Timing Loop */ | |
8178 | ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3)); | |
8179 | msleep(50); | |
8180 | /* Enable DFE | |
8181 | * Set receive adaptation mode. SDR and DDR adaptation are | |
8182 | * always on, and QDR is initially enabled; later disabled. | |
8183 | */ | |
8184 | qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL); | |
8185 | qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL); | |
8186 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | |
8187 | ppd->dd->cspec->r1 ? | |
8188 | QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); | |
8189 | ppd->cpspec->qdr_dfe_on = 1; | |
8190 | /* Disable LE1 */ | |
8191 | ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5)); | |
8192 | /* Disable auto adapt for LE1 */ | |
8193 | ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15)); | |
8194 | msleep(20); | |
8195 | /* Enable AFE Offset Cancel */ | |
8196 | ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12)); | |
8197 | /* Enable Baseline Wander Correction */ | |
8198 | ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13)); | |
8199 | /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */ | |
8200 | ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11)); | |
8201 | /* VGA output common mode */ | |
8202 | ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2)); | |
8203 | ||
dde05cbd MH |
8204 | /* |
8205 | * Initialize the Tx DDS tables. Also done every QSFP event, | |
8206 | * for adapters with QSFP | |
8207 | */ | |
8208 | init_txdds_table(ppd, 0); | |
8209 | ||
a0a234d4 MM |
8210 | return 0; |
8211 | } | |
8212 | ||
f931551b RC |
8213 | /* start of QMH serdes parameter adjustment */ | |
8214 | ||
8215 | static void set_man_code(struct qib_pportdata *ppd, int chan, int code) | |
8216 | { | |
8217 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8218 | 9, code << 9, 0x3f << 9); | |
8219 | } | |
8220 | ||
8221 | static void set_man_mode_h1(struct qib_pportdata *ppd, int chan, | |
8222 | int enable, u32 tapenable) | |
8223 | { | |
8224 | if (enable) | |
8225 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8226 | 1, 3 << 10, 0x1f << 10); | |
8227 | else | |
8228 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8229 | 1, 0, 0x1f << 10); | |
8230 | } | |
8231 | ||
8232 | /* Set clock to 1, 0, 1, 0 */ | |
8233 | static void clock_man(struct qib_pportdata *ppd, int chan) | |
8234 | { | |
8235 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8236 | 4, 0x4000, 0x4000); | |
8237 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8238 | 4, 0, 0x4000); | |
8239 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8240 | 4, 0x4000, 0x4000); | |
8241 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | |
8242 | 4, 0, 0x4000); | |
8243 | } | |
8244 | ||
8245 | /* | |
8246 | * Write the current Tx serdes pre, post, main, amp settings into the serdes. | |
8247 | * The caller must pass the settings appropriate for the current speed, | |
8248 | * or not care if they are correct for the current speed. | |
8249 | */ | |
8250 | static void write_tx_serdes_param(struct qib_pportdata *ppd, | |
8251 | struct txdds_ent *txdds) | |
8252 | { | |
8253 | u64 deemph; | |
8254 | ||
8255 | deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override); | |
8256 | /* field names for amp, main, post, pre, respectively */ | |
8257 | deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) | | |
8258 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) | | |
8259 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) | | |
8260 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena)); | |
a77fcf89 RC |
8261 | |
8262 | deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8263 | tx_override_deemphasis_select); | |
8264 | deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8265 | txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8266 | txampcntl_d2a); | |
8267 | deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8268 | txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8269 | txc0_ena); | |
8270 | deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8271 | txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8272 | txcp1_ena); | |
8273 | deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
8274 | txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | |
f931551b RC |
8275 | txcn1_ena); |
8276 | qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph); | |
8277 | } | |
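
/*
 * Illustrative only: with the SYM_* macros expanded, the override
 * value built above has the shape
 *
 *	deemph = tx_override_deemphasis_select
 *		 | (amp  << lsb(txampcntl_d2a))
 *		 | (main << lsb(txc0_ena))
 *		 | (post << lsb(txcp1_ena))
 *		 | (pre  << lsb(txcn1_ena));
 *
 * with each input value first clipped to its field width by SYM_RMASK.
 */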

/*
 * Set the parameters for mez cards on link bounce, so they are
 * always exactly what was requested.  Similar logic to
 * init_txdds_table() but does just the serdes.
 */
static void adj_tx_serdes(struct qib_pportdata *ppd)
{
	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
	struct txdds_ent *dds;

	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
		qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
				ddr_dds : sdr_dds));
	write_tx_serdes_param(ppd, dds);
}

/* Set QDR forced value for H1, if needed */
static void force_h1(struct qib_pportdata *ppd)
{
	int chan;

	ppd->cpspec->qdr_reforce = 0;
	if (!ppd->dd->cspec->r1)
		return;

	for (chan = 0; chan < SERDES_CHANS; chan++) {
		set_man_mode_h1(ppd, chan, 1, 0);
		set_man_code(ppd, chan, ppd->cpspec->h1_val);
		clock_man(ppd, chan);
		set_man_mode_h1(ppd, chan, 0, 0);
	}
}

#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)

#define R_OPCODE_LSB 3
#define R_OP_NOP 0
#define R_OP_SHIFT 2
#define R_OP_UPDATE 3
#define R_TDI_LSB 2
#define R_TDO_LSB 1
#define R_RDY 1

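/*
 * The JTAG access register behaves as a bit-serial scan interface:
 * an opcode (R_OP_NOP/R_OP_SHIFT/R_OP_UPDATE) is placed at
 * R_OPCODE_LSB, data is clocked in one bit per write through R_TDI
 * and sampled back through R_TDO, and R_RDY indicates that the
 * previous operation has completed.  The qib_r_*() helpers below
 * wrap this protocol.
 */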
static int qib_r_grab(struct qib_devdata *dd)
{
	u64 val = SJA_EN;

	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	return 0;
}

/*
 * qib_r_wait_for_rdy() not only waits for the ready bit, it also
 * returns the current state of R_TDO.
 */
static int qib_r_wait_for_rdy(struct qib_devdata *dd)
{
	u64 val;
	int timeout;

	for (timeout = 0; timeout < 100; ++timeout) {
		val = qib_read_kreg32(dd, kr_r_access);
		if (val & R_RDY)
			return (val >> R_TDO_LSB) & 1;
	}
	return -1;
}

static int qib_r_shift(struct qib_devdata *dd, int bisten,
		       int len, u8 *inp, u8 *outp)
{
	u64 valbase, val;
	int ret, pos;

	valbase = SJA_EN | (bisten << BISTEN_LSB) |
		(R_OP_SHIFT << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret < 0)
		goto bail;
	for (pos = 0; pos < len; ++pos) {
		val = valbase;
		if (outp) {
			/* ret holds the TDO bit sampled by the last wait */
			outp[pos >> 3] &= ~(1 << (pos & 7));
			outp[pos >> 3] |= (ret << (pos & 7));
		}
		if (inp) {
			int tdi = inp[pos >> 3] >> (pos & 7);

			val |= ((tdi & 1) << R_TDI_LSB);
		}
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
		ret = qib_r_wait_for_rdy(dd);
		if (ret < 0)
			break;
	}
	/* Restore to NOP between operations. */
	val = SJA_EN | (bisten << BISTEN_LSB);
	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	ret = qib_r_wait_for_rdy(dd);

	if (ret >= 0)
		ret = pos;
bail:
	return ret;
}

static int qib_r_update(struct qib_devdata *dd, int bisten)
{
	u64 val;
	int ret;

	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret >= 0) {
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
	}
	return ret;
}
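
/*
 * Typical use, as in setup_7322_link_recovery() below: grab the
 * interface, shift a full-length pattern into the selected BIST
 * chain, then latch it with an UPDATE.  A sketch:
 *
 *	if (qib_r_grab(dd) < 0 ||
 *	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, pattern, NULL) < 0 ||
 *	    qib_r_update(dd, BISTEN_ETM) < 0)
 *		qib_dev_err(dd, "...");
 */
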
#define BISTEN_PORT_SEL 15
#define LEN_PORT_SEL 625
#define BISTEN_AT 17
#define LEN_AT 156
#define BISTEN_ETM 16
#define LEN_ETM 632

#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
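/* BIT2BYTE() rounds a bit count up to whole bytes; e.g. BIT2BYTE(625) == 79. */
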
/* These are common for all IB port use cases. */
static u8 reset_at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};
static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
};
static u8 at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

/* Used for IB1 or IB2, only one in use */
static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
};

/* Used when both IB1 and IB2 are in use */
static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
};

/* Used when only IB1 is in use */
static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* Used when only IB2 is in use */
static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
};

/* Used when both IB1 and IB2 are in use */
static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

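/*
 * The patterns above are opaque scan-chain images; each array is
 * sized to its chain length in bits (LEN_AT, LEN_ETM, LEN_PORT_SEL).
 * They are only shifted into r1 hardware; see the cspec->r1 check in
 * setup_7322_link_recovery() below.
 */
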
/*
 * Do setup to properly handle IB link recovery; if "both" is set, we
 * are initializing to cover both ports; otherwise we are initializing
 * to cover a single port card, or the port has reached INIT and we may
 * need to switch coverage types.
 */
static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
{
	u8 *portsel, *etm;
	struct qib_devdata *dd = ppd->dd;

	if (!ppd->dd->cspec->r1)
		return;
	if (!both) {
		dd->cspec->recovery_ports_initted++;
		ppd->cpspec->recovery_init = 1;
	}
	if (!both && dd->cspec->recovery_ports_initted == 1) {
		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
		etm = atetm_1port;
	} else {
		portsel = portsel_2port;
		etm = atetm_2port;
	}

	if (qib_r_grab(dd) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
			portsel, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0)
		qib_dev_err(dd, "Failed IB link recovery setup\n");
}

static void check_7322_rxe_status(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 fmask;

	if (dd->cspec->recovery_ports_initted != 1)
		return; /* rest doesn't apply to dualport */
	qib_write_kreg(dd, kr_control, dd->control |
		       SYM_MASK(Control, FreezeMode));
	(void)qib_read_kreg64(dd, kr_scratch);
	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
	fmask = qib_read_kreg64(dd, kr_act_fmask);
	if (!fmask) {
		/*
		 * Require a powercycle before we'll work again; make
		 * sure we get no more interrupts, and don't turn off
		 * freeze.
		 */
		ppd->dd->cspec->stay_in_freeze = 1;
		qib_7322_set_intr_state(ppd->dd, 0);
		qib_write_kreg(dd, kr_fmask, 0ULL);
		qib_dev_err(dd, "HCA unusable until powercycled\n");
		return; /* eventually reset */
	}

	qib_write_kreg(ppd->dd, kr_hwerrclear,
		       SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));

	/* don't do the full clear_freeze(), not needed for this */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);
	/* take IBC out of reset */
	if (ppd->link_speed_supported) {
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_read_kreg32(dd, kr_scratch);
		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
			qib_set_ib_7322_lstate(ppd, 0,
				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	}
}