]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
Merge ath-next from ath.git
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / chelsio / cxgb4 / t4_hw.c
1 /*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35 #include <linux/delay.h>
36 #include "cxgb4.h"
37 #include "t4_regs.h"
38 #include "t4_values.h"
39 #include "t4fw_api.h"
40
41 /**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
56 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
58 {
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72 }
73
74 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
76 {
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78 delay, NULL);
79 }
80
81 /**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93 {
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98 }
99
100 /**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
112 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
115 {
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121 }
122
123 /**
124 * t4_write_indirect - write indirectly addressed registers
125 * @adap: the adapter
126 * @addr_reg: register holding the indirect addresses
127 * @data_reg: register holding the value for the indirect registers
128 * @vals: values to write
129 * @nregs: how many indirect registers to write
130 * @start_idx: address of first indirect register to write
131 *
132 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair.
134 */
135 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx)
138 {
139 while (nregs--) {
140 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++);
142 }
143 }
144
145 /*
146 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
147 * mechanism. This guarantees that we get the real value even if we're
148 * operating within a Virtual Machine and the Hypervisor is trapping our
149 * Configuration Space accesses.
150 */
151 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
152 {
153 u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
154
155 if (is_t4(adap->params.chip))
156 req |= LOCALCFG_F;
157
158 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
159 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
160
161 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
162 * Configuration Space read. (None of the other fields matter when
163 * ENABLE is 0 so a simple register write is easier than a
164 * read-modify-write via t4_set_reg_field().)
165 */
166 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
167 }
168
169 /*
170 * t4_report_fw_error - report firmware error
171 * @adap: the adapter
172 *
173 * The adapter firmware can indicate error conditions to the host.
174 * If the firmware has indicated an error, print out the reason for
175 * the firmware error.
176 */
177 static void t4_report_fw_error(struct adapter *adap)
178 {
179 static const char *const reason[] = {
180 "Crash", /* PCIE_FW_EVAL_CRASH */
181 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
182 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
183 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
184 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
185 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
186 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
187 "Reserved", /* reserved */
188 };
189 u32 pcie_fw;
190
191 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
192 if (pcie_fw & PCIE_FW_ERR_F)
193 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
194 reason[PCIE_FW_EVAL_G(pcie_fw)]);
195 }
196
197 /*
198 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
199 */
200 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
201 u32 mbox_addr)
202 {
203 for ( ; nflit; nflit--, mbox_addr += 8)
204 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
205 }
206
207 /*
208 * Handle a FW assertion reported in a mailbox.
209 */
210 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
211 {
212 struct fw_debug_cmd asrt;
213
214 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
215 dev_alert(adap->pdev_dev,
216 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
217 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
218 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
219 }
220
221 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
222 {
223 dev_err(adap->pdev_dev,
224 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
225 (unsigned long long)t4_read_reg64(adap, data_reg),
226 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
227 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
228 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
229 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
230 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
231 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
232 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
233 }
234
/**
 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/* Progressive backoff schedule in milliseconds for the sleeping
	 * poll path; the last entry repeats once the index saturates.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);

	/* Commands must be a non-zero multiple of 16 bytes that fits in the
	 * mailbox.
	 */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	/* Try to gain ownership of the mailbox: re-read the owner field a
	 * few times while it still reads "no owner".
	 */
	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));

	/* Without driver ownership we cannot send; distinguish "someone else
	 * owns it" (-EBUSY) from "never became available" (-ETIMEDOUT).
	 */
	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	/* Copy the command into the mailbox data registers 64 bits at a
	 * time, then hand the mailbox over to the firmware.
	 */
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	/* Poll until the firmware hands the mailbox back or @timeout ms
	 * elapse.
	 */
	for (i = 0; i < timeout; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			/* Ownership without a valid message is spurious:
			 * release the mailbox and keep polling.
			 */
			if (!(v & MBMSGVALID_F)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			/* The first flit of the reply carries the FW opcode
			 * and return value.  A FW_DEBUG_CMD reply signals a
			 * firmware assertion rather than a normal reply.
			 */
			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			}

			/* Dump the mailbox on a firmware-reported error to
			 * aid debugging, then release it.
			 */
			if (FW_CMD_RETVAL_G((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	/* Timed out: dump the mailbox for post-mortem and report any
	 * firmware error status before giving up.
	 */
	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
335
336 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
337 void *rpl, bool sleep_ok)
338 {
339 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
340 FW_CMD_MAX_TIMEOUT);
341 }
342
/**
 *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 *	@adap: the adapter
 *	@win: PCI-E Memory Window to use
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to transfer
 *	@hbuf: host memory buffer
 *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 *	Reads/writes an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address and host buffer must be aligned on 32-bit
 *	boudaries; the length may be arbitrary.  The memory is transferred as
 *	a raw byte sequence from/to the firmware's memory.  If this memory
 *	contains data structures which contain multi-byte integers, it's the
 *	caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
	u32 *buf;

	/* Argument sanity checks ...
	 * Both the adapter address and the host buffer must be 32-bit
	 * aligned.
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		/* MC1 starts after both EDC regions and MC0; assumes EDC0
		 * and EDC1 have the same size (only EDRAM0's size is read).
		 */
		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
						      MA_EXT_MEMORY0_BAR_A));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
						  win));
	mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
	mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzel."  As a result, if we have the following
	 * data in adapter memory:
	 *
	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *     Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *     x = readl(i)
	 *         31                  0
	 *         [ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *     ( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *     ( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzels.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
						mem_base + offset));
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
						    win), pos | win_pf);
			t4_read_reg(adap,
				PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
						    win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			u32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = le32_to_cpu(
					(__force __le32)t4_read_reg(adap,
						mem_base + offset));
			/* NOTE(review): this loop copies bytes [resid, 4) of
			 * the last word -- i.e. bytes *beyond* the requested
			 * length -- into the host buffer and leaves the
			 * requested tail bytes [0, resid) untouched.  That is
			 * asymmetric with the write path below, which keeps
			 * bytes [0, resid) and zeroes the rest; it looks
			 * inverted.  Presumably read callers only ever use
			 * 32-bit-multiple lengths -- confirm before relying
			 * on residual reads.
			 */
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			/* Keep the caller's resid bytes, zero-fill the rest
			 * of the word, and write it out.
			 */
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(last.word));
		}
	}

	return 0;
}
525
526 /* Return the specified PCI-E Configuration Space register from our Physical
527 * Function. We try first via a Firmware LDST Command since we prefer to let
528 * the firmware own all of these registers, but if that fails we go for it
529 * directly ourselves.
530 */
531 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
532 {
533 u32 val, ldst_addrspace;
534
535 /* If fw_attach != 0, construct and send the Firmware LDST Command to
536 * retrieve the specified PCI-E Configuration Space register.
537 */
538 struct fw_ldst_cmd ldst_cmd;
539 int ret;
540
541 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
542 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
543 ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
544 FW_CMD_REQUEST_F |
545 FW_CMD_READ_F |
546 ldst_addrspace);
547 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
548 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
549 ldst_cmd.u.pcie.ctrl_to_fn =
550 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
551 ldst_cmd.u.pcie.r = reg;
552
553 /* If the LDST Command succeeds, return the result, otherwise
554 * fall through to reading it directly ourselves ...
555 */
556 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
557 &ldst_cmd);
558 if (ret == 0)
559 val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
560 else
561 /* Read the desired Configuration Space register via the PCI-E
562 * Backdoor mechanism.
563 */
564 t4_hw_pci_read_cfg4(adap, reg, &val);
565 return val;
566 }
567
568 /* Get the window based on base passed to it.
569 * Window aperture is currently unhandled, but there is no use case for it
570 * right now
571 */
572 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
573 u32 memwin_base)
574 {
575 u32 ret;
576
577 if (is_t4(adap->params.chip)) {
578 u32 bar0;
579
580 /* Truncation intentional: we only read the bottom 32-bits of
581 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
582 * mechanism to read BAR0 instead of using
583 * pci_resource_start() because we could be operating from
584 * within a Virtual Machine which is trapping our accesses to
585 * our Configuration Space and we need to set up the PCI-E
586 * Memory Window decoders with the actual addresses which will
587 * be coming across the PCI-E link.
588 */
589 bar0 = t4_read_pcie_cfg4(adap, pci_base);
590 bar0 &= pci_mask;
591 adap->t4_bar0 = bar0;
592
593 ret = bar0 + memwin_base;
594 } else {
595 /* For T5, only relative offset inside the PCIe BAR is passed */
596 ret = memwin_base;
597 }
598 return ret;
599 }
600
601 /* Get the default utility window (win0) used by everyone */
602 u32 t4_get_util_window(struct adapter *adap)
603 {
604 return t4_get_window(adap, PCI_BASE_ADDRESS_0,
605 PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
606 }
607
608 /* Set up memory window for accessing adapter memory ranges. (Read
609 * back MA register to ensure that changes propagate before we attempt
610 * to use the new values.)
611 */
612 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
613 {
614 t4_write_reg(adap,
615 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
616 memwin_base | BIR_V(0) |
617 WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
618 t4_read_reg(adap,
619 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
620 }
621
622 /**
623 * t4_get_regs_len - return the size of the chips register set
624 * @adapter: the adapter
625 *
626 * Returns the size of the chip's BAR0 register space.
627 */
628 unsigned int t4_get_regs_len(struct adapter *adapter)
629 {
630 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
631
632 switch (chip_version) {
633 case CHELSIO_T4:
634 return T4_REGMAP_SIZE;
635
636 case CHELSIO_T5:
637 return T5_REGMAP_SIZE;
638 }
639
640 dev_err(adapter->pdev_dev,
641 "Unsupported chip version %d\n", chip_version);
642 return 0;
643 }
644
645 /**
646 * t4_get_regs - read chip registers into provided buffer
647 * @adap: the adapter
648 * @buf: register buffer
649 * @buf_size: size (in bytes) of register buffer
650 *
651 * If the provided register buffer isn't large enough for the chip's
652 * full register range, the register dump will be truncated to the
653 * register buffer's size.
654 */
655 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
656 {
657 static const unsigned int t4_reg_ranges[] = {
658 0x1008, 0x1108,
659 0x1180, 0x11b4,
660 0x11fc, 0x123c,
661 0x1300, 0x173c,
662 0x1800, 0x18fc,
663 0x3000, 0x305c,
664 0x3068, 0x30d8,
665 0x30e0, 0x5924,
666 0x5960, 0x59d4,
667 0x5a00, 0x5af8,
668 0x6000, 0x6098,
669 0x6100, 0x6150,
670 0x6200, 0x6208,
671 0x6240, 0x6248,
672 0x6280, 0x6338,
673 0x6370, 0x638c,
674 0x6400, 0x643c,
675 0x6500, 0x6524,
676 0x6a00, 0x6a38,
677 0x6a60, 0x6a78,
678 0x6b00, 0x6b84,
679 0x6bf0, 0x6c84,
680 0x6cf0, 0x6d84,
681 0x6df0, 0x6e84,
682 0x6ef0, 0x6f84,
683 0x6ff0, 0x7084,
684 0x70f0, 0x7184,
685 0x71f0, 0x7284,
686 0x72f0, 0x7384,
687 0x73f0, 0x7450,
688 0x7500, 0x7530,
689 0x7600, 0x761c,
690 0x7680, 0x76cc,
691 0x7700, 0x7798,
692 0x77c0, 0x77fc,
693 0x7900, 0x79fc,
694 0x7b00, 0x7c38,
695 0x7d00, 0x7efc,
696 0x8dc0, 0x8e1c,
697 0x8e30, 0x8e78,
698 0x8ea0, 0x8f6c,
699 0x8fc0, 0x9074,
700 0x90fc, 0x90fc,
701 0x9400, 0x9458,
702 0x9600, 0x96bc,
703 0x9800, 0x9808,
704 0x9820, 0x983c,
705 0x9850, 0x9864,
706 0x9c00, 0x9c6c,
707 0x9c80, 0x9cec,
708 0x9d00, 0x9d6c,
709 0x9d80, 0x9dec,
710 0x9e00, 0x9e6c,
711 0x9e80, 0x9eec,
712 0x9f00, 0x9f6c,
713 0x9f80, 0x9fec,
714 0xd004, 0xd03c,
715 0xdfc0, 0xdfe0,
716 0xe000, 0xea7c,
717 0xf000, 0x11110,
718 0x11118, 0x11190,
719 0x19040, 0x1906c,
720 0x19078, 0x19080,
721 0x1908c, 0x19124,
722 0x19150, 0x191b0,
723 0x191d0, 0x191e8,
724 0x19238, 0x1924c,
725 0x193f8, 0x19474,
726 0x19490, 0x194f8,
727 0x19800, 0x19f4c,
728 0x1a000, 0x1a06c,
729 0x1a0b0, 0x1a120,
730 0x1a128, 0x1a138,
731 0x1a190, 0x1a1c4,
732 0x1a1fc, 0x1a1fc,
733 0x1e040, 0x1e04c,
734 0x1e284, 0x1e28c,
735 0x1e2c0, 0x1e2c0,
736 0x1e2e0, 0x1e2e0,
737 0x1e300, 0x1e384,
738 0x1e3c0, 0x1e3c8,
739 0x1e440, 0x1e44c,
740 0x1e684, 0x1e68c,
741 0x1e6c0, 0x1e6c0,
742 0x1e6e0, 0x1e6e0,
743 0x1e700, 0x1e784,
744 0x1e7c0, 0x1e7c8,
745 0x1e840, 0x1e84c,
746 0x1ea84, 0x1ea8c,
747 0x1eac0, 0x1eac0,
748 0x1eae0, 0x1eae0,
749 0x1eb00, 0x1eb84,
750 0x1ebc0, 0x1ebc8,
751 0x1ec40, 0x1ec4c,
752 0x1ee84, 0x1ee8c,
753 0x1eec0, 0x1eec0,
754 0x1eee0, 0x1eee0,
755 0x1ef00, 0x1ef84,
756 0x1efc0, 0x1efc8,
757 0x1f040, 0x1f04c,
758 0x1f284, 0x1f28c,
759 0x1f2c0, 0x1f2c0,
760 0x1f2e0, 0x1f2e0,
761 0x1f300, 0x1f384,
762 0x1f3c0, 0x1f3c8,
763 0x1f440, 0x1f44c,
764 0x1f684, 0x1f68c,
765 0x1f6c0, 0x1f6c0,
766 0x1f6e0, 0x1f6e0,
767 0x1f700, 0x1f784,
768 0x1f7c0, 0x1f7c8,
769 0x1f840, 0x1f84c,
770 0x1fa84, 0x1fa8c,
771 0x1fac0, 0x1fac0,
772 0x1fae0, 0x1fae0,
773 0x1fb00, 0x1fb84,
774 0x1fbc0, 0x1fbc8,
775 0x1fc40, 0x1fc4c,
776 0x1fe84, 0x1fe8c,
777 0x1fec0, 0x1fec0,
778 0x1fee0, 0x1fee0,
779 0x1ff00, 0x1ff84,
780 0x1ffc0, 0x1ffc8,
781 0x20000, 0x2002c,
782 0x20100, 0x2013c,
783 0x20190, 0x201c8,
784 0x20200, 0x20318,
785 0x20400, 0x20528,
786 0x20540, 0x20614,
787 0x21000, 0x21040,
788 0x2104c, 0x21060,
789 0x210c0, 0x210ec,
790 0x21200, 0x21268,
791 0x21270, 0x21284,
792 0x212fc, 0x21388,
793 0x21400, 0x21404,
794 0x21500, 0x21518,
795 0x2152c, 0x2153c,
796 0x21550, 0x21554,
797 0x21600, 0x21600,
798 0x21608, 0x21628,
799 0x21630, 0x2163c,
800 0x21700, 0x2171c,
801 0x21780, 0x2178c,
802 0x21800, 0x21c38,
803 0x21c80, 0x21d7c,
804 0x21e00, 0x21e04,
805 0x22000, 0x2202c,
806 0x22100, 0x2213c,
807 0x22190, 0x221c8,
808 0x22200, 0x22318,
809 0x22400, 0x22528,
810 0x22540, 0x22614,
811 0x23000, 0x23040,
812 0x2304c, 0x23060,
813 0x230c0, 0x230ec,
814 0x23200, 0x23268,
815 0x23270, 0x23284,
816 0x232fc, 0x23388,
817 0x23400, 0x23404,
818 0x23500, 0x23518,
819 0x2352c, 0x2353c,
820 0x23550, 0x23554,
821 0x23600, 0x23600,
822 0x23608, 0x23628,
823 0x23630, 0x2363c,
824 0x23700, 0x2371c,
825 0x23780, 0x2378c,
826 0x23800, 0x23c38,
827 0x23c80, 0x23d7c,
828 0x23e00, 0x23e04,
829 0x24000, 0x2402c,
830 0x24100, 0x2413c,
831 0x24190, 0x241c8,
832 0x24200, 0x24318,
833 0x24400, 0x24528,
834 0x24540, 0x24614,
835 0x25000, 0x25040,
836 0x2504c, 0x25060,
837 0x250c0, 0x250ec,
838 0x25200, 0x25268,
839 0x25270, 0x25284,
840 0x252fc, 0x25388,
841 0x25400, 0x25404,
842 0x25500, 0x25518,
843 0x2552c, 0x2553c,
844 0x25550, 0x25554,
845 0x25600, 0x25600,
846 0x25608, 0x25628,
847 0x25630, 0x2563c,
848 0x25700, 0x2571c,
849 0x25780, 0x2578c,
850 0x25800, 0x25c38,
851 0x25c80, 0x25d7c,
852 0x25e00, 0x25e04,
853 0x26000, 0x2602c,
854 0x26100, 0x2613c,
855 0x26190, 0x261c8,
856 0x26200, 0x26318,
857 0x26400, 0x26528,
858 0x26540, 0x26614,
859 0x27000, 0x27040,
860 0x2704c, 0x27060,
861 0x270c0, 0x270ec,
862 0x27200, 0x27268,
863 0x27270, 0x27284,
864 0x272fc, 0x27388,
865 0x27400, 0x27404,
866 0x27500, 0x27518,
867 0x2752c, 0x2753c,
868 0x27550, 0x27554,
869 0x27600, 0x27600,
870 0x27608, 0x27628,
871 0x27630, 0x2763c,
872 0x27700, 0x2771c,
873 0x27780, 0x2778c,
874 0x27800, 0x27c38,
875 0x27c80, 0x27d7c,
876 0x27e00, 0x27e04,
877 };
878
879 static const unsigned int t5_reg_ranges[] = {
880 0x1008, 0x1148,
881 0x1180, 0x11b4,
882 0x11fc, 0x123c,
883 0x1280, 0x173c,
884 0x1800, 0x18fc,
885 0x3000, 0x3028,
886 0x3068, 0x30d8,
887 0x30e0, 0x30fc,
888 0x3140, 0x357c,
889 0x35a8, 0x35cc,
890 0x35ec, 0x35ec,
891 0x3600, 0x5624,
892 0x56cc, 0x575c,
893 0x580c, 0x5814,
894 0x5890, 0x58bc,
895 0x5940, 0x59dc,
896 0x59fc, 0x5a18,
897 0x5a60, 0x5a9c,
898 0x5b94, 0x5bfc,
899 0x6000, 0x6040,
900 0x6058, 0x614c,
901 0x7700, 0x7798,
902 0x77c0, 0x78fc,
903 0x7b00, 0x7c54,
904 0x7d00, 0x7efc,
905 0x8dc0, 0x8de0,
906 0x8df8, 0x8e84,
907 0x8ea0, 0x8f84,
908 0x8fc0, 0x90f8,
909 0x9400, 0x9470,
910 0x9600, 0x96f4,
911 0x9800, 0x9808,
912 0x9820, 0x983c,
913 0x9850, 0x9864,
914 0x9c00, 0x9c6c,
915 0x9c80, 0x9cec,
916 0x9d00, 0x9d6c,
917 0x9d80, 0x9dec,
918 0x9e00, 0x9e6c,
919 0x9e80, 0x9eec,
920 0x9f00, 0x9f6c,
921 0x9f80, 0xa020,
922 0xd004, 0xd03c,
923 0xdfc0, 0xdfe0,
924 0xe000, 0x11088,
925 0x1109c, 0x11110,
926 0x11118, 0x1117c,
927 0x11190, 0x11204,
928 0x19040, 0x1906c,
929 0x19078, 0x19080,
930 0x1908c, 0x19124,
931 0x19150, 0x191b0,
932 0x191d0, 0x191e8,
933 0x19238, 0x19290,
934 0x193f8, 0x19474,
935 0x19490, 0x194cc,
936 0x194f0, 0x194f8,
937 0x19c00, 0x19c60,
938 0x19c94, 0x19e10,
939 0x19e50, 0x19f34,
940 0x19f40, 0x19f50,
941 0x19f90, 0x19fe4,
942 0x1a000, 0x1a06c,
943 0x1a0b0, 0x1a120,
944 0x1a128, 0x1a138,
945 0x1a190, 0x1a1c4,
946 0x1a1fc, 0x1a1fc,
947 0x1e008, 0x1e00c,
948 0x1e040, 0x1e04c,
949 0x1e284, 0x1e290,
950 0x1e2c0, 0x1e2c0,
951 0x1e2e0, 0x1e2e0,
952 0x1e300, 0x1e384,
953 0x1e3c0, 0x1e3c8,
954 0x1e408, 0x1e40c,
955 0x1e440, 0x1e44c,
956 0x1e684, 0x1e690,
957 0x1e6c0, 0x1e6c0,
958 0x1e6e0, 0x1e6e0,
959 0x1e700, 0x1e784,
960 0x1e7c0, 0x1e7c8,
961 0x1e808, 0x1e80c,
962 0x1e840, 0x1e84c,
963 0x1ea84, 0x1ea90,
964 0x1eac0, 0x1eac0,
965 0x1eae0, 0x1eae0,
966 0x1eb00, 0x1eb84,
967 0x1ebc0, 0x1ebc8,
968 0x1ec08, 0x1ec0c,
969 0x1ec40, 0x1ec4c,
970 0x1ee84, 0x1ee90,
971 0x1eec0, 0x1eec0,
972 0x1eee0, 0x1eee0,
973 0x1ef00, 0x1ef84,
974 0x1efc0, 0x1efc8,
975 0x1f008, 0x1f00c,
976 0x1f040, 0x1f04c,
977 0x1f284, 0x1f290,
978 0x1f2c0, 0x1f2c0,
979 0x1f2e0, 0x1f2e0,
980 0x1f300, 0x1f384,
981 0x1f3c0, 0x1f3c8,
982 0x1f408, 0x1f40c,
983 0x1f440, 0x1f44c,
984 0x1f684, 0x1f690,
985 0x1f6c0, 0x1f6c0,
986 0x1f6e0, 0x1f6e0,
987 0x1f700, 0x1f784,
988 0x1f7c0, 0x1f7c8,
989 0x1f808, 0x1f80c,
990 0x1f840, 0x1f84c,
991 0x1fa84, 0x1fa90,
992 0x1fac0, 0x1fac0,
993 0x1fae0, 0x1fae0,
994 0x1fb00, 0x1fb84,
995 0x1fbc0, 0x1fbc8,
996 0x1fc08, 0x1fc0c,
997 0x1fc40, 0x1fc4c,
998 0x1fe84, 0x1fe90,
999 0x1fec0, 0x1fec0,
1000 0x1fee0, 0x1fee0,
1001 0x1ff00, 0x1ff84,
1002 0x1ffc0, 0x1ffc8,
1003 0x30000, 0x30030,
1004 0x30100, 0x30144,
1005 0x30190, 0x301d0,
1006 0x30200, 0x30318,
1007 0x30400, 0x3052c,
1008 0x30540, 0x3061c,
1009 0x30800, 0x30834,
1010 0x308c0, 0x30908,
1011 0x30910, 0x309ac,
1012 0x30a00, 0x30a2c,
1013 0x30a44, 0x30a50,
1014 0x30a74, 0x30c24,
1015 0x30d00, 0x30d00,
1016 0x30d08, 0x30d14,
1017 0x30d1c, 0x30d20,
1018 0x30d3c, 0x30d50,
1019 0x31200, 0x3120c,
1020 0x31220, 0x31220,
1021 0x31240, 0x31240,
1022 0x31600, 0x3160c,
1023 0x31a00, 0x31a1c,
1024 0x31e00, 0x31e20,
1025 0x31e38, 0x31e3c,
1026 0x31e80, 0x31e80,
1027 0x31e88, 0x31ea8,
1028 0x31eb0, 0x31eb4,
1029 0x31ec8, 0x31ed4,
1030 0x31fb8, 0x32004,
1031 0x32200, 0x32200,
1032 0x32208, 0x32240,
1033 0x32248, 0x32280,
1034 0x32288, 0x322c0,
1035 0x322c8, 0x322fc,
1036 0x32600, 0x32630,
1037 0x32a00, 0x32abc,
1038 0x32b00, 0x32b70,
1039 0x33000, 0x33048,
1040 0x33060, 0x3309c,
1041 0x330f0, 0x33148,
1042 0x33160, 0x3319c,
1043 0x331f0, 0x332e4,
1044 0x332f8, 0x333e4,
1045 0x333f8, 0x33448,
1046 0x33460, 0x3349c,
1047 0x334f0, 0x33548,
1048 0x33560, 0x3359c,
1049 0x335f0, 0x336e4,
1050 0x336f8, 0x337e4,
1051 0x337f8, 0x337fc,
1052 0x33814, 0x33814,
1053 0x3382c, 0x3382c,
1054 0x33880, 0x3388c,
1055 0x338e8, 0x338ec,
1056 0x33900, 0x33948,
1057 0x33960, 0x3399c,
1058 0x339f0, 0x33ae4,
1059 0x33af8, 0x33b10,
1060 0x33b28, 0x33b28,
1061 0x33b3c, 0x33b50,
1062 0x33bf0, 0x33c10,
1063 0x33c28, 0x33c28,
1064 0x33c3c, 0x33c50,
1065 0x33cf0, 0x33cfc,
1066 0x34000, 0x34030,
1067 0x34100, 0x34144,
1068 0x34190, 0x341d0,
1069 0x34200, 0x34318,
1070 0x34400, 0x3452c,
1071 0x34540, 0x3461c,
1072 0x34800, 0x34834,
1073 0x348c0, 0x34908,
1074 0x34910, 0x349ac,
1075 0x34a00, 0x34a2c,
1076 0x34a44, 0x34a50,
1077 0x34a74, 0x34c24,
1078 0x34d00, 0x34d00,
1079 0x34d08, 0x34d14,
1080 0x34d1c, 0x34d20,
1081 0x34d3c, 0x34d50,
1082 0x35200, 0x3520c,
1083 0x35220, 0x35220,
1084 0x35240, 0x35240,
1085 0x35600, 0x3560c,
1086 0x35a00, 0x35a1c,
1087 0x35e00, 0x35e20,
1088 0x35e38, 0x35e3c,
1089 0x35e80, 0x35e80,
1090 0x35e88, 0x35ea8,
1091 0x35eb0, 0x35eb4,
1092 0x35ec8, 0x35ed4,
1093 0x35fb8, 0x36004,
1094 0x36200, 0x36200,
1095 0x36208, 0x36240,
1096 0x36248, 0x36280,
1097 0x36288, 0x362c0,
1098 0x362c8, 0x362fc,
1099 0x36600, 0x36630,
1100 0x36a00, 0x36abc,
1101 0x36b00, 0x36b70,
1102 0x37000, 0x37048,
1103 0x37060, 0x3709c,
1104 0x370f0, 0x37148,
1105 0x37160, 0x3719c,
1106 0x371f0, 0x372e4,
1107 0x372f8, 0x373e4,
1108 0x373f8, 0x37448,
1109 0x37460, 0x3749c,
1110 0x374f0, 0x37548,
1111 0x37560, 0x3759c,
1112 0x375f0, 0x376e4,
1113 0x376f8, 0x377e4,
1114 0x377f8, 0x377fc,
1115 0x37814, 0x37814,
1116 0x3782c, 0x3782c,
1117 0x37880, 0x3788c,
1118 0x378e8, 0x378ec,
1119 0x37900, 0x37948,
1120 0x37960, 0x3799c,
1121 0x379f0, 0x37ae4,
1122 0x37af8, 0x37b10,
1123 0x37b28, 0x37b28,
1124 0x37b3c, 0x37b50,
1125 0x37bf0, 0x37c10,
1126 0x37c28, 0x37c28,
1127 0x37c3c, 0x37c50,
1128 0x37cf0, 0x37cfc,
1129 0x38000, 0x38030,
1130 0x38100, 0x38144,
1131 0x38190, 0x381d0,
1132 0x38200, 0x38318,
1133 0x38400, 0x3852c,
1134 0x38540, 0x3861c,
1135 0x38800, 0x38834,
1136 0x388c0, 0x38908,
1137 0x38910, 0x389ac,
1138 0x38a00, 0x38a2c,
1139 0x38a44, 0x38a50,
1140 0x38a74, 0x38c24,
1141 0x38d00, 0x38d00,
1142 0x38d08, 0x38d14,
1143 0x38d1c, 0x38d20,
1144 0x38d3c, 0x38d50,
1145 0x39200, 0x3920c,
1146 0x39220, 0x39220,
1147 0x39240, 0x39240,
1148 0x39600, 0x3960c,
1149 0x39a00, 0x39a1c,
1150 0x39e00, 0x39e20,
1151 0x39e38, 0x39e3c,
1152 0x39e80, 0x39e80,
1153 0x39e88, 0x39ea8,
1154 0x39eb0, 0x39eb4,
1155 0x39ec8, 0x39ed4,
1156 0x39fb8, 0x3a004,
1157 0x3a200, 0x3a200,
1158 0x3a208, 0x3a240,
1159 0x3a248, 0x3a280,
1160 0x3a288, 0x3a2c0,
1161 0x3a2c8, 0x3a2fc,
1162 0x3a600, 0x3a630,
1163 0x3aa00, 0x3aabc,
1164 0x3ab00, 0x3ab70,
1165 0x3b000, 0x3b048,
1166 0x3b060, 0x3b09c,
1167 0x3b0f0, 0x3b148,
1168 0x3b160, 0x3b19c,
1169 0x3b1f0, 0x3b2e4,
1170 0x3b2f8, 0x3b3e4,
1171 0x3b3f8, 0x3b448,
1172 0x3b460, 0x3b49c,
1173 0x3b4f0, 0x3b548,
1174 0x3b560, 0x3b59c,
1175 0x3b5f0, 0x3b6e4,
1176 0x3b6f8, 0x3b7e4,
1177 0x3b7f8, 0x3b7fc,
1178 0x3b814, 0x3b814,
1179 0x3b82c, 0x3b82c,
1180 0x3b880, 0x3b88c,
1181 0x3b8e8, 0x3b8ec,
1182 0x3b900, 0x3b948,
1183 0x3b960, 0x3b99c,
1184 0x3b9f0, 0x3bae4,
1185 0x3baf8, 0x3bb10,
1186 0x3bb28, 0x3bb28,
1187 0x3bb3c, 0x3bb50,
1188 0x3bbf0, 0x3bc10,
1189 0x3bc28, 0x3bc28,
1190 0x3bc3c, 0x3bc50,
1191 0x3bcf0, 0x3bcfc,
1192 0x3c000, 0x3c030,
1193 0x3c100, 0x3c144,
1194 0x3c190, 0x3c1d0,
1195 0x3c200, 0x3c318,
1196 0x3c400, 0x3c52c,
1197 0x3c540, 0x3c61c,
1198 0x3c800, 0x3c834,
1199 0x3c8c0, 0x3c908,
1200 0x3c910, 0x3c9ac,
1201 0x3ca00, 0x3ca2c,
1202 0x3ca44, 0x3ca50,
1203 0x3ca74, 0x3cc24,
1204 0x3cd00, 0x3cd00,
1205 0x3cd08, 0x3cd14,
1206 0x3cd1c, 0x3cd20,
1207 0x3cd3c, 0x3cd50,
1208 0x3d200, 0x3d20c,
1209 0x3d220, 0x3d220,
1210 0x3d240, 0x3d240,
1211 0x3d600, 0x3d60c,
1212 0x3da00, 0x3da1c,
1213 0x3de00, 0x3de20,
1214 0x3de38, 0x3de3c,
1215 0x3de80, 0x3de80,
1216 0x3de88, 0x3dea8,
1217 0x3deb0, 0x3deb4,
1218 0x3dec8, 0x3ded4,
1219 0x3dfb8, 0x3e004,
1220 0x3e200, 0x3e200,
1221 0x3e208, 0x3e240,
1222 0x3e248, 0x3e280,
1223 0x3e288, 0x3e2c0,
1224 0x3e2c8, 0x3e2fc,
1225 0x3e600, 0x3e630,
1226 0x3ea00, 0x3eabc,
1227 0x3eb00, 0x3eb70,
1228 0x3f000, 0x3f048,
1229 0x3f060, 0x3f09c,
1230 0x3f0f0, 0x3f148,
1231 0x3f160, 0x3f19c,
1232 0x3f1f0, 0x3f2e4,
1233 0x3f2f8, 0x3f3e4,
1234 0x3f3f8, 0x3f448,
1235 0x3f460, 0x3f49c,
1236 0x3f4f0, 0x3f548,
1237 0x3f560, 0x3f59c,
1238 0x3f5f0, 0x3f6e4,
1239 0x3f6f8, 0x3f7e4,
1240 0x3f7f8, 0x3f7fc,
1241 0x3f814, 0x3f814,
1242 0x3f82c, 0x3f82c,
1243 0x3f880, 0x3f88c,
1244 0x3f8e8, 0x3f8ec,
1245 0x3f900, 0x3f948,
1246 0x3f960, 0x3f99c,
1247 0x3f9f0, 0x3fae4,
1248 0x3faf8, 0x3fb10,
1249 0x3fb28, 0x3fb28,
1250 0x3fb3c, 0x3fb50,
1251 0x3fbf0, 0x3fc10,
1252 0x3fc28, 0x3fc28,
1253 0x3fc3c, 0x3fc50,
1254 0x3fcf0, 0x3fcfc,
1255 0x40000, 0x4000c,
1256 0x40040, 0x40068,
1257 0x4007c, 0x40144,
1258 0x40180, 0x4018c,
1259 0x40200, 0x40298,
1260 0x402ac, 0x4033c,
1261 0x403f8, 0x403fc,
1262 0x41304, 0x413c4,
1263 0x41400, 0x4141c,
1264 0x41480, 0x414d0,
1265 0x44000, 0x44078,
1266 0x440c0, 0x44278,
1267 0x442c0, 0x44478,
1268 0x444c0, 0x44678,
1269 0x446c0, 0x44878,
1270 0x448c0, 0x449fc,
1271 0x45000, 0x45068,
1272 0x45080, 0x45084,
1273 0x450a0, 0x450b0,
1274 0x45200, 0x45268,
1275 0x45280, 0x45284,
1276 0x452a0, 0x452b0,
1277 0x460c0, 0x460e4,
1278 0x47000, 0x4708c,
1279 0x47200, 0x47250,
1280 0x47400, 0x47420,
1281 0x47600, 0x47618,
1282 0x47800, 0x47814,
1283 0x48000, 0x4800c,
1284 0x48040, 0x48068,
1285 0x4807c, 0x48144,
1286 0x48180, 0x4818c,
1287 0x48200, 0x48298,
1288 0x482ac, 0x4833c,
1289 0x483f8, 0x483fc,
1290 0x49304, 0x493c4,
1291 0x49400, 0x4941c,
1292 0x49480, 0x494d0,
1293 0x4c000, 0x4c078,
1294 0x4c0c0, 0x4c278,
1295 0x4c2c0, 0x4c478,
1296 0x4c4c0, 0x4c678,
1297 0x4c6c0, 0x4c878,
1298 0x4c8c0, 0x4c9fc,
1299 0x4d000, 0x4d068,
1300 0x4d080, 0x4d084,
1301 0x4d0a0, 0x4d0b0,
1302 0x4d200, 0x4d268,
1303 0x4d280, 0x4d284,
1304 0x4d2a0, 0x4d2b0,
1305 0x4e0c0, 0x4e0e4,
1306 0x4f000, 0x4f08c,
1307 0x4f200, 0x4f250,
1308 0x4f400, 0x4f420,
1309 0x4f600, 0x4f618,
1310 0x4f800, 0x4f814,
1311 0x50000, 0x500cc,
1312 0x50400, 0x50400,
1313 0x50800, 0x508cc,
1314 0x50c00, 0x50c00,
1315 0x51000, 0x5101c,
1316 0x51300, 0x51308,
1317 };
1318
1319 u32 *buf_end = (u32 *)((char *)buf + buf_size);
1320 const unsigned int *reg_ranges;
1321 int reg_ranges_size, range;
1322 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
1323
1324 /* Select the right set of register ranges to dump depending on the
1325 * adapter chip type.
1326 */
1327 switch (chip_version) {
1328 case CHELSIO_T4:
1329 reg_ranges = t4_reg_ranges;
1330 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
1331 break;
1332
1333 case CHELSIO_T5:
1334 reg_ranges = t5_reg_ranges;
1335 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
1336 break;
1337
1338 default:
1339 dev_err(adap->pdev_dev,
1340 "Unsupported chip version %d\n", chip_version);
1341 return;
1342 }
1343
1344 /* Clear the register buffer and insert the appropriate register
1345 * values selected by the above register ranges.
1346 */
1347 memset(buf, 0, buf_size);
1348 for (range = 0; range < reg_ranges_size; range += 2) {
1349 unsigned int reg = reg_ranges[range];
1350 unsigned int last_reg = reg_ranges[range + 1];
1351 u32 *bufp = (u32 *)((char *)buf + reg);
1352
1353 /* Iterate across the register range filling in the register
1354 * buffer but don't write past the end of the register buffer.
1355 */
1356 while (reg <= last_reg && bufp < buf_end) {
1357 *bufp++ = t4_read_reg(adap, reg);
1358 reg += sizeof(u32);
1359 }
1360 }
1361 }
1362
1363 #define EEPROM_STAT_ADDR 0x7bfc
1364 #define VPD_BASE 0x400
1365 #define VPD_BASE_OLD 0
1366 #define VPD_LEN 1024
1367 #define CHELSIO_VPD_UNIQUE_ID 0x82
1368
1369 /**
1370 * t4_seeprom_wp - enable/disable EEPROM write protection
1371 * @adapter: the adapter
1372 * @enable: whether to enable or disable write protection
1373 *
1374 * Enables or disables write protection on the serial EEPROM.
1375 */
1376 int t4_seeprom_wp(struct adapter *adapter, bool enable)
1377 {
1378 unsigned int v = enable ? 0xc : 0;
1379 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
1380 return ret < 0 ? ret : 0;
1381 }
1382
1383 /**
1384 * get_vpd_params - read VPD parameters from VPD EEPROM
1385 * @adapter: adapter to read
1386 * @p: where to store the parameters
1387 *
1388 * Reads card parameters stored in VPD EEPROM.
1389 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn, pn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	/* Peek at the first word only, to decide where the VPD really
	 * begins (new layout at VPD_BASE vs. legacy layout at offset 0).
	 */
	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Now read the full VPD image from the chosen base. */
	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	/* Clamp the ID string to the size of p->id. */
	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

/* Locate keyword @name inside the VPD-R section and leave @var pointing at
 * the keyword's data (just past the info-field header); bails out via the
 * function's "out" label on a missing keyword.
 */
#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	/* The byte sum of everything up to and including the RV (checksum)
	 * field must come out to zero for a valid VPD.
	 */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW

	/* Copy the keyword data into @p, trimming trailing whitespace. */
	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strim(p->pn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	/* On any failure p->cclk is left untouched. */
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}
1497
1498 /* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE, /* largest FW image accepted by t4_load_fw() */
};
1513
1514 /**
1515 * sf1_read - read data from the serial flash
1516 * @adapter: the adapter
1517 * @byte_cnt: number of bytes to read
1518 * @cont: whether another operation will be chained
1519 * @lock: whether to lock SF for PL access only
1520 * @valp: where to store the read data
1521 *
1522 * Reads up to 4 bytes of data from the serial flash. The location of
1523 * the read needs to be specified prior to calling this by issuing the
1524 * appropriate commands to the serial flash.
1525 */
1526 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
1527 int lock, u32 *valp)
1528 {
1529 int ret;
1530
1531 if (!byte_cnt || byte_cnt > 4)
1532 return -EINVAL;
1533 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
1534 return -EBUSY;
1535 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
1536 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
1537 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
1538 if (!ret)
1539 *valp = t4_read_reg(adapter, SF_DATA_A);
1540 return ret;
1541 }
1542
1543 /**
1544 * sf1_write - write data to the serial flash
1545 * @adapter: the adapter
1546 * @byte_cnt: number of bytes to write
1547 * @cont: whether another operation will be chained
1548 * @lock: whether to lock SF for PL access only
1549 * @val: value to write
1550 *
1551 * Writes up to 4 bytes of data to the serial flash. The location of
1552 * the write needs to be specified prior to calling this by issuing the
1553 * appropriate commands to the serial flash.
1554 */
1555 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
1556 int lock, u32 val)
1557 {
1558 if (!byte_cnt || byte_cnt > 4)
1559 return -EINVAL;
1560 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
1561 return -EBUSY;
1562 t4_write_reg(adapter, SF_DATA_A, val);
1563 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
1564 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
1565 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
1566 }
1567
1568 /**
1569 * flash_wait_op - wait for a flash operation to complete
1570 * @adapter: the adapter
1571 * @attempts: max number of polls of the status register
1572 * @delay: delay between polls in ms
1573 *
1574 * Wait for a flash operation to complete by polling the status register.
1575 */
1576 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
1577 {
1578 int ret;
1579 u32 status;
1580
1581 while (1) {
1582 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
1583 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
1584 return ret;
1585 if (!(status & 1))
1586 return 0;
1587 if (--attempts == 0)
1588 return -EAGAIN;
1589 if (delay)
1590 msleep(delay);
1591 }
1592 }
1593
1594 /**
1595 * t4_read_flash - read words from serial flash
1596 * @adapter: the adapter
1597 * @addr: the start address for the read
1598 * @nwords: how many 32-bit words to read
1599 * @data: where to store the read data
1600 * @byte_oriented: whether to store data as bytes or as words
1601 *
1602 * Read the specified number of 32-bit words from the serial flash.
1603 * If @byte_oriented is set the read data is stored as a byte array
1604 * (i.e., big-endian), otherwise as 32-bit words in the platform's
1605 * natural endianness.
1606 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* Reads must be 32-bit aligned and stay inside the flash part. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Build the command word: opcode in the low byte, byte-swapped
	 * address in the rest.
	 */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* Send the command, then clock in one throwaway byte (its value
	 * lands in *data but is overwritten by the loop's first iteration).
	 */
	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	/* Chain 4-byte reads; on the final word take the SF lock and then
	 * release it immediately after the transfer completes.
	 */
	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}
1632
1633 /**
1634 * t4_write_flash - write up to a page of data to the serial flash
1635 * @adapter: the adapter
1636 * @addr: the start address to write
1637 * @n: length of data to write in bytes
1638 * @data: the data to write
1639 *
1640 * Writes up to a page of data (256 bytes) to the serial flash starting
1641 * at the given address. All the data must be written to the same page.
1642 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must fit within the flash and within one 256-byte page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Program-page command: opcode in the low byte, byte-swapped address
	 * in the rest.
	 */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload 4 bytes at a time, MSB first; every transfer
	 * except the last one (where c == left) is chained.
	 */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data was advanced n bytes by the loop above, so data - n is the
	 * caller's original buffer.
	 */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
	return ret;
}
1691
1692 /**
1693 * t4_get_fw_version - read the firmware version
1694 * @adapter: the adapter
1695 * @vers: where to place the version
1696 *
1697 * Reads the FW version from flash.
1698 */
1699 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
1700 {
1701 return t4_read_flash(adapter, FLASH_FW_START +
1702 offsetof(struct fw_hdr, fw_ver), 1,
1703 vers, 0);
1704 }
1705
1706 /**
1707 * t4_get_tp_version - read the TP microcode version
1708 * @adapter: the adapter
1709 * @vers: where to place the version
1710 *
1711 * Reads the TP microcode version from flash.
1712 */
1713 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
1714 {
1715 return t4_read_flash(adapter, FLASH_FW_START +
1716 offsetof(struct fw_hdr, tp_microcode_ver),
1717 1, vers, 0);
1718 }
1719
1720 /**
1721 * t4_get_exprom_version - return the Expansion ROM version (if any)
1722 * @adapter: the adapter
1723 * @vers: where to place the version
1724 *
1725 * Reads the Expansion ROM header from FLASH and returns the version
1726 * number (if present) through the @vers return value pointer. We return
1727 * this in the Firmware Version Format since it's convenient. Return
1728 * 0 on success, -ENOENT if no Expansion ROM is present.
1729 */
1730 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
1731 {
1732 struct exprom_header {
1733 unsigned char hdr_arr[16]; /* must start with 0x55aa */
1734 unsigned char hdr_ver[4]; /* Expansion ROM version */
1735 } *hdr;
1736 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
1737 sizeof(u32))];
1738 int ret;
1739
1740 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
1741 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
1742 0);
1743 if (ret)
1744 return ret;
1745
1746 hdr = (struct exprom_header *)exprom_header_buf;
1747 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
1748 return -ENOENT;
1749
1750 *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
1751 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
1752 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
1753 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
1754 return 0;
1755 }
1756
1757 /* Is the given firmware API compatible with the one the driver was compiled
1758 * with?
1759 */
1760 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1761 {
1762
1763 /* short circuit if it's the exact same firmware version */
1764 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1765 return 1;
1766
1767 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1768 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1769 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
1770 return 1;
1771 #undef SAME_INTF
1772
1773 return 0;
1774 }
1775
1776 /* The firmware in the filesystem is usable, but should it be installed?
1777 * This routine explains itself in detail if it indicates the filesystem
1778 * firmware should be installed.
1779 */
1780 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
1781 int k, int c)
1782 {
1783 const char *reason;
1784
1785 if (!card_fw_usable) {
1786 reason = "incompatible or unusable";
1787 goto install;
1788 }
1789
1790 if (k > c) {
1791 reason = "older than the version supported with this driver";
1792 goto install;
1793 }
1794
1795 return 0;
1796
1797 install:
1798 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
1799 "installing firmware %u.%u.%u.%u on card.\n",
1800 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
1801 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
1802 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
1803 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
1804
1805 return 1;
1806 }
1807
/* Negotiate which firmware to run: the image already on the card, or the
 * filesystem image in @fw_data (installed if newer/needed).  NOTE: this
 * function reports errors as POSITIVE errno values — note the negation of
 * t4_read_flash()/t4_fw_upgrade() returns and the bare EINVAL below.
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	/* The filesystem image is optional; without it we can only use
	 * whatever is already on the card.
	 */
	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		/* Only flash the filesystem image while the device is still
		 * uninitialized, and only when should_install_fs_fw() agrees.
		 */
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		*card_fw = *fs_fw;
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = EINVAL;	/* positive errno, per this function's convention */
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}
1892
1893 /**
1894 * t4_flash_erase_sectors - erase a range of flash sectors
1895 * @adapter: the adapter
1896 * @start: the first sector to erase
1897 * @end: the last sector to erase
1898 *
1899 * Erases the sectors in the given inclusive range.
1900 */
1901 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
1902 {
1903 int ret = 0;
1904
1905 if (end >= adapter->params.sf_nsec)
1906 return -EINVAL;
1907
1908 while (start <= end) {
1909 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
1910 (ret = sf1_write(adapter, 4, 0, 1,
1911 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1912 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
1913 dev_err(adapter->pdev_dev,
1914 "erase of flash sector %d failed, error %d\n",
1915 start, ret);
1916 break;
1917 }
1918 start++;
1919 }
1920 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
1921 return ret;
1922 }
1923
1924 /**
1925 * t4_flash_cfg_addr - return the address of the flash configuration file
1926 * @adapter: the adapter
1927 *
1928 * Return the address within the flash where the Firmware Configuration
1929 * File is stored.
1930 */
1931 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
1932 {
1933 if (adapter->params.sf_size == 0x100000)
1934 return FLASH_FPGA_CFG_START;
1935 else
1936 return FLASH_CFG_START;
1937 }
1938
1939 /* Return TRUE if the specified firmware matches the adapter. I.e. T4
1940 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
1941 * and emit an error message for mismatched firmware to save our caller the
1942 * effort ...
1943 */
1944 static bool t4_fw_matches_chip(const struct adapter *adap,
1945 const struct fw_hdr *hdr)
1946 {
1947 /* The expression below will return FALSE for any unsupported adapter
1948 * which will keep us "honest" in the future ...
1949 */
1950 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
1951 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
1952 return true;
1953
1954 dev_err(adap->pdev_dev,
1955 "FW image (%d) is not suitable for this adapter (%d)\n",
1956 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
1957 return false;
1958 }
1959
1960 /**
1961 * t4_load_fw - download firmware
1962 * @adap: the adapter
1963 * @fw_data: the firmware image to write
1964 * @size: image size
1965 *
1966 * Write the supplied firmware image to the card's serial flash.
1967 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	/* Validate the image before touching the flash at all. */
	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* An intact image's big-endian words must sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	/* Write the remaining pages of the image. */
	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* Everything else landed safely — now patch in the real version. */
	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	else
		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	return ret;
}
2047
2048 /**
2049 * t4_phy_fw_ver - return current PHY firmware version
2050 * @adap: the adapter
2051 * @phy_fw_ver: return value buffer for PHY firmware version
2052 *
2053 * Returns the current version of external PHY firmware on the
2054 * adapter.
2055 */
2056 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
2057 {
2058 u32 param, val;
2059 int ret;
2060
2061 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2062 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
2063 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
2064 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
2065 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
2066 &param, &val);
2067 if (ret < 0)
2068 return ret;
2069 *phy_fw_ver = val;
2070 return 0;
2071 }
2072
2073 /**
2074 * t4_load_phy_fw - download port PHY firmware
2075 * @adap: the adapter
2076 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
2077 * @win_lock: the lock to use to guard the memory copy
2078 * @phy_fw_version: function to check PHY firmware versions
2079 * @phy_fw_data: the PHY firmware image to write
2080 * @phy_fw_size: image size
2081 *
2082 * Transfer the specified PHY firmware to the adapter. If a non-NULL
2083 * @phy_fw_version is supplied, then it will be used to determine if
2084 * it's necessary to perform the transfer by comparing the version
2085 * of any existing adapter PHY firmware with that of the passed in
2086 * PHY firmware image. If @win_lock is non-NULL then it will be used
2087 * around the call to t4_memory_rw() which transfers the PHY firmware
2088 * to the adapter.
2089 *
2090 * A negative error number will be returned if an error occurs. If
2091 * version number support is available and there's no need to upgrade
2092 * the firmware, 0 will be returned. If firmware is successfully
2093 * transferred to the adapter, 1 will be retured.
2094 *
2095 * NOTE: some adapters only have local RAM to store the PHY firmware. As
2096 * a result, a RESET of the adapter would cause that RAM to lose its
2097 * contents. Thus, loading PHY firmware on such adapters must happen
2098 * after any FW_RESET_CMDs ...
2099 */
int t4_load_phy_fw(struct adapter *adap,
		   int win, spinlock_t *win_lock,
		   int (*phy_fw_version)(const u8 *, size_t),
		   const u8 *phy_fw_data, size_t phy_fw_size)
{
	unsigned long mtype = 0, maddr = 0;
	u32 param, val;
	int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
	int ret;

	/* If we have version number support, then check to see if the adapter
	 * already has up-to-date PHY firmware loaded.
	 */
	if (phy_fw_version) {
		new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
		if (ret < 0)
			return ret;

		if (cur_phy_fw_ver >= new_phy_fw_vers) {
			CH_WARN(adap, "PHY Firmware already up-to-date, "
				"version %#x\n", cur_phy_fw_ver);
			return 0;
		}
	}

	/* Ask the firmware where it wants us to copy the PHY firmware image.
	 * The size of the file requires a special version of the READ coommand
	 * which will pass the file size via the values field in PARAMS_CMD and
	 * retrieve the return value from firmware and place it in the same
	 * buffer values
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
	val = phy_fw_size;
	ret = t4_query_params_rw(adap, adap->mbox, adap->fn, 0, 1,
				 &param, &val, 1);
	if (ret < 0)
		return ret;
	/* Firmware packs the memory type into bits above the low byte and
	 * the base address in the low byte, in 64KB (<< 16) units.
	 */
	mtype = val >> 8;
	maddr = (val & 0xff) << 16;

	/* Copy the supplied PHY Firmware image to the adapter memory location
	 * allocated by the adapter firmware.
	 */
	if (win_lock)
		spin_lock_bh(win_lock);
	ret = t4_memory_rw(adap, win, mtype, maddr,
			   phy_fw_size, (__be32 *)phy_fw_data,
			   T4_MEMORY_WRITE);
	if (win_lock)
		spin_unlock_bh(win_lock);
	if (ret)
		return ret;

	/* Tell the firmware that the PHY firmware image has been written to
	 * RAM and it can now start copying it over to the PHYs. The chip
	 * firmware will RESET the affected PHYs as part of this operation
	 * leaving them running the new PHY firmware image.
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
	ret = t4_set_params_timeout(adap, adap->mbox, adap->fn, 0, 1,
				    &param, &val, 30000);

	/* If we have version number support, then check to see that the new
	 * firmware got loaded properly.
	 */
	if (phy_fw_version) {
		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
		if (ret < 0)
			return ret;

		if (cur_phy_fw_ver != new_phy_fw_vers) {
			CH_WARN(adap, "PHY Firmware did not update: "
				"version on adapter %#x, "
				"version flashed %#x\n",
				cur_phy_fw_ver, new_phy_fw_vers);
			return -ENXIO;
		}
	}

	/* Per the kernel-doc contract above: 1 means firmware was
	 * transferred to the adapter.
	 */
	return 1;
}
2188
2189 /**
2190 * t4_fwcache - firmware cache operation
2191 * @adap: the adapter
2192 * @op : the operation (flush or flush and invalidate)
2193 */
2194 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
2195 {
2196 struct fw_params_cmd c;
2197
2198 memset(&c, 0, sizeof(c));
2199 c.op_to_vfn =
2200 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
2201 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
2202 FW_PARAMS_CMD_PFN_V(adap->fn) |
2203 FW_PARAMS_CMD_VFN_V(0));
2204 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2205 c.param[0].mnem =
2206 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2207 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
2208 c.param[0].val = (__force __be32)op;
2209
2210 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
2211 }
2212
2213 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
2214 {
2215 unsigned int i, j;
2216
2217 for (i = 0; i < 8; i++) {
2218 u32 *p = la_buf + i;
2219
2220 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
2221 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
2222 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
2223 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
2224 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
2225 }
2226 }
2227
/* Port capabilities (speeds plus auto-negotiation) that may be advertised
 * in a link configuration.
 */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)
2231
2232 /**
2233 * t4_link_start - apply link configuration to MAC/PHY
2234 * @phy: the PHY to setup
2235 * @mac: the MAC to setup
2236 * @lc: the requested link configuration
2237 *
2238 * Set up a port's MAC and PHY according to a desired link configuration.
2239 * - If the PHY can auto-negotiate first decide what to advertise, then
2240 * enable/disable auto-negotiation as desired, and reset.
2241 * - If the PHY does not auto-negotiate just reset it.
2242 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
2243 * otherwise do it later based on the outcome of auto-negotiation.
2244 */
2245 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
2246 struct link_config *lc)
2247 {
2248 struct fw_port_cmd c;
2249 unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
2250
2251 lc->link_ok = 0;
2252 if (lc->requested_fc & PAUSE_RX)
2253 fc |= FW_PORT_CAP_FC_RX;
2254 if (lc->requested_fc & PAUSE_TX)
2255 fc |= FW_PORT_CAP_FC_TX;
2256
2257 memset(&c, 0, sizeof(c));
2258 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
2259 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
2260 FW_PORT_CMD_PORTID_V(port));
2261 c.action_to_len16 =
2262 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
2263 FW_LEN16(c));
2264
2265 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2266 c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
2267 fc);
2268 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
2269 } else if (lc->autoneg == AUTONEG_DISABLE) {
2270 c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
2271 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
2272 } else
2273 c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
2274
2275 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2276 }
2277
2278 /**
2279 * t4_restart_aneg - restart autonegotiation
2280 * @adap: the adapter
2281 * @mbox: mbox to use for the FW command
2282 * @port: the port id
2283 *
2284 * Restarts autonegotiation for the selected port.
2285 */
2286 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
2287 {
2288 struct fw_port_cmd c;
2289
2290 memset(&c, 0, sizeof(c));
2291 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
2292 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
2293 FW_PORT_CMD_PORTID_V(port));
2294 c.action_to_len16 =
2295 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
2296 FW_LEN16(c));
2297 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
2298 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2299 }
2300
typedef void (*int_handler_t)(struct adapter *adap);

/* One entry of a table-driven interrupt cause decoder (see
 * t4_handle_intr_status()): a cause-register bit mask plus what to do when
 * any of those bits are set.  A mask of 0 terminates a table.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
2310
2311 /**
2312 * t4_handle_intr_status - table driven interrupt handler
2313 * @adapter: the adapter that generated the interrupt
2314 * @reg: the interrupt status register to process
2315 * @acts: table of interrupt actions
2316 *
2317 * A table driven interrupt handler that applies a set of masks to an
2318 * interrupt status word and performs the corresponding actions if the
2319 * interrupts described by the mask have occurred. The actions include
2320 * optionally emitting a warning or alert message. The table is terminated
2321 * by an entry specifying mask 0. Returns the number of fatal interrupt
2322 * conditions.
2323 */
2324 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
2325 const struct intr_info *acts)
2326 {
2327 int fatal = 0;
2328 unsigned int mask = 0;
2329 unsigned int status = t4_read_reg(adapter, reg);
2330
2331 for ( ; acts->mask; ++acts) {
2332 if (!(status & acts->mask))
2333 continue;
2334 if (acts->fatal) {
2335 fatal++;
2336 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
2337 status & acts->mask);
2338 } else if (acts->msg && printk_ratelimit())
2339 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
2340 status & acts->mask);
2341 if (acts->int_handler)
2342 acts->int_handler(adapter);
2343 mask |= acts->mask;
2344 }
2345 status &= mask;
2346 if (status) /* clear processed interrupts */
2347 t4_write_reg(adapter, reg, status);
2348 return fatal;
2349 }
2350
2351 /*
2352 * Interrupt handler for the PCIE module.
2353 */
2354 static void pcie_intr_handler(struct adapter *adapter)
2355 {
2356 static const struct intr_info sysbus_intr_info[] = {
2357 { RNPP_F, "RXNP array parity error", -1, 1 },
2358 { RPCP_F, "RXPC array parity error", -1, 1 },
2359 { RCIP_F, "RXCIF array parity error", -1, 1 },
2360 { RCCP_F, "Rx completions control array parity error", -1, 1 },
2361 { RFTP_F, "RXFT array parity error", -1, 1 },
2362 { 0 }
2363 };
2364 static const struct intr_info pcie_port_intr_info[] = {
2365 { TPCP_F, "TXPC array parity error", -1, 1 },
2366 { TNPP_F, "TXNP array parity error", -1, 1 },
2367 { TFTP_F, "TXFT array parity error", -1, 1 },
2368 { TCAP_F, "TXCA array parity error", -1, 1 },
2369 { TCIP_F, "TXCIF array parity error", -1, 1 },
2370 { RCAP_F, "RXCA array parity error", -1, 1 },
2371 { OTDD_F, "outbound request TLP discarded", -1, 1 },
2372 { RDPE_F, "Rx data parity error", -1, 1 },
2373 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
2374 { 0 }
2375 };
2376 static const struct intr_info pcie_intr_info[] = {
2377 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
2378 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
2379 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
2380 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
2381 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
2382 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
2383 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
2384 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
2385 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
2386 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
2387 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
2388 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
2389 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
2390 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
2391 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
2392 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
2393 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
2394 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
2395 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
2396 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
2397 { FIDPERR_F, "PCI FID parity error", -1, 1 },
2398 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
2399 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
2400 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
2401 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
2402 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
2403 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
2404 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
2405 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
2406 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
2407 -1, 0 },
2408 { 0 }
2409 };
2410
2411 static struct intr_info t5_pcie_intr_info[] = {
2412 { MSTGRPPERR_F, "Master Response Read Queue parity error",
2413 -1, 1 },
2414 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
2415 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
2416 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
2417 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
2418 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
2419 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
2420 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
2421 -1, 1 },
2422 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
2423 -1, 1 },
2424 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
2425 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
2426 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
2427 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
2428 { DREQWRPERR_F, "PCI DMA channel write request parity error",
2429 -1, 1 },
2430 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
2431 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
2432 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
2433 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
2434 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
2435 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
2436 { FIDPERR_F, "PCI FID parity error", -1, 1 },
2437 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
2438 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
2439 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
2440 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
2441 -1, 1 },
2442 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
2443 -1, 1 },
2444 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
2445 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
2446 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2447 { READRSPERR_F, "Outbound read error", -1, 0 },
2448 { 0 }
2449 };
2450
2451 int fat;
2452
2453 if (is_t4(adapter->params.chip))
2454 fat = t4_handle_intr_status(adapter,
2455 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
2456 sysbus_intr_info) +
2457 t4_handle_intr_status(adapter,
2458 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
2459 pcie_port_intr_info) +
2460 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
2461 pcie_intr_info);
2462 else
2463 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
2464 t5_pcie_intr_info);
2465
2466 if (fat)
2467 t4_fatal_err(adapter);
2468 }
2469
/*
 * TP interrupt handler.  Every condition decoded here (parity error, out of
 * Tx pages) is marked fatal in the table, so any hit takes the adapter down.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
		t4_fatal_err(adapter);
}
2484
/*
 * SGE interrupt handler.  Parity errors are reported through the 64-bit
 * cause assembled from SGE_INT_CAUSE1/2 and are always fatal; the remaining
 * conditions are decoded from SGE_INT_CAUSE3 via the table below.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE_F,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC_F,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
		{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
		{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
		{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO_F,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO_F,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	/* Combine the two 32-bit parity cause registers into one 64-bit
	 * value; any set bit is a fatal parity error.
	 */
	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
		((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		/* Write back both halves to clear the reported bits. */
		t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}
2533
/* Aggregate masks for the CIM outbound/inbound queue parity error bits,
 * used in the cim_intr_handler() decode table below.
 */
#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
2538
/*
 * CIM interrupt handler.  Decodes both the host cause register and the uP
 * access cause register; every listed condition is fatal.  If the firmware
 * error bit is set in PCIE_FW, report the firmware error first.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	int fat;

	/* A CIM interrupt may be caused by the firmware crashing. */
	if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
		t4_report_fw_error(adapter);

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
2598
/*
 * ULP RX interrupt handler.  Both decoded conditions (context and parity
 * errors) are fatal.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
		t4_fatal_err(adapter);
}
2613
/*
 * ULP TX interrupt handler.  Per-channel PBL out-of-bounds conditions are
 * reported non-fatally; parity errors are fatal.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
		t4_fatal_err(adapter);
}
2635
/*
 * PM TX interrupt handler.  All decoded conditions are fatal.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
		{ PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
		  -1, 1 },
		{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
		{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
		t4_fatal_err(adapter);
}
2658
/*
 * PM RX interrupt handler.  All decoded conditions are fatal.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
		{ PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
		  -1, 1 },
		{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
		{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
		t4_fatal_err(adapter);
}
2678
/*
 * CPL switch interrupt handler.  All decoded conditions are fatal.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
		t4_fatal_err(adapter);
}
2697
/*
 * LE (Lookup Engine) interrupt handler.  LIP misses/errors are non-fatal;
 * parity errors, unknown commands and request-queue parity errors are fatal.
 */
static void le_intr_handler(struct adapter *adap)
{
	static const struct intr_info le_intr_info[] = {
		{ LIPMISS_F, "LE LIP miss", -1, 0 },
		{ LIP0_F, "LE 0 LIP error", -1, 0 },
		{ PARITYERR_F, "LE parity error", -1, 1 },
		{ UNKNOWNCMD_F, "LE unknown command", -1, 1 },
		{ REQQPARERR_F, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
		t4_fatal_err(adap);
}
2715
/*
 * MPS interrupt handler.  Walks the per-sub-block cause registers (Rx, Tx,
 * trace, statistics SRAM/FIFOs, classification) through the table-driven
 * decoder, then clears and flushes the top-level MPS cause register.  Any
 * fatal condition takes the adapter down.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
		  -1, 1 },
		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
				    mps_cls_intr_info);

	/* Clear the top-level cause and read it back to flush the write. */
	t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
	t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}
2785
/* EDC/MC interrupt cause bits handled by mem_intr_handler(): FIFO parity
 * plus correctable and uncorrectable ECC errors.
 */
#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)
2788
/*
 * EDC/MC interrupt handler.
 *
 * @idx selects the memory controller: MEM_EDC0/MEM_EDC1 use the EDC
 * register block, MEM_MC uses the (chip-dependent) MC register block, and
 * anything else is treated as MC1.  Correctable ECC errors are counted and
 * warned about (rate-limited); parity and uncorrectable ECC errors are
 * fatal.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
	} else if (idx == MEM_MC) {
		/* T4 and T5+ place the MC cause/status at different
		 * register addresses.
		 */
		if (is_t4(adapter->params.chip)) {
			addr = MC_INT_CAUSE_A;
			cnt_addr = MC_ECC_STATUS_A;
		} else {
			addr = MC_P_INT_CAUSE_A;
			cnt_addr = MC_P_ECC_STATUS_A;
		}
	} else {
		addr = MC_REG(MC_P_INT_CAUSE_A, 1);
		cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE_F)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE_F) {
		u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));

		/* Reset the correctable-error counter after reading it. */
		t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE_F)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
		t4_fatal_err(adapter);
}
2835
/*
 * MA interrupt handler.  Reports parity and address wrap-around errors,
 * clears the cause register, and unconditionally treats any MA interrupt
 * as fatal.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);

	if (status & MEM_PERR_INT_CAUSE_F) {
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
		/* T5 has a second parity status register. */
		if (is_t5(adap->params.chip))
			dev_alert(adap->pdev_dev,
				  "MA parity error, parity status %#x\n",
				  t4_read_reg(adap,
					      MA_PARITY_ERROR_STATUS2_A));
	}
	if (status & MEM_WRAP_INT_CAUSE_F) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_G(v),
			  MEM_WRAP_ADDRESS_G(v) << 4);
	}
	t4_write_reg(adap, MA_INT_CAUSE_A, status);
	t4_fatal_err(adap);
}
2863
/*
 * SMB interrupt handler.  All decoded conditions are fatal.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
		t4_fatal_err(adap);
}
2879
/*
 * NC-SI interrupt handler.  All decoded conditions are fatal.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
		t4_fatal_err(adap);
}
2896
2897 /*
2898 * XGMAC interrupt handler.
2899 */
2900 static void xgmac_intr_handler(struct adapter *adap, int port)
2901 {
2902 u32 v, int_cause_reg;
2903
2904 if (is_t4(adap->params.chip))
2905 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
2906 else
2907 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
2908
2909 v = t4_read_reg(adap, int_cause_reg);
2910
2911 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
2912 if (!v)
2913 return;
2914
2915 if (v & TXFIFO_PRTY_ERR_F)
2916 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
2917 port);
2918 if (v & RXFIFO_PRTY_ERR_F)
2919 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
2920 port);
2921 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
2922 t4_fatal_err(adap);
2923 }
2924
/*
 * PL interrupt handler.  Both decoded conditions are fatal.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static const struct intr_info pl_intr_info[] = {
		{ FATALPERR_F, "T4 fatal parity error", -1, 1 },
		{ PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
		t4_fatal_err(adap);
}
2939
/* PF-local interrupt sources and the set of global (module) interrupt
 * sources handled by t4_slow_intr_handler().
 */
#define PF_INTR_MASK (PFSW_F)
#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
		EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
		CPL_SWITCH_F | SGE_F | ULP_TX_F)
2944
/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 *
 *	Reads the top-level cause register and dispatches each set module bit
 *	to its handler, then clears the bits in GLBL_INTR_MASK.  Returns 0 if
 *	none of the handled causes were set, 1 otherwise.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	if (cause & CIM_F)
		cim_intr_handler(adapter);
	if (cause & MPS_F)
		mps_intr_handler(adapter);
	if (cause & NCSI_F)
		ncsi_intr_handler(adapter);
	if (cause & PL_F)
		pl_intr_handler(adapter);
	if (cause & SMB_F)
		smb_intr_handler(adapter);
	if (cause & XGMAC0_F)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1_F)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0_F)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1_F)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE_F)
		pcie_intr_handler(adapter);
	if (cause & MC_F)
		mem_intr_handler(adapter, MEM_MC);
	/* NOTE(review): MC1_S looks like a shift-amount constant (the "_S"
	 * suffix) rather than a bit mask; confirm whether the MC1 flag macro
	 * was intended here.
	 */
	if (!is_t4(adapter->params.chip) && (cause & MC1_S))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & EDC0_F)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1_F)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE_F)
		le_intr_handler(adapter);
	if (cause & TP_F)
		tp_intr_handler(adapter);
	if (cause & MA_F)
		ma_intr_handler(adapter);
	if (cause & PM_TX_F)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX_F)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX_F)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH_F)
		cplsw_intr_handler(adapter);
	if (cause & SGE_F)
		sge_intr_handler(adapter);
	if (cause & ULP_TX_F)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
	return 1;
}
3011
/**
 *	t4_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the top-level
 *	interrupt concentrator for global interrupts. Interrupts are already
 *	enabled at each module, here we just enable the roots of the interrupt
 *	hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules. Only one PCI
 *	function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	/* Which PF we are, per the PL "who am I" register. */
	u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));

	/* Enable the SGE error conditions we want reported. */
	t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
		     ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
		     ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
		     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
		     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
		     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
		     ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
		     DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
		     EGRESS_SIZE_ERR_F);
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
	/* Route global interrupts to this PF. */
	t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
}
3041
3042 /**
3043 * t4_intr_disable - disable interrupts
3044 * @adapter: the adapter whose interrupts should be disabled
3045 *
3046 * Disable interrupts. We only disable the top-level interrupt
3047 * concentrators. The caller must be a PCI function managing global
3048 * interrupts.
3049 */
3050 void t4_intr_disable(struct adapter *adapter)
3051 {
3052 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
3053
3054 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
3055 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
3056 }
3057
3058 /**
3059 * hash_mac_addr - return the hash value of a MAC address
3060 * @addr: the 48-bit Ethernet MAC address
3061 *
3062 * Hashes a MAC address according to the hash function used by HW inexact
3063 * (hash) address matching.
3064 */
3065 static int hash_mac_addr(const u8 *addr)
3066 {
3067 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
3068 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
3069 a ^= b;
3070 a ^= (a >> 12);
3071 a ^= (a >> 6);
3072 return a & 0x3f;
3073 }
3074
3075 /**
3076 * t4_config_rss_range - configure a portion of the RSS mapping table
3077 * @adapter: the adapter
3078 * @mbox: mbox to use for the FW command
3079 * @viid: virtual interface whose RSS subtable is to be written
3080 * @start: start entry in the table to write
3081 * @n: how many table entries to write
3082 * @rspq: values for the response queue lookup table
3083 * @nrspq: number of values in @rspq
3084 *
3085 * Programs the selected part of the VI's RSS mapping table with the
3086 * provided values. If @nrspq < @n the supplied values are used repeatedly
3087 * until the full table range is populated.
3088 *
3089 * The caller must ensure the values in @rspq are in the range allowed for
3090 * @viid.
3091 */
3092 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
3093 int start, int n, const u16 *rspq, unsigned int nrspq)
3094 {
3095 int ret;
3096 const u16 *rsp = rspq;
3097 const u16 *rsp_end = rspq + nrspq;
3098 struct fw_rss_ind_tbl_cmd cmd;
3099
3100 memset(&cmd, 0, sizeof(cmd));
3101 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
3102 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3103 FW_RSS_IND_TBL_CMD_VIID_V(viid));
3104 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
3105
3106 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
3107 while (n > 0) {
3108 int nq = min(n, 32);
3109 __be32 *qp = &cmd.iq0_to_iq2;
3110
3111 cmd.niqid = cpu_to_be16(nq);
3112 cmd.startidx = cpu_to_be16(start);
3113
3114 start += nq;
3115 n -= nq;
3116
3117 while (nq > 0) {
3118 unsigned int v;
3119
3120 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
3121 if (++rsp >= rsp_end)
3122 rsp = rspq;
3123 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
3124 if (++rsp >= rsp_end)
3125 rsp = rspq;
3126 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
3127 if (++rsp >= rsp_end)
3128 rsp = rspq;
3129
3130 *qp++ = cpu_to_be32(v);
3131 nq -= 3;
3132 }
3133
3134 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
3135 if (ret)
3136 return ret;
3137 }
3138 return 0;
3139 }
3140
3141 /**
3142 * t4_config_glbl_rss - configure the global RSS mode
3143 * @adapter: the adapter
3144 * @mbox: mbox to use for the FW command
3145 * @mode: global RSS mode
3146 * @flags: mode-specific flags
3147 *
3148 * Sets the global RSS mode.
3149 */
3150 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
3151 unsigned int flags)
3152 {
3153 struct fw_rss_glb_config_cmd c;
3154
3155 memset(&c, 0, sizeof(c));
3156 c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
3157 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3158 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3159 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
3160 c.u.manual.mode_pkd =
3161 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
3162 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
3163 c.u.basicvirtual.mode_pkd =
3164 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
3165 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
3166 } else
3167 return -EINVAL;
3168 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
3169 }
3170
3171 /**
3172 * t4_config_vi_rss - configure per VI RSS settings
3173 * @adapter: the adapter
3174 * @mbox: mbox to use for the FW command
3175 * @viid: the VI id
3176 * @flags: RSS flags
3177 * @defq: id of the default RSS queue for the VI.
3178 *
3179 * Configures VI-specific RSS properties.
3180 */
3181 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
3182 unsigned int flags, unsigned int defq)
3183 {
3184 struct fw_rss_vi_config_cmd c;
3185
3186 memset(&c, 0, sizeof(c));
3187 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
3188 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3189 FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
3190 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3191 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
3192 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
3193 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
3194 }
3195
/* Read an RSS table row.  The write triggers the lookup (the 0xfff00000
 * upper bits presumably select the read operation -- TODO confirm against
 * the TP register spec); we then poll LKPTBLROWVLD until the row data is
 * valid and return it in *val.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
				   5, 0, val);
}
3203
3204 /**
3205 * t4_read_rss - read the contents of the RSS mapping table
3206 * @adapter: the adapter
3207 * @map: holds the contents of the RSS mapping table
3208 *
3209 * Reads the contents of the RSS hash->queue mapping table.
3210 */
3211 int t4_read_rss(struct adapter *adapter, u16 *map)
3212 {
3213 u32 val;
3214 int i, ret;
3215
3216 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
3217 ret = rd_rss_row(adapter, i, &val);
3218 if (ret)
3219 return ret;
3220 *map++ = LKPTBLQUEUE0_G(val);
3221 *map++ = LKPTBLQUEUE1_G(val);
3222 }
3223 return 0;
3224 }
3225
3226 /**
3227 * t4_read_rss_key - read the global RSS key
3228 * @adap: the adapter
3229 * @key: 10-entry array holding the 320-bit RSS key
3230 *
3231 * Reads the global 320-bit RSS key.
3232 */
3233 void t4_read_rss_key(struct adapter *adap, u32 *key)
3234 {
3235 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
3236 TP_RSS_SECRET_KEY0_A);
3237 }
3238
3239 /**
3240 * t4_write_rss_key - program one of the RSS keys
3241 * @adap: the adapter
3242 * @key: 10-entry array holding the 320-bit RSS key
3243 * @idx: which RSS key to write
3244 *
3245 * Writes one of the RSS keys with the given 320-bit value. If @idx is
3246 * 0..15 the corresponding entry in the RSS key table is written,
3247 * otherwise the global RSS key is written.
3248 */
3249 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
3250 {
3251 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
3252 TP_RSS_SECRET_KEY0_A);
3253 if (idx >= 0 && idx < 16)
3254 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
3255 KEYWRADDR_V(idx) | KEYWREN_F);
3256 }
3257
3258 /**
3259 * t4_read_rss_pf_config - read PF RSS Configuration Table
3260 * @adapter: the adapter
3261 * @index: the entry in the PF RSS table to read
3262 * @valp: where to store the returned value
3263 *
3264 * Reads the PF RSS Configuration Table at the specified index and returns
3265 * the value found there.
3266 */
3267 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
3268 u32 *valp)
3269 {
3270 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3271 valp, 1, TP_RSS_PF0_CONFIG_A + index);
3272 }
3273
3274 /**
3275 * t4_read_rss_vf_config - read VF RSS Configuration Table
3276 * @adapter: the adapter
3277 * @index: the entry in the VF RSS table to read
3278 * @vfl: where to store the returned VFL
3279 * @vfh: where to store the returned VFH
3280 *
3281 * Reads the VF RSS Configuration Table at the specified index and returns
3282 * the (VFL, VFH) values found there.
3283 */
3284 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
3285 u32 *vfl, u32 *vfh)
3286 {
3287 u32 vrt, mask, data;
3288
3289 mask = VFWRADDR_V(VFWRADDR_M);
3290 data = VFWRADDR_V(index);
3291
3292 /* Request that the index'th VF Table values be read into VFL/VFH.
3293 */
3294 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
3295 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
3296 vrt |= data | VFRDEN_F;
3297 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
3298
3299 /* Grab the VFL/VFH values ...
3300 */
3301 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3302 vfl, 1, TP_RSS_VFL_CONFIG_A);
3303 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3304 vfh, 1, TP_RSS_VFH_CONFIG_A);
3305 }
3306
3307 /**
3308 * t4_read_rss_pf_map - read PF RSS Map
3309 * @adapter: the adapter
3310 *
3311 * Reads the PF RSS Map register and returns its value.
3312 */
3313 u32 t4_read_rss_pf_map(struct adapter *adapter)
3314 {
3315 u32 pfmap;
3316
3317 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3318 &pfmap, 1, TP_RSS_PF_MAP_A);
3319 return pfmap;
3320 }
3321
3322 /**
3323 * t4_read_rss_pf_mask - read PF RSS Mask
3324 * @adapter: the adapter
3325 *
3326 * Reads the PF RSS Mask register and returns its value.
3327 */
3328 u32 t4_read_rss_pf_mask(struct adapter *adapter)
3329 {
3330 u32 pfmask;
3331
3332 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3333 &pfmask, 1, TP_RSS_PF_MSK_A);
3334 return pfmask;
3335 }
3336
3337 /**
3338 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
3339 * @adap: the adapter
3340 * @v4: holds the TCP/IP counter values
3341 * @v6: holds the TCP/IPv6 counter values
3342 *
3343 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3344 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3345 */
3346 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3347 struct tp_tcp_stats *v6)
3348 {
3349 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
3350
3351 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
3352 #define STAT(x) val[STAT_IDX(x)]
3353 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3354
3355 if (v4) {
3356 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
3357 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
3358 v4->tcpOutRsts = STAT(OUT_RST);
3359 v4->tcpInSegs = STAT64(IN_SEG);
3360 v4->tcpOutSegs = STAT64(OUT_SEG);
3361 v4->tcpRetransSegs = STAT64(RXT_SEG);
3362 }
3363 if (v6) {
3364 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
3365 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
3366 v6->tcpOutRsts = STAT(OUT_RST);
3367 v6->tcpInSegs = STAT64(IN_SEG);
3368 v6->tcpOutSegs = STAT64(OUT_SEG);
3369 v6->tcpRetransSegs = STAT64(RXT_SEG);
3370 }
3371 #undef STAT64
3372 #undef STAT
3373 #undef STAT_IDX
3374 }
3375
3376 /**
3377 * t4_read_mtu_tbl - returns the values in the HW path MTU table
3378 * @adap: the adapter
3379 * @mtus: where to store the MTU values
3380 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
3381 *
3382 * Reads the HW path MTU table.
3383 */
3384 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3385 {
3386 u32 v;
3387 int i;
3388
3389 for (i = 0; i < NMTUS; ++i) {
3390 t4_write_reg(adap, TP_MTU_TABLE_A,
3391 MTUINDEX_V(0xff) | MTUVALUE_V(i));
3392 v = t4_read_reg(adap, TP_MTU_TABLE_A);
3393 mtus[i] = MTUVALUE_G(v);
3394 if (mtu_log)
3395 mtu_log[i] = MTUWIDTH_G(v);
3396 }
3397 }
3398
3399 /**
3400 * t4_read_cong_tbl - reads the congestion control table
3401 * @adap: the adapter
3402 * @incr: where to store the alpha values
3403 *
3404 * Reads the additive increments programmed into the HW congestion
3405 * control table.
3406 */
3407 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3408 {
3409 unsigned int mtu, w;
3410
3411 for (mtu = 0; mtu < NMTUS; ++mtu)
3412 for (w = 0; w < NCCTRL_WIN; ++w) {
3413 t4_write_reg(adap, TP_CCTRL_TABLE_A,
3414 ROWINDEX_V(0xffff) | (mtu << 5) | w);
3415 incr[mtu][w] = (u16)t4_read_reg(adap,
3416 TP_CCTRL_TABLE_A) & 0x1fff;
3417 }
3418 }
3419
3420 /**
3421 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3422 * @adap: the adapter
3423 * @addr: the indirect TP register address
3424 * @mask: specifies the field within the register to modify
3425 * @val: new value for the field
3426 *
3427 * Sets a field of an indirect TP register to the given value.
3428 */
3429 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3430 unsigned int mask, unsigned int val)
3431 {
3432 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
3433 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
3434 t4_write_reg(adap, TP_PIO_DATA_A, val);
3435 }
3436
3437 /**
3438 * init_cong_ctrl - initialize congestion control parameters
3439 * @a: the alpha values for congestion control
3440 * @b: the beta values for congestion control
3441 *
3442 * Initialize the congestion control parameters.
3443 */
3444 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
3445 {
3446 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3447 a[9] = 2;
3448 a[10] = 3;
3449 a[11] = 4;
3450 a[12] = 5;
3451 a[13] = 6;
3452 a[14] = 7;
3453 a[15] = 8;
3454 a[16] = 9;
3455 a[17] = 10;
3456 a[18] = 14;
3457 a[19] = 17;
3458 a[20] = 21;
3459 a[21] = 25;
3460 a[22] = 30;
3461 a[23] = 35;
3462 a[24] = 45;
3463 a[25] = 60;
3464 a[26] = 80;
3465 a[27] = 100;
3466 a[28] = 200;
3467 a[29] = 300;
3468 a[30] = 400;
3469 a[31] = 500;
3470
3471 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3472 b[9] = b[10] = 1;
3473 b[11] = b[12] = 2;
3474 b[13] = b[14] = b[15] = b[16] = 3;
3475 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3476 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3477 b[28] = b[29] = 6;
3478 b[30] = b[31] = 7;
3479 }
3480
3481 /* The minimum additive increment value for the congestion control table */
3482 #define CC_MIN_INCR 2U
3483
3484 /**
3485 * t4_load_mtus - write the MTU and congestion control HW tables
3486 * @adap: the adapter
3487 * @mtus: the values for the MTU table
3488 * @alpha: the values for the congestion control alpha parameter
3489 * @beta: the values for the congestion control beta parameter
3490 *
3491 * Write the HW MTU table with the supplied MTUs and the high-speed
3492 * congestion control table with the supplied alpha, beta, and MTUs.
3493 * We write the two tables together because the additive increments
3494 * depend on the MTUs.
3495 */
3496 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3497 const unsigned short *alpha, const unsigned short *beta)
3498 {
3499 static const unsigned int avg_pkts[NCCTRL_WIN] = {
3500 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3501 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3502 28672, 40960, 57344, 81920, 114688, 163840, 229376
3503 };
3504
3505 unsigned int i, w;
3506
3507 for (i = 0; i < NMTUS; ++i) {
3508 unsigned int mtu = mtus[i];
3509 unsigned int log2 = fls(mtu);
3510
3511 if (!(mtu & ((1 << log2) >> 2))) /* round */
3512 log2--;
3513 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
3514 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
3515
3516 for (w = 0; w < NCCTRL_WIN; ++w) {
3517 unsigned int inc;
3518
3519 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3520 CC_MIN_INCR);
3521
3522 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
3523 (w << 16) | (beta[w] << 13) | inc);
3524 }
3525 }
3526 }
3527
3528 /**
3529 * t4_pmtx_get_stats - returns the HW stats from PMTX
3530 * @adap: the adapter
3531 * @cnt: where to store the count statistics
3532 * @cycles: where to store the cycle statistics
3533 *
3534 * Returns performance statistics from PMTX.
3535 */
3536 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3537 {
3538 int i;
3539 u32 data[2];
3540
3541 for (i = 0; i < PM_NSTATS; i++) {
3542 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
3543 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
3544 if (is_t4(adap->params.chip)) {
3545 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
3546 } else {
3547 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
3548 PM_TX_DBG_DATA_A, data, 2,
3549 PM_TX_DBG_STAT_MSB_A);
3550 cycles[i] = (((u64)data[0] << 32) | data[1]);
3551 }
3552 }
3553 }
3554
3555 /**
3556 * t4_pmrx_get_stats - returns the HW stats from PMRX
3557 * @adap: the adapter
3558 * @cnt: where to store the count statistics
3559 * @cycles: where to store the cycle statistics
3560 *
3561 * Returns performance statistics from PMRX.
3562 */
3563 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3564 {
3565 int i;
3566 u32 data[2];
3567
3568 for (i = 0; i < PM_NSTATS; i++) {
3569 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
3570 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
3571 if (is_t4(adap->params.chip)) {
3572 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
3573 } else {
3574 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
3575 PM_RX_DBG_DATA_A, data, 2,
3576 PM_RX_DBG_STAT_MSB_A);
3577 cycles[i] = (((u64)data[0] << 32) | data[1]);
3578 }
3579 }
3580 }
3581
3582 /**
3583 * t4_get_mps_bg_map - return the buffer groups associated with a port
3584 * @adap: the adapter
3585 * @idx: the port index
3586 *
3587 * Returns a bitmap indicating which MPS buffer groups are associated
3588 * with the given port. Bit i is set if buffer group i is used by the
3589 * port.
3590 */
3591 unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
3592 {
3593 u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
3594
3595 if (n == 0)
3596 return idx == 0 ? 0xf : 0;
3597 if (n == 1)
3598 return idx < 2 ? (3 << (2 * idx)) : 0;
3599 return 1 << idx;
3600 }
3601
3602 /**
3603 * t4_get_port_type_description - return Port Type string description
3604 * @port_type: firmware Port Type enumeration
3605 */
3606 const char *t4_get_port_type_description(enum fw_port_type port_type)
3607 {
3608 static const char *const port_type_description[] = {
3609 "R XFI",
3610 "R XAUI",
3611 "T SGMII",
3612 "T XFI",
3613 "T XAUI",
3614 "KX4",
3615 "CX4",
3616 "KX",
3617 "KR",
3618 "R SFP+",
3619 "KR/KX",
3620 "KR/KX/KX4",
3621 "R QSFP_10G",
3622 "R QSA",
3623 "R QSFP",
3624 "R BP40_BA",
3625 };
3626
3627 if (port_type < ARRAY_SIZE(port_type_description))
3628 return port_type_description[port_type];
3629 return "UNKNOWN";
3630 }
3631
3632 /**
3633 * t4_get_port_stats - collect port statistics
3634 * @adap: the adapter
3635 * @idx: the port index
3636 * @p: the stats structure to fill
3637 *
3638 * Collect statistics related to the given port from HW.
3639 */
3640 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3641 {
3642 u32 bgmap = t4_get_mps_bg_map(adap, idx);
3643
3644 #define GET_STAT(name) \
3645 t4_read_reg64(adap, \
3646 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
3647 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
3648 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
3649
3650 p->tx_octets = GET_STAT(TX_PORT_BYTES);
3651 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
3652 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
3653 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
3654 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
3655 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
3656 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
3657 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
3658 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
3659 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
3660 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
3661 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3662 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
3663 p->tx_drop = GET_STAT(TX_PORT_DROP);
3664 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
3665 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
3666 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
3667 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
3668 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
3669 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
3670 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
3671 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
3672 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
3673
3674 p->rx_octets = GET_STAT(RX_PORT_BYTES);
3675 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
3676 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
3677 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
3678 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
3679 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
3680 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3681 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
3682 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
3683 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
3684 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
3685 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
3686 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
3687 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
3688 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
3689 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
3690 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3691 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
3692 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
3693 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
3694 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
3695 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
3696 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
3697 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
3698 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
3699 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
3700 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
3701
3702 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3703 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3704 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3705 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3706 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3707 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3708 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3709 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3710
3711 #undef GET_STAT
3712 #undef GET_STAT_COM
3713 }
3714
3715 /**
3716 * t4_wol_magic_enable - enable/disable magic packet WoL
3717 * @adap: the adapter
3718 * @port: the physical port index
3719 * @addr: MAC address expected in magic packets, %NULL to disable
3720 *
3721 * Enables/disables magic packet wake-on-LAN for the selected port.
3722 */
3723 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
3724 const u8 *addr)
3725 {
3726 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
3727
3728 if (is_t4(adap->params.chip)) {
3729 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
3730 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
3731 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
3732 } else {
3733 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
3734 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
3735 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
3736 }
3737
3738 if (addr) {
3739 t4_write_reg(adap, mag_id_reg_l,
3740 (addr[2] << 24) | (addr[3] << 16) |
3741 (addr[4] << 8) | addr[5]);
3742 t4_write_reg(adap, mag_id_reg_h,
3743 (addr[0] << 8) | addr[1]);
3744 }
3745 t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
3746 addr ? MAGICEN_F : 0);
3747 }
3748
3749 /**
3750 * t4_wol_pat_enable - enable/disable pattern-based WoL
3751 * @adap: the adapter
3752 * @port: the physical port index
3753 * @map: bitmap of which HW pattern filters to set
3754 * @mask0: byte mask for bytes 0-63 of a packet
3755 * @mask1: byte mask for bytes 64-127 of a packet
3756 * @crc: Ethernet CRC for selected bytes
3757 * @enable: enable/disable switch
3758 *
3759 * Sets the pattern filters indicated in @map to mask out the bytes
3760 * specified in @mask0/@mask1 in received packets and compare the CRC of
3761 * the resulting packet against @crc. If @enable is %true pattern-based
3762 * WoL is enabled, otherwise disabled.
3763 */
3764 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
3765 u64 mask0, u64 mask1, unsigned int crc, bool enable)
3766 {
3767 int i;
3768 u32 port_cfg_reg;
3769
3770 if (is_t4(adap->params.chip))
3771 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
3772 else
3773 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
3774
3775 if (!enable) {
3776 t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
3777 return 0;
3778 }
3779 if (map > 0xff)
3780 return -EINVAL;
3781
3782 #define EPIO_REG(name) \
3783 (is_t4(adap->params.chip) ? \
3784 PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
3785 T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
3786
3787 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
3788 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
3789 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
3790
3791 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
3792 if (!(map & 1))
3793 continue;
3794
3795 /* write byte masks */
3796 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
3797 t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
3798 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
3799 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
3800 return -ETIMEDOUT;
3801
3802 /* write CRC */
3803 t4_write_reg(adap, EPIO_REG(DATA0), crc);
3804 t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
3805 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
3806 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
3807 return -ETIMEDOUT;
3808 }
3809 #undef EPIO_REG
3810
3811 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
3812 return 0;
3813 }
3814
/* t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
	/* Work request length is expressed in 16-byte units. */
	wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
	wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
				    FW_FILTER_WR_NOREPLY_V(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
	/* Only point the reply at an ingress queue when one was requested. */
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
			cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
}
3835
/* Initialize the common header of firmware command @var: sets the
 * FW_<cmd>_CMD opcode, the request flag, READ or WRITE access, and the
 * command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
					FW_CMD_REQUEST_F | \
					FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
3842
/* Write a 32-bit value to an address in the firmware's address space via a
 * FW_LDST mailbox command.  Returns 0 on success or a negative errno.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
					FW_CMD_REQUEST_F |
					FW_CMD_WRITE_F |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
3861
3862 /**
3863 * t4_mdio_rd - read a PHY register through MDIO
3864 * @adap: the adapter
3865 * @mbox: mailbox to use for the FW command
3866 * @phy_addr: the PHY address
3867 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
3868 * @reg: the register to read
3869 * @valp: where to store the value
3870 *
3871 * Issues a FW command through the given mailbox to read a PHY register.
3872 */
3873 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3874 unsigned int mmd, unsigned int reg, u16 *valp)
3875 {
3876 int ret;
3877 u32 ldst_addrspace;
3878 struct fw_ldst_cmd c;
3879
3880 memset(&c, 0, sizeof(c));
3881 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
3882 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
3883 FW_CMD_REQUEST_F | FW_CMD_READ_F |
3884 ldst_addrspace);
3885 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
3886 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
3887 FW_LDST_CMD_MMD_V(mmd));
3888 c.u.mdio.raddr = cpu_to_be16(reg);
3889
3890 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3891 if (ret == 0)
3892 *valp = be16_to_cpu(c.u.mdio.rval);
3893 return ret;
3894 }
3895
3896 /**
3897 * t4_mdio_wr - write a PHY register through MDIO
3898 * @adap: the adapter
3899 * @mbox: mailbox to use for the FW command
3900 * @phy_addr: the PHY address
3901 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
3902 * @reg: the register to write
3903 * @valp: value to write
3904 *
3905 * Issues a FW command through the given mailbox to write a PHY register.
3906 */
3907 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3908 unsigned int mmd, unsigned int reg, u16 val)
3909 {
3910 u32 ldst_addrspace;
3911 struct fw_ldst_cmd c;
3912
3913 memset(&c, 0, sizeof(c));
3914 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
3915 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
3916 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3917 ldst_addrspace);
3918 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
3919 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
3920 FW_LDST_CMD_MMD_V(mmd));
3921 c.u.mdio.raddr = cpu_to_be16(reg);
3922 c.u.mdio.rval = cpu_to_be16(val);
3923
3924 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3925 }
3926
3927 /**
3928 * t4_sge_decode_idma_state - decode the idma state
3929 * @adap: the adapter
3930 * @state: the state idma is stuck in
3931 */
3932 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
3933 {
3934 static const char * const t4_decode[] = {
3935 "IDMA_IDLE",
3936 "IDMA_PUSH_MORE_CPL_FIFO",
3937 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
3938 "Not used",
3939 "IDMA_PHYSADDR_SEND_PCIEHDR",
3940 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
3941 "IDMA_PHYSADDR_SEND_PAYLOAD",
3942 "IDMA_SEND_FIFO_TO_IMSG",
3943 "IDMA_FL_REQ_DATA_FL_PREP",
3944 "IDMA_FL_REQ_DATA_FL",
3945 "IDMA_FL_DROP",
3946 "IDMA_FL_H_REQ_HEADER_FL",
3947 "IDMA_FL_H_SEND_PCIEHDR",
3948 "IDMA_FL_H_PUSH_CPL_FIFO",
3949 "IDMA_FL_H_SEND_CPL",
3950 "IDMA_FL_H_SEND_IP_HDR_FIRST",
3951 "IDMA_FL_H_SEND_IP_HDR",
3952 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
3953 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
3954 "IDMA_FL_H_SEND_IP_HDR_PADDING",
3955 "IDMA_FL_D_SEND_PCIEHDR",
3956 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
3957 "IDMA_FL_D_REQ_NEXT_DATA_FL",
3958 "IDMA_FL_SEND_PCIEHDR",
3959 "IDMA_FL_PUSH_CPL_FIFO",
3960 "IDMA_FL_SEND_CPL",
3961 "IDMA_FL_SEND_PAYLOAD_FIRST",
3962 "IDMA_FL_SEND_PAYLOAD",
3963 "IDMA_FL_REQ_NEXT_DATA_FL",
3964 "IDMA_FL_SEND_NEXT_PCIEHDR",
3965 "IDMA_FL_SEND_PADDING",
3966 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
3967 "IDMA_FL_SEND_FIFO_TO_IMSG",
3968 "IDMA_FL_REQ_DATAFL_DONE",
3969 "IDMA_FL_REQ_HEADERFL_DONE",
3970 };
3971 static const char * const t5_decode[] = {
3972 "IDMA_IDLE",
3973 "IDMA_ALMOST_IDLE",
3974 "IDMA_PUSH_MORE_CPL_FIFO",
3975 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
3976 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
3977 "IDMA_PHYSADDR_SEND_PCIEHDR",
3978 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
3979 "IDMA_PHYSADDR_SEND_PAYLOAD",
3980 "IDMA_SEND_FIFO_TO_IMSG",
3981 "IDMA_FL_REQ_DATA_FL",
3982 "IDMA_FL_DROP",
3983 "IDMA_FL_DROP_SEND_INC",
3984 "IDMA_FL_H_REQ_HEADER_FL",
3985 "IDMA_FL_H_SEND_PCIEHDR",
3986 "IDMA_FL_H_PUSH_CPL_FIFO",
3987 "IDMA_FL_H_SEND_CPL",
3988 "IDMA_FL_H_SEND_IP_HDR_FIRST",
3989 "IDMA_FL_H_SEND_IP_HDR",
3990 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
3991 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
3992 "IDMA_FL_H_SEND_IP_HDR_PADDING",
3993 "IDMA_FL_D_SEND_PCIEHDR",
3994 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
3995 "IDMA_FL_D_REQ_NEXT_DATA_FL",
3996 "IDMA_FL_SEND_PCIEHDR",
3997 "IDMA_FL_PUSH_CPL_FIFO",
3998 "IDMA_FL_SEND_CPL",
3999 "IDMA_FL_SEND_PAYLOAD_FIRST",
4000 "IDMA_FL_SEND_PAYLOAD",
4001 "IDMA_FL_REQ_NEXT_DATA_FL",
4002 "IDMA_FL_SEND_NEXT_PCIEHDR",
4003 "IDMA_FL_SEND_PADDING",
4004 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
4005 };
4006 static const u32 sge_regs[] = {
4007 SGE_DEBUG_DATA_LOW_INDEX_2_A,
4008 SGE_DEBUG_DATA_LOW_INDEX_3_A,
4009 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
4010 };
4011 const char **sge_idma_decode;
4012 int sge_idma_decode_nstates;
4013 int i;
4014
4015 if (is_t4(adapter->params.chip)) {
4016 sge_idma_decode = (const char **)t4_decode;
4017 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
4018 } else {
4019 sge_idma_decode = (const char **)t5_decode;
4020 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
4021 }
4022
4023 if (state < sge_idma_decode_nstates)
4024 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
4025 else
4026 CH_WARN(adapter, "idma state %d unknown\n", state);
4027
4028 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
4029 CH_WARN(adapter, "SGE register %#x value %#x\n",
4030 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
4031 }
4032
/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
					mbox : FW_HELLO_CMD_MBMASTER_M) |
		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT_F);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret < 0) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
			t4_report_fw_error(adap);
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
	if (state) {
		if (v & FW_HELLO_CMD_ERR_F)
			*state = DEV_STATE_ERR;
		else if (v & FW_HELLO_CMD_INIT_F)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * PCIE_FW_MASTER_M so the test below will work ...
	 */
	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & PCIE_FW_INIT_F)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
4162
4163 /**
4164 * t4_fw_bye - end communication with FW
4165 * @adap: the adapter
4166 * @mbox: mailbox to use for the FW command
4167 *
4168 * Issues a command to terminate communication with FW.
4169 */
4170 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4171 {
4172 struct fw_bye_cmd c;
4173
4174 memset(&c, 0, sizeof(c));
4175 INIT_CMD(c, BYE, WRITE);
4176 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4177 }
4178
/**
 *	t4_early_init - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
int t4_early_init(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
4195
4196 /**
4197 * t4_fw_reset - issue a reset to FW
4198 * @adap: the adapter
4199 * @mbox: mailbox to use for the FW command
4200 * @reset: specifies the type of reset to perform
4201 *
4202 * Issues a reset command of the specified type to FW.
4203 */
4204 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4205 {
4206 struct fw_reset_cmd c;
4207
4208 memset(&c, 0, sizeof(c));
4209 INIT_CMD(c, RESET, WRITE);
4210 c.val = cpu_to_be32(reset);
4211 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4212 }
4213
4214 /**
4215 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4216 * @adap: the adapter
4217 * @mbox: mailbox to use for the FW RESET command (if desired)
4218 * @force: force uP into RESET even if FW RESET command fails
4219 *
4220 * Issues a RESET command to firmware (if desired) with a HALT indication
4221 * and then puts the microprocessor into RESET state. The RESET command
4222 * will only be issued if a legitimate mailbox is provided (mbox <=
4223 * PCIE_FW_MASTER_M).
4224 *
4225 * This is generally used in order for the host to safely manipulate the
4226 * adapter without fear of conflicting with whatever the firmware might
4227 * be doing. The only way out of this state is to RESTART the firmware
4228 * ...
4229 */
4230 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4231 {
4232 int ret = 0;
4233
4234 /*
4235 * If a legitimate mailbox is provided, issue a RESET command
4236 * with a HALT indication.
4237 */
4238 if (mbox <= PCIE_FW_MASTER_M) {
4239 struct fw_reset_cmd c;
4240
4241 memset(&c, 0, sizeof(c));
4242 INIT_CMD(c, RESET, WRITE);
4243 c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
4244 c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
4245 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4246 }
4247
4248 /*
4249 * Normally we won't complete the operation if the firmware RESET
4250 * command fails but if our caller insists we'll go ahead and put the
4251 * uP into RESET. This can be useful if the firmware is hung or even
4252 * missing ... We'll have to take the risk of putting the uP into
4253 * RESET without the cooperation of firmware in that case.
4254 *
4255 * We also force the firmware's HALT flag to be on in case we bypassed
4256 * the firmware RESET command above or we're dealing with old firmware
4257 * which doesn't have the HALT capability. This will serve as a flag
4258 * for the incoming firmware to know that it's coming out of a HALT
4259 * rather than a RESET ... if it's new enough to understand that ...
4260 */
4261 if (ret == 0 || force) {
4262 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
4263 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
4264 PCIE_FW_HALT_F);
4265 }
4266
4267 /*
4268 * And we always return the result of the firmware RESET command
4269 * even when we force the uP into RESET ...
4270 */
4271 return ret;
4272 }
4273
4274 /**
4275 * t4_fw_restart - restart the firmware by taking the uP out of RESET
4276 * @adap: the adapter
4277 * @reset: if we want to do a RESET to restart things
4278 *
4279 * Restart firmware previously halted by t4_fw_halt(). On successful
4280 * return the previous PF Master remains as the new PF Master and there
4281 * is no need to issue a new HELLO command, etc.
4282 *
4283 * We do this in two ways:
4284 *
4285 * 1. If we're dealing with newer firmware we'll simply want to take
4286 * the chip's microprocessor out of RESET. This will cause the
4287 * firmware to start up from its start vector. And then we'll loop
4288 * until the firmware indicates it's started again (PCIE_FW.HALT
4289 * reset to 0) or we timeout.
4290 *
4291 * 2. If we're dealing with older firmware then we'll need to RESET
4292 * the chip since older firmware won't recognize the PCIE_FW.HALT
4293 * flag and automatically RESET itself on startup.
4294 */
4295 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4296 {
4297 if (reset) {
4298 /*
4299 * Since we're directing the RESET instead of the firmware
4300 * doing it automatically, we need to clear the PCIE_FW.HALT
4301 * bit.
4302 */
4303 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
4304
4305 /*
4306 * If we've been given a valid mailbox, first try to get the
4307 * firmware to do the RESET. If that works, great and we can
4308 * return success. Otherwise, if we haven't been given a
4309 * valid mailbox or the RESET command failed, fall back to
4310 * hitting the chip with a hammer.
4311 */
4312 if (mbox <= PCIE_FW_MASTER_M) {
4313 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
4314 msleep(100);
4315 if (t4_fw_reset(adap, mbox,
4316 PIORST_F | PIORSTMODE_F) == 0)
4317 return 0;
4318 }
4319
4320 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
4321 msleep(2000);
4322 } else {
4323 int ms;
4324
4325 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
4326 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4327 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
4328 return 0;
4329 msleep(100);
4330 ms += 100;
4331 }
4332 return -ETIMEDOUT;
4333 }
4334 return 0;
4335 }
4336
4337 /**
4338 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4339 * @adap: the adapter
4340 * @mbox: mailbox to use for the FW RESET command (if desired)
4341 * @fw_data: the firmware image to write
4342 * @size: image size
4343 * @force: force upgrade even if firmware doesn't cooperate
4344 *
4345 * Perform all of the steps necessary for upgrading an adapter's
4346 * firmware image. Normally this requires the cooperation of the
4347 * existing firmware in order to halt all existing activities
4348 * but if an invalid mailbox token is passed in we skip that step
4349 * (though we'll still put the adapter microprocessor into RESET in
4350 * that case).
4351 *
4352 * On successful return the new firmware will have been loaded and
4353 * the adapter will have been fully RESET losing all previous setup
4354 * state. On unsuccessful return the adapter may be completely hosed ...
4355 * positive errno indicates that the adapter is ~probably~ intact, a
4356 * negative errno indicates that things are looking bad ...
4357 */
4358 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4359 const u8 *fw_data, unsigned int size, int force)
4360 {
4361 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4362 int reset, ret;
4363
4364 if (!t4_fw_matches_chip(adap, fw_hdr))
4365 return -EINVAL;
4366
4367 ret = t4_fw_halt(adap, mbox, force);
4368 if (ret < 0 && !force)
4369 return ret;
4370
4371 ret = t4_load_fw(adap, fw_data, size);
4372 if (ret < 0)
4373 return ret;
4374
4375 /*
4376 * Older versions of the firmware don't understand the new
4377 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4378 * restart. So for newly loaded older firmware we'll have to do the
4379 * RESET for it so it starts up on a clean slate. We can tell if
4380 * the newly loaded firmware will handle this right by checking
4381 * its header flags to see if it advertises the capability.
4382 */
4383 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4384 return t4_fw_restart(adap, mbox, reset);
4385 }
4386
4387 /**
4388 * t4_fixup_host_params - fix up host-dependent parameters
4389 * @adap: the adapter
4390 * @page_size: the host's Base Page Size
4391 * @cache_line_size: the host's Cache Line Size
4392 *
4393 * Various registers in T4 contain values which are dependent on the
4394 * host's Base Page and Cache Line Sizes. This function will fix all of
4395 * those registers with the appropriate values as passed in ...
4396 */
4397 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
4398 unsigned int cache_line_size)
4399 {
4400 unsigned int page_shift = fls(page_size) - 1;
4401 unsigned int sge_hps = page_shift - 10;
4402 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
4403 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
4404 unsigned int fl_align_log = fls(fl_align) - 1;
4405
4406 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
4407 HOSTPAGESIZEPF0_V(sge_hps) |
4408 HOSTPAGESIZEPF1_V(sge_hps) |
4409 HOSTPAGESIZEPF2_V(sge_hps) |
4410 HOSTPAGESIZEPF3_V(sge_hps) |
4411 HOSTPAGESIZEPF4_V(sge_hps) |
4412 HOSTPAGESIZEPF5_V(sge_hps) |
4413 HOSTPAGESIZEPF6_V(sge_hps) |
4414 HOSTPAGESIZEPF7_V(sge_hps));
4415
4416 if (is_t4(adap->params.chip)) {
4417 t4_set_reg_field(adap, SGE_CONTROL_A,
4418 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
4419 EGRSTATUSPAGESIZE_F,
4420 INGPADBOUNDARY_V(fl_align_log -
4421 INGPADBOUNDARY_SHIFT_X) |
4422 EGRSTATUSPAGESIZE_V(stat_len != 64));
4423 } else {
4424 /* T5 introduced the separation of the Free List Padding and
4425 * Packing Boundaries. Thus, we can select a smaller Padding
4426 * Boundary to avoid uselessly chewing up PCIe Link and Memory
4427 * Bandwidth, and use a Packing Boundary which is large enough
4428 * to avoid false sharing between CPUs, etc.
4429 *
4430 * For the PCI Link, the smaller the Padding Boundary the
4431 * better. For the Memory Controller, a smaller Padding
4432 * Boundary is better until we cross under the Memory Line
4433 * Size (the minimum unit of transfer to/from Memory). If we
4434 * have a Padding Boundary which is smaller than the Memory
4435 * Line Size, that'll involve a Read-Modify-Write cycle on the
4436 * Memory Controller which is never good. For T5 the smallest
4437 * Padding Boundary which we can select is 32 bytes which is
4438 * larger than any known Memory Controller Line Size so we'll
4439 * use that.
4440 *
4441 * T5 has a different interpretation of the "0" value for the
4442 * Packing Boundary. This corresponds to 16 bytes instead of
4443 * the expected 32 bytes. We never have a Packing Boundary
4444 * less than 32 bytes so we can't use that special value but
4445 * on the other hand, if we wanted 32 bytes, the best we can
4446 * really do is 64 bytes.
4447 */
4448 if (fl_align <= 32) {
4449 fl_align = 64;
4450 fl_align_log = 6;
4451 }
4452 t4_set_reg_field(adap, SGE_CONTROL_A,
4453 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
4454 EGRSTATUSPAGESIZE_F,
4455 INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
4456 EGRSTATUSPAGESIZE_V(stat_len != 64));
4457 t4_set_reg_field(adap, SGE_CONTROL2_A,
4458 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
4459 INGPACKBOUNDARY_V(fl_align_log -
4460 INGPACKBOUNDARY_SHIFT_X));
4461 }
4462 /*
4463 * Adjust various SGE Free List Host Buffer Sizes.
4464 *
4465 * This is something of a crock since we're using fixed indices into
4466 * the array which are also known by the sge.c code and the T4
4467 * Firmware Configuration File. We need to come up with a much better
4468 * approach to managing this array. For now, the first four entries
4469 * are:
4470 *
4471 * 0: Host Page Size
4472 * 1: 64KB
4473 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
4474 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
4475 *
4476 * For the single-MTU buffers in unpacked mode we need to include
4477 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
4478 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
4479 * Padding boundary. All of these are accommodated in the Factory
4480 * Default Firmware Configuration File but we need to adjust it for
4481 * this host's cache line size.
4482 */
4483 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
4484 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
4485 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
4486 & ~(fl_align-1));
4487 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
4488 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
4489 & ~(fl_align-1));
4490
4491 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
4492
4493 return 0;
4494 }
4495
4496 /**
4497 * t4_fw_initialize - ask FW to initialize the device
4498 * @adap: the adapter
4499 * @mbox: mailbox to use for the FW command
4500 *
4501 * Issues a command to FW to partially initialize the device. This
4502 * performs initialization that generally doesn't depend on user input.
4503 */
4504 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4505 {
4506 struct fw_initialize_cmd c;
4507
4508 memset(&c, 0, sizeof(c));
4509 INIT_CMD(c, INITIALIZE, WRITE);
4510 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4511 }
4512
4513 /**
4514 * t4_query_params_rw - query FW or device parameters
4515 * @adap: the adapter
4516 * @mbox: mailbox to use for the FW command
4517 * @pf: the PF
4518 * @vf: the VF
4519 * @nparams: the number of parameters
4520 * @params: the parameter names
4521 * @val: the parameter values
4522 * @rw: Write and read flag
4523 *
4524 * Reads the value of FW or device parameters. Up to 7 parameters can be
4525 * queried at once.
4526 */
4527 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
4528 unsigned int vf, unsigned int nparams, const u32 *params,
4529 u32 *val, int rw)
4530 {
4531 int i, ret;
4532 struct fw_params_cmd c;
4533 __be32 *p = &c.param[0].mnem;
4534
4535 if (nparams > 7)
4536 return -EINVAL;
4537
4538 memset(&c, 0, sizeof(c));
4539 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
4540 FW_CMD_REQUEST_F | FW_CMD_READ_F |
4541 FW_PARAMS_CMD_PFN_V(pf) |
4542 FW_PARAMS_CMD_VFN_V(vf));
4543 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4544
4545 for (i = 0; i < nparams; i++) {
4546 *p++ = cpu_to_be32(*params++);
4547 if (rw)
4548 *p = cpu_to_be32(*(val + i));
4549 p++;
4550 }
4551
4552 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4553 if (ret == 0)
4554 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4555 *val++ = be32_to_cpu(*p);
4556 return ret;
4557 }
4558
/**
 *	t4_query_params - query FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Convenience wrapper around t4_query_params_rw() for a plain read
 *	(rw == 0).  Up to 7 parameters can be queried at once.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}
4565
4566 /**
4567 * t4_set_params_timeout - sets FW or device parameters
4568 * @adap: the adapter
4569 * @mbox: mailbox to use for the FW command
4570 * @pf: the PF
4571 * @vf: the VF
4572 * @nparams: the number of parameters
4573 * @params: the parameter names
4574 * @val: the parameter values
4575 * @timeout: the timeout time
4576 *
4577 * Sets the value of FW or device parameters. Up to 7 parameters can be
4578 * specified at once.
4579 */
4580 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
4581 unsigned int pf, unsigned int vf,
4582 unsigned int nparams, const u32 *params,
4583 const u32 *val, int timeout)
4584 {
4585 struct fw_params_cmd c;
4586 __be32 *p = &c.param[0].mnem;
4587
4588 if (nparams > 7)
4589 return -EINVAL;
4590
4591 memset(&c, 0, sizeof(c));
4592 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
4593 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4594 FW_PARAMS_CMD_PFN_V(pf) |
4595 FW_PARAMS_CMD_VFN_V(vf));
4596 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4597
4598 while (nparams--) {
4599 *p++ = cpu_to_be32(*params++);
4600 *p++ = cpu_to_be32(*val++);
4601 }
4602
4603 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
4604 }
4605
4606 /**
4607 * t4_set_params - sets FW or device parameters
4608 * @adap: the adapter
4609 * @mbox: mailbox to use for the FW command
4610 * @pf: the PF
4611 * @vf: the VF
4612 * @nparams: the number of parameters
4613 * @params: the parameter names
4614 * @val: the parameter values
4615 *
4616 * Sets the value of FW or device parameters. Up to 7 parameters can be
4617 * specified at once.
4618 */
4619 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4620 unsigned int vf, unsigned int nparams, const u32 *params,
4621 const u32 *val)
4622 {
4623 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
4624 FW_CMD_MAX_TIMEOUT);
4625 }
4626
4627 /**
4628 * t4_cfg_pfvf - configure PF/VF resource limits
4629 * @adap: the adapter
4630 * @mbox: mailbox to use for the FW command
4631 * @pf: the PF being configured
4632 * @vf: the VF being configured
4633 * @txq: the max number of egress queues
4634 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
4635 * @rxqi: the max number of interrupt-capable ingress queues
4636 * @rxq: the max number of interruptless ingress queues
4637 * @tc: the PCI traffic class
4638 * @vi: the max number of virtual interfaces
4639 * @cmask: the channel access rights mask for the PF/VF
4640 * @pmask: the port access rights mask for the PF/VF
4641 * @nexact: the maximum number of exact MPS filters
4642 * @rcaps: read capabilities
4643 * @wxcaps: write/execute capabilities
4644 *
4645 * Configures resource limits and capabilities for a physical or virtual
4646 * function.
4647 */
4648 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4649 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4650 unsigned int rxqi, unsigned int rxq, unsigned int tc,
4651 unsigned int vi, unsigned int cmask, unsigned int pmask,
4652 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4653 {
4654 struct fw_pfvf_cmd c;
4655
4656 memset(&c, 0, sizeof(c));
4657 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
4658 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
4659 FW_PFVF_CMD_VFN_V(vf));
4660 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4661 c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
4662 FW_PFVF_CMD_NIQ_V(rxq));
4663 c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
4664 FW_PFVF_CMD_PMASK_V(pmask) |
4665 FW_PFVF_CMD_NEQ_V(txq));
4666 c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
4667 FW_PFVF_CMD_NVI_V(vi) |
4668 FW_PFVF_CMD_NEXACTF_V(nexact));
4669 c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
4670 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
4671 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
4672 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4673 }
4674
/**
 *	t4_alloc_vi - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
				  FW_CMD_WRITE_F | FW_CMD_EXEC_F |
				  FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID_V(port);
	/* The command carries the count of MACs beyond the first. */
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/* Copy whichever additional MACs were requested; each case
		 * deliberately falls through to pick up the lower-numbered
		 * ones too.
		 */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
	return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
}
4728
4729 /**
4730 * t4_set_rxmode - set Rx properties of a virtual interface
4731 * @adap: the adapter
4732 * @mbox: mailbox to use for the FW command
4733 * @viid: the VI id
4734 * @mtu: the new MTU or -1
4735 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4736 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4737 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4738 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
4739 * @sleep_ok: if true we may sleep while awaiting command completion
4740 *
4741 * Sets Rx properties of a virtual interface.
4742 */
4743 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4744 int mtu, int promisc, int all_multi, int bcast, int vlanex,
4745 bool sleep_ok)
4746 {
4747 struct fw_vi_rxmode_cmd c;
4748
4749 /* convert to FW values */
4750 if (mtu < 0)
4751 mtu = FW_RXMODE_MTU_NO_CHG;
4752 if (promisc < 0)
4753 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
4754 if (all_multi < 0)
4755 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
4756 if (bcast < 0)
4757 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
4758 if (vlanex < 0)
4759 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
4760
4761 memset(&c, 0, sizeof(c));
4762 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
4763 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4764 FW_VI_RXMODE_CMD_VIID_V(viid));
4765 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4766 c.mtu_to_vlanexen =
4767 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
4768 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
4769 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
4770 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
4771 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
4772 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4773 }
4774
4775 /**
4776 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4777 * @adap: the adapter
4778 * @mbox: mailbox to use for the FW command
4779 * @viid: the VI id
4780 * @free: if true any existing filters for this VI id are first removed
4781 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
4782 * @addr: the MAC address(es)
4783 * @idx: where to store the index of each allocated filter
4784 * @hash: pointer to hash address filter bitmap
4785 * @sleep_ok: call is allowed to sleep
4786 *
4787 * Allocates an exact-match filter for each of the supplied addresses and
4788 * sets it to the corresponding address. If @idx is not %NULL it should
4789 * have at least @naddr entries, each of which will be set to the index of
4790 * the filter allocated for the corresponding MAC address. If a filter
4791 * could not be allocated for an address its index is set to 0xffff.
4792 * If @hash is not %NULL addresses that fail to allocate an exact filter
4793 * are hashed and update the hash filter bitmap pointed at by @hash.
4794 *
4795 * Returns a negative error number or the number of filters allocated.
4796 */
4797 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4798 unsigned int viid, bool free, unsigned int naddr,
4799 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4800 {
4801 int i, ret;
4802 struct fw_vi_mac_cmd c;
4803 struct fw_vi_mac_exact *p;
4804 unsigned int max_naddr = is_t4(adap->params.chip) ?
4805 NUM_MPS_CLS_SRAM_L_INSTANCES :
4806 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4807
4808 if (naddr > 7)
4809 return -EINVAL;
4810
4811 memset(&c, 0, sizeof(c));
4812 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
4813 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4814 (free ? FW_CMD_EXEC_F : 0) |
4815 FW_VI_MAC_CMD_VIID_V(viid));
4816 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
4817 FW_CMD_LEN16_V((naddr + 2) / 2));
4818
4819 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
4820 p->valid_to_idx =
4821 cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
4822 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
4823 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
4824 }
4825
4826 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
4827 if (ret)
4828 return ret;
4829
4830 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
4831 u16 index = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
4832
4833 if (idx)
4834 idx[i] = index >= max_naddr ? 0xffff : index;
4835 if (index < max_naddr)
4836 ret++;
4837 else if (hash)
4838 *hash |= (1ULL << hash_mac_addr(addr[i]));
4839 }
4840 return ret;
4841 }
4842
4843 /**
4844 * t4_change_mac - modifies the exact-match filter for a MAC address
4845 * @adap: the adapter
4846 * @mbox: mailbox to use for the FW command
4847 * @viid: the VI id
4848 * @idx: index of existing filter for old value of MAC address, or -1
4849 * @addr: the new MAC address value
4850 * @persist: whether a new MAC allocation should be persistent
4851 * @add_smt: if true also add the address to the HW SMT
4852 *
4853 * Modifies an exact-match filter and sets it to the new MAC address.
4854 * Note that in general it is not possible to modify the value of a given
4855 * filter so the generic way to modify an address filter is to free the one
4856 * being used by the old address value and allocate a new filter for the
4857 * new address value. @idx can be -1 if the address is a new addition.
4858 *
4859 * Returns a negative error number or the index of the filter with the new
4860 * MAC value.
4861 */
4862 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4863 int idx, const u8 *addr, bool persist, bool add_smt)
4864 {
4865 int ret, mode;
4866 struct fw_vi_mac_cmd c;
4867 struct fw_vi_mac_exact *p = c.u.exact;
4868 unsigned int max_mac_addr = is_t4(adap->params.chip) ?
4869 NUM_MPS_CLS_SRAM_L_INSTANCES :
4870 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4871
4872 if (idx < 0) /* new allocation */
4873 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4874 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4875
4876 memset(&c, 0, sizeof(c));
4877 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
4878 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4879 FW_VI_MAC_CMD_VIID_V(viid));
4880 c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
4881 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
4882 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
4883 FW_VI_MAC_CMD_IDX_V(idx));
4884 memcpy(p->macaddr, addr, sizeof(p->macaddr));
4885
4886 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4887 if (ret == 0) {
4888 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
4889 if (ret >= max_mac_addr)
4890 ret = -ENOMEM;
4891 }
4892 return ret;
4893 }
4894
4895 /**
4896 * t4_set_addr_hash - program the MAC inexact-match hash filter
4897 * @adap: the adapter
4898 * @mbox: mailbox to use for the FW command
4899 * @viid: the VI id
4900 * @ucast: whether the hash filter should also match unicast addresses
4901 * @vec: the value to be written to the hash filter
4902 * @sleep_ok: call is allowed to sleep
4903 *
4904 * Sets the 64-bit inexact-match hash filter for a virtual interface.
4905 */
4906 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
4907 bool ucast, u64 vec, bool sleep_ok)
4908 {
4909 struct fw_vi_mac_cmd c;
4910
4911 memset(&c, 0, sizeof(c));
4912 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
4913 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4914 FW_VI_ENABLE_CMD_VIID_V(viid));
4915 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
4916 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
4917 FW_CMD_LEN16_V(1));
4918 c.u.hash.hashvec = cpu_to_be64(vec);
4919 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4920 }
4921
4922 /**
4923 * t4_enable_vi_params - enable/disable a virtual interface
4924 * @adap: the adapter
4925 * @mbox: mailbox to use for the FW command
4926 * @viid: the VI id
4927 * @rx_en: 1=enable Rx, 0=disable Rx
4928 * @tx_en: 1=enable Tx, 0=disable Tx
4929 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
4930 *
4931 * Enables/disables a virtual interface. Note that setting DCB Enable
4932 * only makes sense when enabling a Virtual Interface ...
4933 */
4934 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
4935 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
4936 {
4937 struct fw_vi_enable_cmd c;
4938
4939 memset(&c, 0, sizeof(c));
4940 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
4941 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4942 FW_VI_ENABLE_CMD_VIID_V(viid));
4943 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
4944 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
4945 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
4946 FW_LEN16(c));
4947 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
4948 }
4949
4950 /**
4951 * t4_enable_vi - enable/disable a virtual interface
4952 * @adap: the adapter
4953 * @mbox: mailbox to use for the FW command
4954 * @viid: the VI id
4955 * @rx_en: 1=enable Rx, 0=disable Rx
4956 * @tx_en: 1=enable Tx, 0=disable Tx
4957 *
4958 * Enables/disables a virtual interface.
4959 */
4960 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4961 bool rx_en, bool tx_en)
4962 {
4963 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
4964 }
4965
4966 /**
4967 * t4_identify_port - identify a VI's port by blinking its LED
4968 * @adap: the adapter
4969 * @mbox: mailbox to use for the FW command
4970 * @viid: the VI id
4971 * @nblinks: how many times to blink LED at 2.5 Hz
4972 *
4973 * Identifies a VI's port by blinking its LED.
4974 */
4975 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4976 unsigned int nblinks)
4977 {
4978 struct fw_vi_enable_cmd c;
4979
4980 memset(&c, 0, sizeof(c));
4981 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
4982 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4983 FW_VI_ENABLE_CMD_VIID_V(viid));
4984 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
4985 c.blinkdur = cpu_to_be16(nblinks);
4986 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4987 }
4988
4989 /**
4990 * t4_iq_free - free an ingress queue and its FLs
4991 * @adap: the adapter
4992 * @mbox: mailbox to use for the FW command
4993 * @pf: the PF owning the queues
4994 * @vf: the VF owning the queues
4995 * @iqtype: the ingress queue type
4996 * @iqid: ingress queue id
4997 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4998 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4999 *
5000 * Frees an ingress queue and its associated FLs, if any.
5001 */
5002 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5003 unsigned int vf, unsigned int iqtype, unsigned int iqid,
5004 unsigned int fl0id, unsigned int fl1id)
5005 {
5006 struct fw_iq_cmd c;
5007
5008 memset(&c, 0, sizeof(c));
5009 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
5010 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
5011 FW_IQ_CMD_VFN_V(vf));
5012 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
5013 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
5014 c.iqid = cpu_to_be16(iqid);
5015 c.fl0id = cpu_to_be16(fl0id);
5016 c.fl1id = cpu_to_be16(fl1id);
5017 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5018 }
5019
5020 /**
5021 * t4_eth_eq_free - free an Ethernet egress queue
5022 * @adap: the adapter
5023 * @mbox: mailbox to use for the FW command
5024 * @pf: the PF owning the queue
5025 * @vf: the VF owning the queue
5026 * @eqid: egress queue id
5027 *
5028 * Frees an Ethernet egress queue.
5029 */
5030 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5031 unsigned int vf, unsigned int eqid)
5032 {
5033 struct fw_eq_eth_cmd c;
5034
5035 memset(&c, 0, sizeof(c));
5036 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
5037 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5038 FW_EQ_ETH_CMD_PFN_V(pf) |
5039 FW_EQ_ETH_CMD_VFN_V(vf));
5040 c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
5041 c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
5042 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5043 }
5044
5045 /**
5046 * t4_ctrl_eq_free - free a control egress queue
5047 * @adap: the adapter
5048 * @mbox: mailbox to use for the FW command
5049 * @pf: the PF owning the queue
5050 * @vf: the VF owning the queue
5051 * @eqid: egress queue id
5052 *
5053 * Frees a control egress queue.
5054 */
5055 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5056 unsigned int vf, unsigned int eqid)
5057 {
5058 struct fw_eq_ctrl_cmd c;
5059
5060 memset(&c, 0, sizeof(c));
5061 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
5062 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5063 FW_EQ_CTRL_CMD_PFN_V(pf) |
5064 FW_EQ_CTRL_CMD_VFN_V(vf));
5065 c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
5066 c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
5067 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5068 }
5069
/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
				  FW_EQ_OFLD_CMD_PFN_V(pf) |
				  FW_EQ_OFLD_CMD_VFN_V(vf));
	/* FREE_F marks this as a free (rather than alloc) request */
	c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
5094
/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 *	Always returns 0.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		/* Map the FW channel number to the driver's port index. */
		int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_G(stat);

		/* Decode flow-control pause settings from the status word. */
		if (stat & FW_PORT_CMD_RXPAUSE_F)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE_F)
			fc |= PAUSE_TX;
		/* Decode link speed (Mb/s); 0 means no recognized speed bit. */
		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
			speed = 40000;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			/* Update cached link state and notify the OS layer. */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = be16_to_cpu(p->u.info.pcap);
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			/* Transceiver module was inserted/removed/changed. */
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}
5145
5146 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
5147 {
5148 u16 val;
5149
5150 if (pci_is_pcie(adapter->pdev)) {
5151 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
5152 p->speed = val & PCI_EXP_LNKSTA_CLS;
5153 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5154 }
5155 }
5156
5157 /**
5158 * init_link_config - initialize a link's SW state
5159 * @lc: structure holding the link state
5160 * @caps: link capabilities
5161 *
5162 * Initializes the SW state maintained for each link, including the link's
5163 * capabilities and default speed/flow-control/autonegotiation settings.
5164 */
5165 static void init_link_config(struct link_config *lc, unsigned int caps)
5166 {
5167 lc->supported = caps;
5168 lc->requested_speed = 0;
5169 lc->speed = 0;
5170 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5171 if (lc->supported & FW_PORT_CAP_ANEG) {
5172 lc->advertising = lc->supported & ADVERT_MASK;
5173 lc->autoneg = AUTONEG_ENABLE;
5174 lc->requested_fc |= PAUSE_AUTONEG;
5175 } else {
5176 lc->advertising = 0;
5177 lc->autoneg = AUTONEG_DISABLE;
5178 }
5179 }
5180
5181 #define CIM_PF_NOACCESS 0xeeeeeeee
5182
5183 int t4_wait_dev_ready(void __iomem *regs)
5184 {
5185 u32 whoami;
5186
5187 whoami = readl(regs + PL_WHOAMI_A);
5188 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
5189 return 0;
5190
5191 msleep(500);
5192 whoami = readl(regs + PL_WHOAMI_A);
5193 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
5194 }
5195
/* Describes a supported serial flash part: the ID returned by the flash
 * SF_RD_ID command and the part's size.  NOTE(review): despite the name,
 * size_mb appears to hold the size in bytes (see the "4 << 20" table
 * entry in get_flash_params()) — confirm against users.
 */
struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};
5200
5201 static int get_flash_params(struct adapter *adap)
5202 {
5203 /* Table for non-Numonix supported flash parts. Numonix parts are left
5204 * to the preexisting code. All flash parts have 64KB sectors.
5205 */
5206 static struct flash_desc supported_flash[] = {
5207 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
5208 };
5209
5210 int ret;
5211 u32 info;
5212
5213 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
5214 if (!ret)
5215 ret = sf1_read(adap, 3, 0, 1, &info);
5216 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
5217 if (ret)
5218 return ret;
5219
5220 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
5221 if (supported_flash[ret].vendor_and_model_id == info) {
5222 adap->params.sf_size = supported_flash[ret].size_mb;
5223 adap->params.sf_nsec =
5224 adap->params.sf_size / SF_SEC_SIZE;
5225 return 0;
5226 }
5227
5228 if ((info & 0xff) != 0x20) /* not a Numonix flash */
5229 return -EINVAL;
5230 info >>= 16; /* log2 of size */
5231 if (info >= 0x14 && info < 0x18)
5232 adap->params.sf_nsec = 1 << (info - 16);
5233 else if (info == 0x18)
5234 adap->params.sf_nsec = 64;
5235 else
5236 return -EINVAL;
5237 adap->params.sf_size = 1 << info;
5238 adap->params.sf_fw_start =
5239 t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
5240
5241 if (adap->params.sf_size < FLASH_MIN_SIZE)
5242 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
5243 adap->params.sf_size, FLASH_MIN_SIZE);
5244 return 0;
5245 }
5246
/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules: identify the
 *	flash part, record the chip type/revision, and set initial default
 *	values for some adapter tunables.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;
	u32 pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);
	pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));

	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve adapter's device ID; its top nibble encodes the chip
	 * generation (T4/T5).
	 */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		break;
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	adapter->params.cim_la_size = CIMLA_SIZE;
	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;
	return 0;
}
5300
/**
 *	cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 *	@adapter: the adapter
 *	@qid: the Queue ID
 *	@qtype: the Ingress or Egress type for @qid
 *	@pbar2_qoffset: BAR2 Queue Offset
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 SGE Queue Registers information associated with the
 *	indicated Absolute Queue ID.  These are passed back in return value
 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 *	This may return an error which indicates that BAR2 SGE Queue
 *	registers aren't available.  If an error is not returned, then the
 *	following values are returned:
 *
 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 *	require the "Inferred Queue ID" ability may be used.  E.g. the
 *	Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" register may not be used.
 */
int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
			    unsigned int qid,
			    enum t4_bar2_qtype qtype,
			    u64 *pbar2_qoffset,
			    unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers.
	 */
	if (is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_qpp
		     : adapter->params.sge.iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/*  Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}
5389
/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.
 */
int t4_init_devlog_params(struct adapter *adap)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
		dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;

		/* The register encodes the entry count in units of 128. */
		nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
					     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
	dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
5443
5444 /**
5445 * t4_init_sge_params - initialize adap->params.sge
5446 * @adapter: the adapter
5447 *
5448 * Initialize various fields of the adapter's SGE Parameters structure.
5449 */
5450 int t4_init_sge_params(struct adapter *adapter)
5451 {
5452 struct sge_params *sge_params = &adapter->params.sge;
5453 u32 hps, qpp;
5454 unsigned int s_hps, s_qpp;
5455
5456 /* Extract the SGE Page Size for our PF.
5457 */
5458 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
5459 s_hps = (HOSTPAGESIZEPF0_S +
5460 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
5461 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
5462
5463 /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
5464 */
5465 s_qpp = (QUEUESPERPAGEPF0_S +
5466 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
5467 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
5468 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
5469 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
5470 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
5471
5472 return 0;
5473 }
5474
/**
 *	t4_init_tp_params - initialize adap->params.tp
 *	@adap: the adapter
 *
 *	Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap)
{
	int chan;
	u32 v;

	/* Cache the TP timer and delayed-ACK resolutions. */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
	adap->params.tp.tre = TIMERRESOLUTION_G(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			 &adap->params.tp.vlan_pri_map, 1,
			 TP_VLAN_PRI_MAP_A);
	t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			 &adap->params.tp.ingress_config, 1,
			 TP_INGRESS_CONFIG_A);

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       PROTOCOL_F);

	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & VNIC_F) == 0)
		adap->params.tp.vnic_shift = -1;

	return 0;
}
5522
5523 /**
5524 * t4_filter_field_shift - calculate filter field shift
5525 * @adap: the adapter
5526 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5527 *
5528 * Return the shift position of a filter field within the Compressed
5529 * Filter Tuple. The filter field is specified via its selection bit
5530 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
5531 */
5532 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
5533 {
5534 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
5535 unsigned int sel;
5536 int field_shift;
5537
5538 if ((filter_mode & filter_sel) == 0)
5539 return -1;
5540
5541 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5542 switch (filter_mode & sel) {
5543 case FCOE_F:
5544 field_shift += FT_FCOE_W;
5545 break;
5546 case PORT_F:
5547 field_shift += FT_PORT_W;
5548 break;
5549 case VNIC_ID_F:
5550 field_shift += FT_VNIC_ID_W;
5551 break;
5552 case VLAN_F:
5553 field_shift += FT_VLAN_W;
5554 break;
5555 case TOS_F:
5556 field_shift += FT_TOS_W;
5557 break;
5558 case PROTOCOL_F:
5559 field_shift += FT_PROTOCOL_W;
5560 break;
5561 case ETHERTYPE_F:
5562 field_shift += FT_ETHERTYPE_W;
5563 break;
5564 case MACMATCH_F:
5565 field_shift += FT_MACMATCH_W;
5566 break;
5567 case MPSHITTYPE_F:
5568 field_shift += FT_MPSHITTYPE_W;
5569 break;
5570 case FRAGMENTATION_F:
5571 field_shift += FT_FRAGMENTATION_W;
5572 break;
5573 }
5574 }
5575 return field_shift;
5576 }
5577
5578 int t4_init_rss_mode(struct adapter *adap, int mbox)
5579 {
5580 int i, ret;
5581 struct fw_rss_vi_config_cmd rvc;
5582
5583 memset(&rvc, 0, sizeof(rvc));
5584
5585 for_each_port(adap, i) {
5586 struct port_info *p = adap2pinfo(adap, i);
5587
5588 rvc.op_to_viid =
5589 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5590 FW_CMD_REQUEST_F | FW_CMD_READ_F |
5591 FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
5592 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
5593 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
5594 if (ret)
5595 return ret;
5596 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
5597 }
5598 return 0;
5599 }
5600
5601 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
5602 {
5603 u8 addr[6];
5604 int ret, i, j = 0;
5605 struct fw_port_cmd c;
5606 struct fw_rss_vi_config_cmd rvc;
5607
5608 memset(&c, 0, sizeof(c));
5609 memset(&rvc, 0, sizeof(rvc));
5610
5611 for_each_port(adap, i) {
5612 unsigned int rss_size;
5613 struct port_info *p = adap2pinfo(adap, i);
5614
5615 while ((adap->params.portvec & (1 << j)) == 0)
5616 j++;
5617
5618 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
5619 FW_CMD_REQUEST_F | FW_CMD_READ_F |
5620 FW_PORT_CMD_PORTID_V(j));
5621 c.action_to_len16 = cpu_to_be32(
5622 FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
5623 FW_LEN16(c));
5624 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5625 if (ret)
5626 return ret;
5627
5628 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5629 if (ret < 0)
5630 return ret;
5631
5632 p->viid = ret;
5633 p->tx_chan = j;
5634 p->lport = j;
5635 p->rss_size = rss_size;
5636 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
5637 adap->port[i]->dev_port = j;
5638
5639 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
5640 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
5641 FW_PORT_CMD_MDIOADDR_G(ret) : -1;
5642 p->port_type = FW_PORT_CMD_PTYPE_G(ret);
5643 p->mod_type = FW_PORT_MOD_TYPE_NA;
5644
5645 rvc.op_to_viid =
5646 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5647 FW_CMD_REQUEST_F | FW_CMD_READ_F |
5648 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
5649 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
5650 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
5651 if (ret)
5652 return ret;
5653 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
5654
5655 init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
5656 j++;
5657 }
5658 return 0;
5659 }
5660
5661 /**
5662 * t4_read_cimq_cfg - read CIM queue configuration
5663 * @adap: the adapter
5664 * @base: holds the queue base addresses in bytes
5665 * @size: holds the queue sizes in bytes
5666 * @thres: holds the queue full thresholds in bytes
5667 *
5668 * Returns the current configuration of the CIM queues, starting with
5669 * the IBQs, then the OBQs.
5670 */
5671 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
5672 {
5673 unsigned int i, v;
5674 int cim_num_obq = is_t4(adap->params.chip) ?
5675 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
5676
5677 for (i = 0; i < CIM_NUM_IBQ; i++) {
5678 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
5679 QUENUMSELECT_V(i));
5680 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
5681 /* value is in 256-byte units */
5682 *base++ = CIMQBASE_G(v) * 256;
5683 *size++ = CIMQSIZE_G(v) * 256;
5684 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
5685 }
5686 for (i = 0; i < cim_num_obq; i++) {
5687 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
5688 QUENUMSELECT_V(i));
5689 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
5690 /* value is in 256-byte units */
5691 *base++ = CIMQBASE_G(v) * 256;
5692 *size++ = CIMQSIZE_G(v) * 256;
5693 }
5694 }
5695
5696 /**
5697 * t4_read_cim_ibq - read the contents of a CIM inbound queue
5698 * @adap: the adapter
5699 * @qid: the queue index
5700 * @data: where to store the queue contents
5701 * @n: capacity of @data in 32-bit words
5702 *
5703 * Reads the contents of the selected CIM queue starting at address 0 up
5704 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
5705 * error and the number of 32-bit words actually read on success.
5706 */
5707 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
5708 {
5709 int i, err, attempts;
5710 unsigned int addr;
5711 const unsigned int nwords = CIM_IBQ_SIZE * 4;
5712
5713 if (qid > 5 || (n & 3))
5714 return -EINVAL;
5715
5716 addr = qid * nwords;
5717 if (n > nwords)
5718 n = nwords;
5719
5720 /* It might take 3-10ms before the IBQ debug read access is allowed.
5721 * Wait for 1 Sec with a delay of 1 usec.
5722 */
5723 attempts = 1000000;
5724
5725 for (i = 0; i < n; i++, addr++) {
5726 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
5727 IBQDBGEN_F);
5728 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
5729 attempts, 1);
5730 if (err)
5731 return err;
5732 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
5733 }
5734 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
5735 return i;
5736 }
5737
/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = is_t4(adap->params.chip) ?
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	/* Look up the selected queue's base and size from the CIM queue
	 * configuration registers.
	 */
	t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);

	addr = CIMQBASE_G(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = CIMQSIZE_G(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
			     OBQDBGEN_F);
		err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
	}
	t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
	return i;
}
5780
5781 /**
5782 * t4_cim_read - read a block from CIM internal address space
5783 * @adap: the adapter
5784 * @addr: the start address within the CIM address space
5785 * @n: number of words to read
5786 * @valp: where to store the result
5787 *
5788 * Reads a block of 4-byte words from the CIM intenal address space.
5789 */
5790 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
5791 unsigned int *valp)
5792 {
5793 int ret = 0;
5794
5795 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
5796 return -EBUSY;
5797
5798 for ( ; !ret && n--; addr += 4) {
5799 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
5800 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
5801 0, 5, 2);
5802 if (!ret)
5803 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
5804 }
5805 return ret;
5806 }
5807
5808 /**
5809 * t4_cim_write - write a block into CIM internal address space
5810 * @adap: the adapter
5811 * @addr: the start address within the CIM address space
5812 * @n: number of words to write
5813 * @valp: set of values to write
5814 *
5815 * Writes a block of 4-byte words into the CIM intenal address space.
5816 */
5817 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
5818 const unsigned int *valp)
5819 {
5820 int ret = 0;
5821
5822 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
5823 return -EBUSY;
5824
5825 for ( ; !ret && n--; addr += 4) {
5826 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
5827 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
5828 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
5829 0, 5, 2);
5830 }
5831 return ret;
5832 }
5833
/* Convenience wrapper: write a single 4-byte word @val at CIM internal
 * address @addr.
 */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	unsigned int v = val;

	return t4_cim_write(adap, addr, 1, &v);
}
5839
5840 /**
5841 * t4_cim_read_la - read CIM LA capture buffer
5842 * @adap: the adapter
5843 * @la_buf: where to store the LA data
5844 * @wrptr: the HW write pointer within the capture buffer
5845 *
5846 * Reads the contents of the CIM LA buffer with the most recent entry at
5847 * the end of the returned data and with the entry at @wrptr first.
5848 * We try to leave the LA in the running state we find it in.
5849 */
5850 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
5851 {
5852 int i, ret;
5853 unsigned int cfg, val, idx;
5854
5855 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
5856 if (ret)
5857 return ret;
5858
5859 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
5860 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
5861 if (ret)
5862 return ret;
5863 }
5864
5865 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
5866 if (ret)
5867 goto restart;
5868
5869 idx = UPDBGLAWRPTR_G(val);
5870 if (wrptr)
5871 *wrptr = idx;
5872
5873 for (i = 0; i < adap->params.cim_la_size; i++) {
5874 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
5875 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
5876 if (ret)
5877 break;
5878 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
5879 if (ret)
5880 break;
5881 if (val & UPDBGLARDEN_F) {
5882 ret = -ETIMEDOUT;
5883 break;
5884 }
5885 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
5886 if (ret)
5887 break;
5888 idx = (idx + 1) & UPDBGLARDPTR_M;
5889 }
5890 restart:
5891 if (cfg & UPDBGLAEN_F) {
5892 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
5893 cfg & ~UPDBGLARDEN_F);
5894 if (!ret)
5895 ret = r;
5896 }
5897 return ret;
5898 }
5899
5900 /**
5901 * t4_tp_read_la - read TP LA capture buffer
5902 * @adap: the adapter
5903 * @la_buf: where to store the LA data
5904 * @wrptr: the HW write pointer within the capture buffer
5905 *
5906 * Reads the contents of the TP LA buffer with the most recent entry at
5907 * the end of the returned data and with the entry at @wrptr first.
5908 * We leave the LA in the running state we find it in.
5909 */
5910 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
5911 {
5912 bool last_incomplete;
5913 unsigned int i, cfg, val, idx;
5914
5915 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
5916 if (cfg & DBGLAENABLE_F) /* freeze LA */
5917 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
5918 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
5919
5920 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
5921 idx = DBGLAWPTR_G(val);
5922 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
5923 if (last_incomplete)
5924 idx = (idx + 1) & DBGLARPTR_M;
5925 if (wrptr)
5926 *wrptr = idx;
5927
5928 val &= 0xffff;
5929 val &= ~DBGLARPTR_V(DBGLARPTR_M);
5930 val |= adap->params.tp.la_mask;
5931
5932 for (i = 0; i < TPLA_SIZE; i++) {
5933 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
5934 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
5935 idx = (idx + 1) & DBGLARPTR_M;
5936 }
5937
5938 /* Wipe out last entry if it isn't valid */
5939 if (last_incomplete)
5940 la_buf[TPLA_SIZE - 1] = ~0ULL;
5941
5942 if (cfg & DBGLAENABLE_F) /* restore running state */
5943 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
5944 cfg | adap->params.tp.la_mask);
5945 }
5946
/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat seconds till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1
#define SGE_IDMA_WARN_REPEAT 300
5956
5957 /**
5958 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
5959 * @adapter: the adapter
5960 * @idma: the adapter IDMA Monitor state
5961 *
5962 * Initialize the state of an SGE Ingress DMA Monitor.
5963 */
5964 void t4_idma_monitor_init(struct adapter *adapter,
5965 struct sge_idma_monitor_state *idma)
5966 {
5967 /* Initialize the state variables for detecting an SGE Ingress DMA
5968 * hang. The SGE has internal counters which count up on each clock
5969 * tick whenever the SGE finds its Ingress DMA State Engines in the
5970 * same state they were on the previous clock tick. The clock used is
5971 * the Core Clock so we have a limit on the maximum "time" they can
5972 * record; typically a very small number of seconds. For instance,
5973 * with a 600MHz Core Clock, we can only count up to a bit more than
5974 * 7s. So we'll synthesize a larger counter in order to not run the
5975 * risk of having the "timers" overflow and give us the flexibility to
5976 * maintain a Hung SGE State Machine of our own which operates across
5977 * a longer time frame.
5978 */
5979 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
5980 idma->idma_stalled[0] = 0;
5981 idma->idma_stalled[1] = 0;
5982 }
5983
/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 *
 *	Samples the SGE's per-channel Ingress DMA "same state" counters and
 *	maintains a synthesized stall timer per channel.  Emits a warning
 *	when a channel appears stuck past SGE_IDMA_WARN_THRESH seconds and
 *	repeats it every SGE_IDMA_WARN_REPEAT seconds; notes when a
 *	previously-warned channel resumes forward progress.
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers. These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock. The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
	idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);

	/* One pass per Ingress DMA channel */
	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue. If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
				dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
					 "resumed after %d seconds\n",
					 i, idma->idma_qid[i],
					 idma->idma_stalled[i] / hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain. The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			/* First tick past the HW 1s counter: start our own
			 * timer at 1s and arm the warning countdown.
			 */
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
		debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
		debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
			 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			 i, idma->idma_qid[i], idma->idma_state[i],
			 idma->idma_stalled[i] / hz,
			 debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}