2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/delay.h>
38 #include "t4_values.h"
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
56 static int t4_wait_op_done_val(struct adapter
*adapter
, int reg
, u32 mask
,
57 int polarity
, int attempts
, int delay
, u32
*valp
)
60 u32 val
= t4_read_reg(adapter
, reg
);
62 if (!!(val
& mask
) == polarity
) {
74 static inline int t4_wait_op_done(struct adapter
*adapter
, int reg
, u32 mask
,
75 int polarity
, int attempts
, int delay
)
77 return t4_wait_op_done_val(adapter
, reg
, mask
, polarity
, attempts
,
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
88 * Sets a register field specified by the supplied mask to the
91 void t4_set_reg_field(struct adapter
*adapter
, unsigned int addr
, u32 mask
,
94 u32 v
= t4_read_reg(adapter
, addr
) & ~mask
;
96 t4_write_reg(adapter
, addr
, v
| val
);
97 (void) t4_read_reg(adapter
, addr
); /* flush */
101 * t4_read_indirect - read indirectly addressed registers
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
109 * Reads registers that are accessed indirectly through an address/data
112 void t4_read_indirect(struct adapter
*adap
, unsigned int addr_reg
,
113 unsigned int data_reg
, u32
*vals
,
114 unsigned int nregs
, unsigned int start_idx
)
117 t4_write_reg(adap
, addr_reg
, start_idx
);
118 *vals
++ = t4_read_reg(adap
, data_reg
);
124 * t4_write_indirect - write indirectly addressed registers
126 * @addr_reg: register holding the indirect addresses
127 * @data_reg: register holding the value for the indirect registers
128 * @vals: values to write
129 * @nregs: how many indirect registers to write
130 * @start_idx: address of first indirect register to write
132 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair.
135 void t4_write_indirect(struct adapter
*adap
, unsigned int addr_reg
,
136 unsigned int data_reg
, const u32
*vals
,
137 unsigned int nregs
, unsigned int start_idx
)
140 t4_write_reg(adap
, addr_reg
, start_idx
++);
141 t4_write_reg(adap
, data_reg
, *vals
++);
146 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
147 * mechanism. This guarantees that we get the real value even if we're
148 * operating within a Virtual Machine and the Hypervisor is trapping our
149 * Configuration Space accesses.
151 void t4_hw_pci_read_cfg4(struct adapter
*adap
, int reg
, u32
*val
)
153 u32 req
= ENABLE_F
| FUNCTION_V(adap
->fn
) | REGISTER_V(reg
);
155 if (is_t4(adap
->params
.chip
))
158 t4_write_reg(adap
, PCIE_CFG_SPACE_REQ_A
, req
);
159 *val
= t4_read_reg(adap
, PCIE_CFG_SPACE_DATA_A
);
161 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
162 * Configuration Space read. (None of the other fields matter when
163 * ENABLE is 0 so a simple register write is easier than a
164 * read-modify-write via t4_set_reg_field().)
166 t4_write_reg(adap
, PCIE_CFG_SPACE_REQ_A
, 0);
170 * t4_report_fw_error - report firmware error
173 * The adapter firmware can indicate error conditions to the host.
174 * If the firmware has indicated an error, print out the reason for
175 * the firmware error.
177 static void t4_report_fw_error(struct adapter
*adap
)
179 static const char *const reason
[] = {
180 "Crash", /* PCIE_FW_EVAL_CRASH */
181 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
182 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
183 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
184 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
185 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
186 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
187 "Reserved", /* reserved */
191 pcie_fw
= t4_read_reg(adap
, PCIE_FW_A
);
192 if (pcie_fw
& PCIE_FW_ERR_F
)
193 dev_err(adap
->pdev_dev
, "Firmware reports adapter error: %s\n",
194 reason
[PCIE_FW_EVAL_G(pcie_fw
)]);
198 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
200 static void get_mbox_rpl(struct adapter
*adap
, __be64
*rpl
, int nflit
,
203 for ( ; nflit
; nflit
--, mbox_addr
+= 8)
204 *rpl
++ = cpu_to_be64(t4_read_reg64(adap
, mbox_addr
));
208 * Handle a FW assertion reported in a mailbox.
210 static void fw_asrt(struct adapter
*adap
, u32 mbox_addr
)
212 struct fw_debug_cmd asrt
;
214 get_mbox_rpl(adap
, (__be64
*)&asrt
, sizeof(asrt
) / 8, mbox_addr
);
215 dev_alert(adap
->pdev_dev
,
216 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
217 asrt
.u
.assert.filename_0_7
, be32_to_cpu(asrt
.u
.assert.line
),
218 be32_to_cpu(asrt
.u
.assert.x
), be32_to_cpu(asrt
.u
.assert.y
));
221 static void dump_mbox(struct adapter
*adap
, int mbox
, u32 data_reg
)
223 dev_err(adap
->pdev_dev
,
224 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox
,
225 (unsigned long long)t4_read_reg64(adap
, data_reg
),
226 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 8),
227 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 16),
228 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 24),
229 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 32),
230 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 40),
231 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 48),
232 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 56));
236 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
238 * @mbox: index of the mailbox to use
239 * @cmd: the command to write
240 * @size: command length in bytes
241 * @rpl: where to optionally store the reply
242 * @sleep_ok: if true we may sleep while awaiting command completion
243 * @timeout: time to wait for command to finish before timing out
245 * Sends the given command to FW through the selected mailbox and waits
246 * for the FW to execute the command. If @rpl is not %NULL it is used to
247 * store the FW's reply to the command. The command and its optional
248 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
249 * to respond. @sleep_ok determines whether we may sleep while awaiting
250 * the response. If sleeping is allowed we use progressive backoff
253 * The return value is 0 on success or a negative errno on failure. A
254 * failure can happen either because we are not able to execute the
255 * command or FW executes it but signals an error. In the latter case
256 * the return value is the error code indicated by FW (negated).
258 int t4_wr_mbox_meat_timeout(struct adapter
*adap
, int mbox
, const void *cmd
,
259 int size
, void *rpl
, bool sleep_ok
, int timeout
)
261 static const int delay
[] = {
262 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
267 int i
, ms
, delay_idx
;
268 const __be64
*p
= cmd
;
269 u32 data_reg
= PF_REG(mbox
, CIM_PF_MAILBOX_DATA_A
);
270 u32 ctl_reg
= PF_REG(mbox
, CIM_PF_MAILBOX_CTRL_A
);
272 if ((size
& 15) || size
> MBOX_LEN
)
276 * If the device is off-line, as in EEH, commands will time out.
277 * Fail them early so we don't waste time waiting.
279 if (adap
->pdev
->error_state
!= pci_channel_io_normal
)
282 v
= MBOWNER_G(t4_read_reg(adap
, ctl_reg
));
283 for (i
= 0; v
== MBOX_OWNER_NONE
&& i
< 3; i
++)
284 v
= MBOWNER_G(t4_read_reg(adap
, ctl_reg
));
286 if (v
!= MBOX_OWNER_DRV
)
287 return v
? -EBUSY
: -ETIMEDOUT
;
289 for (i
= 0; i
< size
; i
+= 8)
290 t4_write_reg64(adap
, data_reg
+ i
, be64_to_cpu(*p
++));
292 t4_write_reg(adap
, ctl_reg
, MBMSGVALID_F
| MBOWNER_V(MBOX_OWNER_FW
));
293 t4_read_reg(adap
, ctl_reg
); /* flush write */
298 for (i
= 0; i
< timeout
; i
+= ms
) {
300 ms
= delay
[delay_idx
]; /* last element may repeat */
301 if (delay_idx
< ARRAY_SIZE(delay
) - 1)
307 v
= t4_read_reg(adap
, ctl_reg
);
308 if (MBOWNER_G(v
) == MBOX_OWNER_DRV
) {
309 if (!(v
& MBMSGVALID_F
)) {
310 t4_write_reg(adap
, ctl_reg
, 0);
314 res
= t4_read_reg64(adap
, data_reg
);
315 if (FW_CMD_OP_G(res
>> 32) == FW_DEBUG_CMD
) {
316 fw_asrt(adap
, data_reg
);
317 res
= FW_CMD_RETVAL_V(EIO
);
319 get_mbox_rpl(adap
, rpl
, size
/ 8, data_reg
);
322 if (FW_CMD_RETVAL_G((int)res
))
323 dump_mbox(adap
, mbox
, data_reg
);
324 t4_write_reg(adap
, ctl_reg
, 0);
325 return -FW_CMD_RETVAL_G((int)res
);
329 dump_mbox(adap
, mbox
, data_reg
);
330 dev_err(adap
->pdev_dev
, "command %#x in mailbox %d timed out\n",
331 *(const u8
*)cmd
, mbox
);
332 t4_report_fw_error(adap
);
336 int t4_wr_mbox_meat(struct adapter
*adap
, int mbox
, const void *cmd
, int size
,
337 void *rpl
, bool sleep_ok
)
339 return t4_wr_mbox_meat_timeout(adap
, mbox
, cmd
, size
, rpl
, sleep_ok
,
344 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
346 * @win: PCI-E Memory Window to use
347 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
348 * @addr: address within indicated memory type
349 * @len: amount of memory to transfer
350 * @hbuf: host memory buffer
351 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
353 * Reads/writes an [almost] arbitrary memory region in the firmware: the
354 * firmware memory address and host buffer must be aligned on 32-bit
355 * boudaries; the length may be arbitrary. The memory is transferred as
356 * a raw byte sequence from/to the firmware's memory. If this memory
357 * contains data structures which contain multi-byte integers, it's the
358 * caller's responsibility to perform appropriate byte order conversions.
360 int t4_memory_rw(struct adapter
*adap
, int win
, int mtype
, u32 addr
,
361 u32 len
, void *hbuf
, int dir
)
363 u32 pos
, offset
, resid
, memoffset
;
364 u32 edc_size
, mc_size
, win_pf
, mem_reg
, mem_aperture
, mem_base
;
367 /* Argument sanity checks ...
369 if (addr
& 0x3 || (uintptr_t)hbuf
& 0x3)
373 /* It's convenient to be able to handle lengths which aren't a
374 * multiple of 32-bits because we often end up transferring files to
375 * the firmware. So we'll handle that by normalizing the length here
376 * and then handling any residual transfer at the end.
381 /* Offset into the region of memory which is being accessed
385 * MEM_MC0 = 2 -- For T5
386 * MEM_MC1 = 3 -- For T5
388 edc_size
= EDRAM0_SIZE_G(t4_read_reg(adap
, MA_EDRAM0_BAR_A
));
389 if (mtype
!= MEM_MC1
)
390 memoffset
= (mtype
* (edc_size
* 1024 * 1024));
392 mc_size
= EXT_MEM0_SIZE_G(t4_read_reg(adap
,
393 MA_EXT_MEMORY0_BAR_A
));
394 memoffset
= (MEM_MC0
* edc_size
+ mc_size
) * 1024 * 1024;
397 /* Determine the PCIE_MEM_ACCESS_OFFSET */
398 addr
= addr
+ memoffset
;
400 /* Each PCI-E Memory Window is programmed with a window size -- or
401 * "aperture" -- which controls the granularity of its mapping onto
402 * adapter memory. We need to grab that aperture in order to know
403 * how to use the specified window. The window is also programmed
404 * with the base address of the Memory Window in BAR0's address
405 * space. For T4 this is an absolute PCI-E Bus Address. For T5
406 * the address is relative to BAR0.
408 mem_reg
= t4_read_reg(adap
,
409 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A
,
411 mem_aperture
= 1 << (WINDOW_G(mem_reg
) + WINDOW_SHIFT_X
);
412 mem_base
= PCIEOFST_G(mem_reg
) << PCIEOFST_SHIFT_X
;
413 if (is_t4(adap
->params
.chip
))
414 mem_base
-= adap
->t4_bar0
;
415 win_pf
= is_t4(adap
->params
.chip
) ? 0 : PFNUM_V(adap
->fn
);
417 /* Calculate our initial PCI-E Memory Window Position and Offset into
420 pos
= addr
& ~(mem_aperture
-1);
423 /* Set up initial PCI-E Memory Window to cover the start of our
424 * transfer. (Read it back to ensure that changes propagate before we
425 * attempt to use the new value.)
428 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A
, win
),
431 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A
, win
));
433 /* Transfer data to/from the adapter as long as there's an integral
434 * number of 32-bit transfers to complete.
436 * A note on Endianness issues:
438 * The "register" reads and writes below from/to the PCI-E Memory
439 * Window invoke the standard adapter Big-Endian to PCI-E Link
440 * Little-Endian "swizzel." As a result, if we have the following
441 * data in adapter memory:
443 * Memory: ... | b0 | b1 | b2 | b3 | ...
444 * Address: i+0 i+1 i+2 i+3
446 * Then a read of the adapter memory via the PCI-E Memory Window
451 * [ b3 | b2 | b1 | b0 ]
453 * If this value is stored into local memory on a Little-Endian system
454 * it will show up correctly in local memory as:
456 * ( ..., b0, b1, b2, b3, ... )
458 * But on a Big-Endian system, the store will show up in memory
459 * incorrectly swizzled as:
461 * ( ..., b3, b2, b1, b0, ... )
463 * So we need to account for this in the reads and writes to the
464 * PCI-E Memory Window below by undoing the register read/write
468 if (dir
== T4_MEMORY_READ
)
469 *buf
++ = le32_to_cpu((__force __le32
)t4_read_reg(adap
,
472 t4_write_reg(adap
, mem_base
+ offset
,
473 (__force u32
)cpu_to_le32(*buf
++));
474 offset
+= sizeof(__be32
);
475 len
-= sizeof(__be32
);
477 /* If we've reached the end of our current window aperture,
478 * move the PCI-E Memory Window on to the next. Note that
479 * doing this here after "len" may be 0 allows us to set up
480 * the PCI-E Memory Window for a possible final residual
483 if (offset
== mem_aperture
) {
487 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A
,
490 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A
,
495 /* If the original transfer had a length which wasn't a multiple of
496 * 32-bits, now's where we need to finish off the transfer of the
497 * residual amount. The PCI-E Memory Window has already been moved
498 * above (if necessary) to cover this final transfer.
508 if (dir
== T4_MEMORY_READ
) {
509 last
.word
= le32_to_cpu(
510 (__force __le32
)t4_read_reg(adap
,
512 for (bp
= (unsigned char *)buf
, i
= resid
; i
< 4; i
++)
513 bp
[i
] = last
.byte
[i
];
516 for (i
= resid
; i
< 4; i
++)
518 t4_write_reg(adap
, mem_base
+ offset
,
519 (__force u32
)cpu_to_le32(last
.word
));
526 /* Return the specified PCI-E Configuration Space register from our Physical
527 * Function. We try first via a Firmware LDST Command since we prefer to let
528 * the firmware own all of these registers, but if that fails we go for it
529 * directly ourselves.
531 u32
t4_read_pcie_cfg4(struct adapter
*adap
, int reg
)
533 u32 val
, ldst_addrspace
;
535 /* If fw_attach != 0, construct and send the Firmware LDST Command to
536 * retrieve the specified PCI-E Configuration Space register.
538 struct fw_ldst_cmd ldst_cmd
;
541 memset(&ldst_cmd
, 0, sizeof(ldst_cmd
));
542 ldst_addrspace
= FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE
);
543 ldst_cmd
.op_to_addrspace
= cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD
) |
547 ldst_cmd
.cycles_to_len16
= cpu_to_be32(FW_LEN16(ldst_cmd
));
548 ldst_cmd
.u
.pcie
.select_naccess
= FW_LDST_CMD_NACCESS_V(1);
549 ldst_cmd
.u
.pcie
.ctrl_to_fn
=
550 (FW_LDST_CMD_LC_F
| FW_LDST_CMD_FN_V(adap
->fn
));
551 ldst_cmd
.u
.pcie
.r
= reg
;
553 /* If the LDST Command succeeds, return the result, otherwise
554 * fall through to reading it directly ourselves ...
556 ret
= t4_wr_mbox(adap
, adap
->mbox
, &ldst_cmd
, sizeof(ldst_cmd
),
559 val
= be32_to_cpu(ldst_cmd
.u
.pcie
.data
[0]);
561 /* Read the desired Configuration Space register via the PCI-E
562 * Backdoor mechanism.
564 t4_hw_pci_read_cfg4(adap
, reg
, &val
);
568 /* Get the window based on base passed to it.
569 * Window aperture is currently unhandled, but there is no use case for it
572 static u32
t4_get_window(struct adapter
*adap
, u32 pci_base
, u64 pci_mask
,
577 if (is_t4(adap
->params
.chip
)) {
580 /* Truncation intentional: we only read the bottom 32-bits of
581 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
582 * mechanism to read BAR0 instead of using
583 * pci_resource_start() because we could be operating from
584 * within a Virtual Machine which is trapping our accesses to
585 * our Configuration Space and we need to set up the PCI-E
586 * Memory Window decoders with the actual addresses which will
587 * be coming across the PCI-E link.
589 bar0
= t4_read_pcie_cfg4(adap
, pci_base
);
591 adap
->t4_bar0
= bar0
;
593 ret
= bar0
+ memwin_base
;
595 /* For T5, only relative offset inside the PCIe BAR is passed */
601 /* Get the default utility window (win0) used by everyone */
602 u32
t4_get_util_window(struct adapter
*adap
)
604 return t4_get_window(adap
, PCI_BASE_ADDRESS_0
,
605 PCI_BASE_ADDRESS_MEM_MASK
, MEMWIN0_BASE
);
608 /* Set up memory window for accessing adapter memory ranges. (Read
609 * back MA register to ensure that changes propagate before we attempt
610 * to use the new values.)
612 void t4_setup_memwin(struct adapter
*adap
, u32 memwin_base
, u32 window
)
615 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A
, window
),
616 memwin_base
| BIR_V(0) |
617 WINDOW_V(ilog2(MEMWIN0_APERTURE
) - WINDOW_SHIFT_X
));
619 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A
, window
));
623 * t4_get_regs_len - return the size of the chips register set
624 * @adapter: the adapter
626 * Returns the size of the chip's BAR0 register space.
628 unsigned int t4_get_regs_len(struct adapter
*adapter
)
630 unsigned int chip_version
= CHELSIO_CHIP_VERSION(adapter
->params
.chip
);
632 switch (chip_version
) {
634 return T4_REGMAP_SIZE
;
637 return T5_REGMAP_SIZE
;
640 dev_err(adapter
->pdev_dev
,
641 "Unsupported chip version %d\n", chip_version
);
646 * t4_get_regs - read chip registers into provided buffer
648 * @buf: register buffer
649 * @buf_size: size (in bytes) of register buffer
651 * If the provided register buffer isn't large enough for the chip's
652 * full register range, the register dump will be truncated to the
653 * register buffer's size.
655 void t4_get_regs(struct adapter
*adap
, void *buf
, size_t buf_size
)
657 static const unsigned int t4_reg_ranges
[] = {
879 static const unsigned int t5_reg_ranges
[] = {
1319 u32
*buf_end
= (u32
*)((char *)buf
+ buf_size
);
1320 const unsigned int *reg_ranges
;
1321 int reg_ranges_size
, range
;
1322 unsigned int chip_version
= CHELSIO_CHIP_VERSION(adap
->params
.chip
);
1324 /* Select the right set of register ranges to dump depending on the
1325 * adapter chip type.
1327 switch (chip_version
) {
1329 reg_ranges
= t4_reg_ranges
;
1330 reg_ranges_size
= ARRAY_SIZE(t4_reg_ranges
);
1334 reg_ranges
= t5_reg_ranges
;
1335 reg_ranges_size
= ARRAY_SIZE(t5_reg_ranges
);
1339 dev_err(adap
->pdev_dev
,
1340 "Unsupported chip version %d\n", chip_version
);
1344 /* Clear the register buffer and insert the appropriate register
1345 * values selected by the above register ranges.
1347 memset(buf
, 0, buf_size
);
1348 for (range
= 0; range
< reg_ranges_size
; range
+= 2) {
1349 unsigned int reg
= reg_ranges
[range
];
1350 unsigned int last_reg
= reg_ranges
[range
+ 1];
1351 u32
*bufp
= (u32
*)((char *)buf
+ reg
);
1353 /* Iterate across the register range filling in the register
1354 * buffer but don't write past the end of the register buffer.
1356 while (reg
<= last_reg
&& bufp
< buf_end
) {
1357 *bufp
++ = t4_read_reg(adap
, reg
);
1363 #define EEPROM_STAT_ADDR 0x7bfc
1364 #define VPD_BASE 0x400
1365 #define VPD_BASE_OLD 0
1366 #define VPD_LEN 1024
1367 #define CHELSIO_VPD_UNIQUE_ID 0x82
1370 * t4_seeprom_wp - enable/disable EEPROM write protection
1371 * @adapter: the adapter
1372 * @enable: whether to enable or disable write protection
1374 * Enables or disables write protection on the serial EEPROM.
1376 int t4_seeprom_wp(struct adapter
*adapter
, bool enable
)
1378 unsigned int v
= enable
? 0xc : 0;
1379 int ret
= pci_write_vpd(adapter
->pdev
, EEPROM_STAT_ADDR
, 4, &v
);
1380 return ret
< 0 ? ret
: 0;
1384 * get_vpd_params - read VPD parameters from VPD EEPROM
1385 * @adapter: adapter to read
1386 * @p: where to store the parameters
1388 * Reads card parameters stored in VPD EEPROM.
1390 int get_vpd_params(struct adapter
*adapter
, struct vpd_params
*p
)
1392 u32 cclk_param
, cclk_val
;
1396 unsigned int vpdr_len
, kw_offset
, id_len
;
1398 vpd
= vmalloc(VPD_LEN
);
1402 ret
= pci_read_vpd(adapter
->pdev
, VPD_BASE
, sizeof(u32
), vpd
);
1406 /* The VPD shall have a unique identifier specified by the PCI SIG.
1407 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
1408 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
1409 * is expected to automatically put this entry at the
1410 * beginning of the VPD.
1412 addr
= *vpd
== CHELSIO_VPD_UNIQUE_ID
? VPD_BASE
: VPD_BASE_OLD
;
1414 ret
= pci_read_vpd(adapter
->pdev
, addr
, VPD_LEN
, vpd
);
1418 if (vpd
[0] != PCI_VPD_LRDT_ID_STRING
) {
1419 dev_err(adapter
->pdev_dev
, "missing VPD ID string\n");
1424 id_len
= pci_vpd_lrdt_size(vpd
);
1425 if (id_len
> ID_LEN
)
1428 i
= pci_vpd_find_tag(vpd
, 0, VPD_LEN
, PCI_VPD_LRDT_RO_DATA
);
1430 dev_err(adapter
->pdev_dev
, "missing VPD-R section\n");
1435 vpdr_len
= pci_vpd_lrdt_size(&vpd
[i
]);
1436 kw_offset
= i
+ PCI_VPD_LRDT_TAG_SIZE
;
1437 if (vpdr_len
+ kw_offset
> VPD_LEN
) {
1438 dev_err(adapter
->pdev_dev
, "bad VPD-R length %u\n", vpdr_len
);
1443 #define FIND_VPD_KW(var, name) do { \
1444 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
1446 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
1450 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
1453 FIND_VPD_KW(i
, "RV");
1454 for (csum
= 0; i
>= 0; i
--)
1458 dev_err(adapter
->pdev_dev
,
1459 "corrupted VPD EEPROM, actual csum %u\n", csum
);
1464 FIND_VPD_KW(ec
, "EC");
1465 FIND_VPD_KW(sn
, "SN");
1466 FIND_VPD_KW(pn
, "PN");
1469 memcpy(p
->id
, vpd
+ PCI_VPD_LRDT_TAG_SIZE
, id_len
);
1471 memcpy(p
->ec
, vpd
+ ec
, EC_LEN
);
1473 i
= pci_vpd_info_field_size(vpd
+ sn
- PCI_VPD_INFO_FLD_HDR_SIZE
);
1474 memcpy(p
->sn
, vpd
+ sn
, min(i
, SERNUM_LEN
));
1476 i
= pci_vpd_info_field_size(vpd
+ pn
- PCI_VPD_INFO_FLD_HDR_SIZE
);
1477 memcpy(p
->pn
, vpd
+ pn
, min(i
, PN_LEN
));
1481 * Ask firmware for the Core Clock since it knows how to translate the
1482 * Reference Clock ('V2') VPD field into a Core Clock value ...
1484 cclk_param
= (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV
) |
1485 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK
));
1486 ret
= t4_query_params(adapter
, adapter
->mbox
, 0, 0,
1487 1, &cclk_param
, &cclk_val
);
1498 /* serial flash and firmware constants */
1500 SF_ATTEMPTS
= 10, /* max retries for SF operations */
1502 /* flash command opcodes */
1503 SF_PROG_PAGE
= 2, /* program page */
1504 SF_WR_DISABLE
= 4, /* disable writes */
1505 SF_RD_STATUS
= 5, /* read status register */
1506 SF_WR_ENABLE
= 6, /* enable writes */
1507 SF_RD_DATA_FAST
= 0xb, /* read flash */
1508 SF_RD_ID
= 0x9f, /* read ID */
1509 SF_ERASE_SECTOR
= 0xd8, /* erase sector */
1511 FW_MAX_SIZE
= 16 * SF_SEC_SIZE
,
1515 * sf1_read - read data from the serial flash
1516 * @adapter: the adapter
1517 * @byte_cnt: number of bytes to read
1518 * @cont: whether another operation will be chained
1519 * @lock: whether to lock SF for PL access only
1520 * @valp: where to store the read data
1522 * Reads up to 4 bytes of data from the serial flash. The location of
1523 * the read needs to be specified prior to calling this by issuing the
1524 * appropriate commands to the serial flash.
1526 static int sf1_read(struct adapter
*adapter
, unsigned int byte_cnt
, int cont
,
1527 int lock
, u32
*valp
)
1531 if (!byte_cnt
|| byte_cnt
> 4)
1533 if (t4_read_reg(adapter
, SF_OP_A
) & SF_BUSY_F
)
1535 t4_write_reg(adapter
, SF_OP_A
, SF_LOCK_V(lock
) |
1536 SF_CONT_V(cont
) | BYTECNT_V(byte_cnt
- 1));
1537 ret
= t4_wait_op_done(adapter
, SF_OP_A
, SF_BUSY_F
, 0, SF_ATTEMPTS
, 5);
1539 *valp
= t4_read_reg(adapter
, SF_DATA_A
);
1544 * sf1_write - write data to the serial flash
1545 * @adapter: the adapter
1546 * @byte_cnt: number of bytes to write
1547 * @cont: whether another operation will be chained
1548 * @lock: whether to lock SF for PL access only
1549 * @val: value to write
1551 * Writes up to 4 bytes of data to the serial flash. The location of
1552 * the write needs to be specified prior to calling this by issuing the
1553 * appropriate commands to the serial flash.
1555 static int sf1_write(struct adapter
*adapter
, unsigned int byte_cnt
, int cont
,
1558 if (!byte_cnt
|| byte_cnt
> 4)
1560 if (t4_read_reg(adapter
, SF_OP_A
) & SF_BUSY_F
)
1562 t4_write_reg(adapter
, SF_DATA_A
, val
);
1563 t4_write_reg(adapter
, SF_OP_A
, SF_LOCK_V(lock
) |
1564 SF_CONT_V(cont
) | BYTECNT_V(byte_cnt
- 1) | OP_V(1));
1565 return t4_wait_op_done(adapter
, SF_OP_A
, SF_BUSY_F
, 0, SF_ATTEMPTS
, 5);
1569 * flash_wait_op - wait for a flash operation to complete
1570 * @adapter: the adapter
1571 * @attempts: max number of polls of the status register
1572 * @delay: delay between polls in ms
1574 * Wait for a flash operation to complete by polling the status register.
1576 static int flash_wait_op(struct adapter
*adapter
, int attempts
, int delay
)
1582 if ((ret
= sf1_write(adapter
, 1, 1, 1, SF_RD_STATUS
)) != 0 ||
1583 (ret
= sf1_read(adapter
, 1, 0, 1, &status
)) != 0)
1587 if (--attempts
== 0)
1595 * t4_read_flash - read words from serial flash
1596 * @adapter: the adapter
1597 * @addr: the start address for the read
1598 * @nwords: how many 32-bit words to read
1599 * @data: where to store the read data
1600 * @byte_oriented: whether to store data as bytes or as words
1602 * Read the specified number of 32-bit words from the serial flash.
1603 * If @byte_oriented is set the read data is stored as a byte array
1604 * (i.e., big-endian), otherwise as 32-bit words in the platform's
1605 * natural endianness.
1607 int t4_read_flash(struct adapter
*adapter
, unsigned int addr
,
1608 unsigned int nwords
, u32
*data
, int byte_oriented
)
1612 if (addr
+ nwords
* sizeof(u32
) > adapter
->params
.sf_size
|| (addr
& 3))
1615 addr
= swab32(addr
) | SF_RD_DATA_FAST
;
1617 if ((ret
= sf1_write(adapter
, 4, 1, 0, addr
)) != 0 ||
1618 (ret
= sf1_read(adapter
, 1, 1, 0, data
)) != 0)
1621 for ( ; nwords
; nwords
--, data
++) {
1622 ret
= sf1_read(adapter
, 4, nwords
> 1, nwords
== 1, data
);
1624 t4_write_reg(adapter
, SF_OP_A
, 0); /* unlock SF */
1628 *data
= (__force __u32
)(cpu_to_be32(*data
));
1634 * t4_write_flash - write up to a page of data to the serial flash
1635 * @adapter: the adapter
1636 * @addr: the start address to write
1637 * @n: length of data to write in bytes
1638 * @data: the data to write
1640 * Writes up to a page of data (256 bytes) to the serial flash starting
1641 * at the given address. All the data must be written to the same page.
1643 static int t4_write_flash(struct adapter
*adapter
, unsigned int addr
,
1644 unsigned int n
, const u8
*data
)
1648 unsigned int i
, c
, left
, val
, offset
= addr
& 0xff;
1650 if (addr
>= adapter
->params
.sf_size
|| offset
+ n
> SF_PAGE_SIZE
)
1653 val
= swab32(addr
) | SF_PROG_PAGE
;
1655 if ((ret
= sf1_write(adapter
, 1, 0, 1, SF_WR_ENABLE
)) != 0 ||
1656 (ret
= sf1_write(adapter
, 4, 1, 1, val
)) != 0)
1659 for (left
= n
; left
; left
-= c
) {
1661 for (val
= 0, i
= 0; i
< c
; ++i
)
1662 val
= (val
<< 8) + *data
++;
1664 ret
= sf1_write(adapter
, c
, c
!= left
, 1, val
);
1668 ret
= flash_wait_op(adapter
, 8, 1);
1672 t4_write_reg(adapter
, SF_OP_A
, 0); /* unlock SF */
1674 /* Read the page to verify the write succeeded */
1675 ret
= t4_read_flash(adapter
, addr
& ~0xff, ARRAY_SIZE(buf
), buf
, 1);
1679 if (memcmp(data
- n
, (u8
*)buf
+ offset
, n
)) {
1680 dev_err(adapter
->pdev_dev
,
1681 "failed to correctly write the flash page at %#x\n",
1688 t4_write_reg(adapter
, SF_OP_A
, 0); /* unlock SF */
1693 * t4_get_fw_version - read the firmware version
1694 * @adapter: the adapter
1695 * @vers: where to place the version
1697 * Reads the FW version from flash.
1699 int t4_get_fw_version(struct adapter
*adapter
, u32
*vers
)
1701 return t4_read_flash(adapter
, FLASH_FW_START
+
1702 offsetof(struct fw_hdr
, fw_ver
), 1,
1707 * t4_get_tp_version - read the TP microcode version
1708 * @adapter: the adapter
1709 * @vers: where to place the version
1711 * Reads the TP microcode version from flash.
1713 int t4_get_tp_version(struct adapter
*adapter
, u32
*vers
)
1715 return t4_read_flash(adapter
, FLASH_FW_START
+
1716 offsetof(struct fw_hdr
, tp_microcode_ver
),
1721 * t4_get_exprom_version - return the Expansion ROM version (if any)
1722 * @adapter: the adapter
1723 * @vers: where to place the version
1725 * Reads the Expansion ROM header from FLASH and returns the version
1726 * number (if present) through the @vers return value pointer. We return
1727 * this in the Firmware Version Format since it's convenient. Return
1728 * 0 on success, -ENOENT if no Expansion ROM is present.
1730 int t4_get_exprom_version(struct adapter
*adap
, u32
*vers
)
1732 struct exprom_header
{
1733 unsigned char hdr_arr
[16]; /* must start with 0x55aa */
1734 unsigned char hdr_ver
[4]; /* Expansion ROM version */
1736 u32 exprom_header_buf
[DIV_ROUND_UP(sizeof(struct exprom_header
),
1740 ret
= t4_read_flash(adap
, FLASH_EXP_ROM_START
,
1741 ARRAY_SIZE(exprom_header_buf
), exprom_header_buf
,
1746 hdr
= (struct exprom_header
*)exprom_header_buf
;
1747 if (hdr
->hdr_arr
[0] != 0x55 || hdr
->hdr_arr
[1] != 0xaa)
1750 *vers
= (FW_HDR_FW_VER_MAJOR_V(hdr
->hdr_ver
[0]) |
1751 FW_HDR_FW_VER_MINOR_V(hdr
->hdr_ver
[1]) |
1752 FW_HDR_FW_VER_MICRO_V(hdr
->hdr_ver
[2]) |
1753 FW_HDR_FW_VER_BUILD_V(hdr
->hdr_ver
[3]));
1757 /* Is the given firmware API compatible with the one the driver was compiled
1760 static int fw_compatible(const struct fw_hdr
*hdr1
, const struct fw_hdr
*hdr2
)
1763 /* short circuit if it's the exact same firmware version */
1764 if (hdr1
->chip
== hdr2
->chip
&& hdr1
->fw_ver
== hdr2
->fw_ver
)
1767 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1768 if (hdr1
->chip
== hdr2
->chip
&& SAME_INTF(nic
) && SAME_INTF(vnic
) &&
1769 SAME_INTF(ri
) && SAME_INTF(iscsi
) && SAME_INTF(fcoe
))
1776 /* The firmware in the filesystem is usable, but should it be installed?
1777 * This routine explains itself in detail if it indicates the filesystem
1778 * firmware should be installed.
1780 static int should_install_fs_fw(struct adapter
*adap
, int card_fw_usable
,
1785 if (!card_fw_usable
) {
1786 reason
= "incompatible or unusable";
1791 reason
= "older than the version supported with this driver";
1798 dev_err(adap
->pdev_dev
, "firmware on card (%u.%u.%u.%u) is %s, "
1799 "installing firmware %u.%u.%u.%u on card.\n",
1800 FW_HDR_FW_VER_MAJOR_G(c
), FW_HDR_FW_VER_MINOR_G(c
),
1801 FW_HDR_FW_VER_MICRO_G(c
), FW_HDR_FW_VER_BUILD_G(c
), reason
,
1802 FW_HDR_FW_VER_MAJOR_G(k
), FW_HDR_FW_VER_MINOR_G(k
),
1803 FW_HDR_FW_VER_MICRO_G(k
), FW_HDR_FW_VER_BUILD_G(k
));
1808 int t4_prep_fw(struct adapter
*adap
, struct fw_info
*fw_info
,
1809 const u8
*fw_data
, unsigned int fw_size
,
1810 struct fw_hdr
*card_fw
, enum dev_state state
,
1813 int ret
, card_fw_usable
, fs_fw_usable
;
1814 const struct fw_hdr
*fs_fw
;
1815 const struct fw_hdr
*drv_fw
;
1817 drv_fw
= &fw_info
->fw_hdr
;
1819 /* Read the header of the firmware on the card */
1820 ret
= -t4_read_flash(adap
, FLASH_FW_START
,
1821 sizeof(*card_fw
) / sizeof(uint32_t),
1822 (uint32_t *)card_fw
, 1);
1824 card_fw_usable
= fw_compatible(drv_fw
, (const void *)card_fw
);
1826 dev_err(adap
->pdev_dev
,
1827 "Unable to read card's firmware header: %d\n", ret
);
1831 if (fw_data
!= NULL
) {
1832 fs_fw
= (const void *)fw_data
;
1833 fs_fw_usable
= fw_compatible(drv_fw
, fs_fw
);
1839 if (card_fw_usable
&& card_fw
->fw_ver
== drv_fw
->fw_ver
&&
1840 (!fs_fw_usable
|| fs_fw
->fw_ver
== drv_fw
->fw_ver
)) {
1841 /* Common case: the firmware on the card is an exact match and
1842 * the filesystem one is an exact match too, or the filesystem
1843 * one is absent/incompatible.
1845 } else if (fs_fw_usable
&& state
== DEV_STATE_UNINIT
&&
1846 should_install_fs_fw(adap
, card_fw_usable
,
1847 be32_to_cpu(fs_fw
->fw_ver
),
1848 be32_to_cpu(card_fw
->fw_ver
))) {
1849 ret
= -t4_fw_upgrade(adap
, adap
->mbox
, fw_data
,
1852 dev_err(adap
->pdev_dev
,
1853 "failed to install firmware: %d\n", ret
);
1857 /* Installed successfully, update the cached header too. */
1860 *reset
= 0; /* already reset as part of load_fw */
1863 if (!card_fw_usable
) {
1866 d
= be32_to_cpu(drv_fw
->fw_ver
);
1867 c
= be32_to_cpu(card_fw
->fw_ver
);
1868 k
= fs_fw
? be32_to_cpu(fs_fw
->fw_ver
) : 0;
1870 dev_err(adap
->pdev_dev
, "Cannot find a usable firmware: "
1872 "driver compiled with %d.%d.%d.%d, "
1873 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
1875 FW_HDR_FW_VER_MAJOR_G(d
), FW_HDR_FW_VER_MINOR_G(d
),
1876 FW_HDR_FW_VER_MICRO_G(d
), FW_HDR_FW_VER_BUILD_G(d
),
1877 FW_HDR_FW_VER_MAJOR_G(c
), FW_HDR_FW_VER_MINOR_G(c
),
1878 FW_HDR_FW_VER_MICRO_G(c
), FW_HDR_FW_VER_BUILD_G(c
),
1879 FW_HDR_FW_VER_MAJOR_G(k
), FW_HDR_FW_VER_MINOR_G(k
),
1880 FW_HDR_FW_VER_MICRO_G(k
), FW_HDR_FW_VER_BUILD_G(k
));
1885 /* We're using whatever's on the card and it's known to be good. */
1886 adap
->params
.fw_vers
= be32_to_cpu(card_fw
->fw_ver
);
1887 adap
->params
.tp_vers
= be32_to_cpu(card_fw
->tp_microcode_ver
);
1894 * t4_flash_erase_sectors - erase a range of flash sectors
1895 * @adapter: the adapter
1896 * @start: the first sector to erase
1897 * @end: the last sector to erase
1899 * Erases the sectors in the given inclusive range.
1901 static int t4_flash_erase_sectors(struct adapter
*adapter
, int start
, int end
)
1905 if (end
>= adapter
->params
.sf_nsec
)
1908 while (start
<= end
) {
1909 if ((ret
= sf1_write(adapter
, 1, 0, 1, SF_WR_ENABLE
)) != 0 ||
1910 (ret
= sf1_write(adapter
, 4, 0, 1,
1911 SF_ERASE_SECTOR
| (start
<< 8))) != 0 ||
1912 (ret
= flash_wait_op(adapter
, 14, 500)) != 0) {
1913 dev_err(adapter
->pdev_dev
,
1914 "erase of flash sector %d failed, error %d\n",
1920 t4_write_reg(adapter
, SF_OP_A
, 0); /* unlock SF */
1925 * t4_flash_cfg_addr - return the address of the flash configuration file
1926 * @adapter: the adapter
1928 * Return the address within the flash where the Firmware Configuration
1931 unsigned int t4_flash_cfg_addr(struct adapter
*adapter
)
1933 if (adapter
->params
.sf_size
== 0x100000)
1934 return FLASH_FPGA_CFG_START
;
1936 return FLASH_CFG_START
;
1939 /* Return TRUE if the specified firmware matches the adapter. I.e. T4
1940 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
1941 * and emit an error message for mismatched firmware to save our caller the
1944 static bool t4_fw_matches_chip(const struct adapter
*adap
,
1945 const struct fw_hdr
*hdr
)
1947 /* The expression below will return FALSE for any unsupported adapter
1948 * which will keep us "honest" in the future ...
1950 if ((is_t4(adap
->params
.chip
) && hdr
->chip
== FW_HDR_CHIP_T4
) ||
1951 (is_t5(adap
->params
.chip
) && hdr
->chip
== FW_HDR_CHIP_T5
))
1954 dev_err(adap
->pdev_dev
,
1955 "FW image (%d) is not suitable for this adapter (%d)\n",
1956 hdr
->chip
, CHELSIO_CHIP_VERSION(adap
->params
.chip
));
1961 * t4_load_fw - download firmware
1962 * @adap: the adapter
1963 * @fw_data: the firmware image to write
1966 * Write the supplied firmware image to the card's serial flash.
1968 int t4_load_fw(struct adapter
*adap
, const u8
*fw_data
, unsigned int size
)
1973 u8 first_page
[SF_PAGE_SIZE
];
1974 const __be32
*p
= (const __be32
*)fw_data
;
1975 const struct fw_hdr
*hdr
= (const struct fw_hdr
*)fw_data
;
1976 unsigned int sf_sec_size
= adap
->params
.sf_size
/ adap
->params
.sf_nsec
;
1977 unsigned int fw_img_start
= adap
->params
.sf_fw_start
;
1978 unsigned int fw_start_sec
= fw_img_start
/ sf_sec_size
;
1981 dev_err(adap
->pdev_dev
, "FW image has no data\n");
1985 dev_err(adap
->pdev_dev
,
1986 "FW image size not multiple of 512 bytes\n");
1989 if ((unsigned int)be16_to_cpu(hdr
->len512
) * 512 != size
) {
1990 dev_err(adap
->pdev_dev
,
1991 "FW image size differs from size in FW header\n");
1994 if (size
> FW_MAX_SIZE
) {
1995 dev_err(adap
->pdev_dev
, "FW image too large, max is %u bytes\n",
1999 if (!t4_fw_matches_chip(adap
, hdr
))
2002 for (csum
= 0, i
= 0; i
< size
/ sizeof(csum
); i
++)
2003 csum
+= be32_to_cpu(p
[i
]);
2005 if (csum
!= 0xffffffff) {
2006 dev_err(adap
->pdev_dev
,
2007 "corrupted firmware image, checksum %#x\n", csum
);
2011 i
= DIV_ROUND_UP(size
, sf_sec_size
); /* # of sectors spanned */
2012 ret
= t4_flash_erase_sectors(adap
, fw_start_sec
, fw_start_sec
+ i
- 1);
2017 * We write the correct version at the end so the driver can see a bad
2018 * version if the FW write fails. Start by writing a copy of the
2019 * first page with a bad version.
2021 memcpy(first_page
, fw_data
, SF_PAGE_SIZE
);
2022 ((struct fw_hdr
*)first_page
)->fw_ver
= cpu_to_be32(0xffffffff);
2023 ret
= t4_write_flash(adap
, fw_img_start
, SF_PAGE_SIZE
, first_page
);
2027 addr
= fw_img_start
;
2028 for (size
-= SF_PAGE_SIZE
; size
; size
-= SF_PAGE_SIZE
) {
2029 addr
+= SF_PAGE_SIZE
;
2030 fw_data
+= SF_PAGE_SIZE
;
2031 ret
= t4_write_flash(adap
, addr
, SF_PAGE_SIZE
, fw_data
);
2036 ret
= t4_write_flash(adap
,
2037 fw_img_start
+ offsetof(struct fw_hdr
, fw_ver
),
2038 sizeof(hdr
->fw_ver
), (const u8
*)&hdr
->fw_ver
);
2041 dev_err(adap
->pdev_dev
, "firmware download failed, error %d\n",
2044 ret
= t4_get_fw_version(adap
, &adap
->params
.fw_vers
);
2049 * t4_phy_fw_ver - return current PHY firmware version
2050 * @adap: the adapter
2051 * @phy_fw_ver: return value buffer for PHY firmware version
2053 * Returns the current version of external PHY firmware on the
2056 int t4_phy_fw_ver(struct adapter
*adap
, int *phy_fw_ver
)
2061 param
= (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV
) |
2062 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW
) |
2063 FW_PARAMS_PARAM_Y_V(adap
->params
.portvec
) |
2064 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION
));
2065 ret
= t4_query_params(adap
, adap
->mbox
, adap
->fn
, 0, 1,
2074 * t4_load_phy_fw - download port PHY firmware
2075 * @adap: the adapter
2076 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
2077 * @win_lock: the lock to use to guard the memory copy
2078 * @phy_fw_version: function to check PHY firmware versions
2079 * @phy_fw_data: the PHY firmware image to write
2080 * @phy_fw_size: image size
2082 * Transfer the specified PHY firmware to the adapter. If a non-NULL
2083 * @phy_fw_version is supplied, then it will be used to determine if
2084 * it's necessary to perform the transfer by comparing the version
2085 * of any existing adapter PHY firmware with that of the passed in
2086 * PHY firmware image. If @win_lock is non-NULL then it will be used
2087 * around the call to t4_memory_rw() which transfers the PHY firmware
2090 * A negative error number will be returned if an error occurs. If
2091 * version number support is available and there's no need to upgrade
2092 * the firmware, 0 will be returned. If firmware is successfully
2093 * transferred to the adapter, 1 will be retured.
2095 * NOTE: some adapters only have local RAM to store the PHY firmware. As
2096 * a result, a RESET of the adapter would cause that RAM to lose its
2097 * contents. Thus, loading PHY firmware on such adapters must happen
2098 * after any FW_RESET_CMDs ...
2100 int t4_load_phy_fw(struct adapter
*adap
,
2101 int win
, spinlock_t
*win_lock
,
2102 int (*phy_fw_version
)(const u8
*, size_t),
2103 const u8
*phy_fw_data
, size_t phy_fw_size
)
2105 unsigned long mtype
= 0, maddr
= 0;
2107 int cur_phy_fw_ver
= 0, new_phy_fw_vers
= 0;
2110 /* If we have version number support, then check to see if the adapter
2111 * already has up-to-date PHY firmware loaded.
2113 if (phy_fw_version
) {
2114 new_phy_fw_vers
= phy_fw_version(phy_fw_data
, phy_fw_size
);
2115 ret
= t4_phy_fw_ver(adap
, &cur_phy_fw_ver
);
2119 if (cur_phy_fw_ver
>= new_phy_fw_vers
) {
2120 CH_WARN(adap
, "PHY Firmware already up-to-date, "
2121 "version %#x\n", cur_phy_fw_ver
);
2126 /* Ask the firmware where it wants us to copy the PHY firmware image.
2127 * The size of the file requires a special version of the READ coommand
2128 * which will pass the file size via the values field in PARAMS_CMD and
2129 * retrieve the return value from firmware and place it in the same
2132 param
= (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV
) |
2133 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW
) |
2134 FW_PARAMS_PARAM_Y_V(adap
->params
.portvec
) |
2135 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD
));
2137 ret
= t4_query_params_rw(adap
, adap
->mbox
, adap
->fn
, 0, 1,
2142 maddr
= (val
& 0xff) << 16;
2144 /* Copy the supplied PHY Firmware image to the adapter memory location
2145 * allocated by the adapter firmware.
2148 spin_lock_bh(win_lock
);
2149 ret
= t4_memory_rw(adap
, win
, mtype
, maddr
,
2150 phy_fw_size
, (__be32
*)phy_fw_data
,
2153 spin_unlock_bh(win_lock
);
2157 /* Tell the firmware that the PHY firmware image has been written to
2158 * RAM and it can now start copying it over to the PHYs. The chip
2159 * firmware will RESET the affected PHYs as part of this operation
2160 * leaving them running the new PHY firmware image.
2162 param
= (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV
) |
2163 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW
) |
2164 FW_PARAMS_PARAM_Y_V(adap
->params
.portvec
) |
2165 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD
));
2166 ret
= t4_set_params_timeout(adap
, adap
->mbox
, adap
->fn
, 0, 1,
2167 ¶m
, &val
, 30000);
2169 /* If we have version number support, then check to see that the new
2170 * firmware got loaded properly.
2172 if (phy_fw_version
) {
2173 ret
= t4_phy_fw_ver(adap
, &cur_phy_fw_ver
);
2177 if (cur_phy_fw_ver
!= new_phy_fw_vers
) {
2178 CH_WARN(adap
, "PHY Firmware did not update: "
2179 "version on adapter %#x, "
2180 "version flashed %#x\n",
2181 cur_phy_fw_ver
, new_phy_fw_vers
);
2190 * t4_fwcache - firmware cache operation
2191 * @adap: the adapter
2192 * @op : the operation (flush or flush and invalidate)
2194 int t4_fwcache(struct adapter
*adap
, enum fw_params_param_dev_fwcache op
)
2196 struct fw_params_cmd c
;
2198 memset(&c
, 0, sizeof(c
));
2200 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD
) |
2201 FW_CMD_REQUEST_F
| FW_CMD_WRITE_F
|
2202 FW_PARAMS_CMD_PFN_V(adap
->fn
) |
2203 FW_PARAMS_CMD_VFN_V(0));
2204 c
.retval_len16
= cpu_to_be32(FW_LEN16(c
));
2206 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV
) |
2207 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE
));
2208 c
.param
[0].val
= (__force __be32
)op
;
2210 return t4_wr_mbox(adap
, adap
->mbox
, &c
, sizeof(c
), NULL
);
2213 void t4_ulprx_read_la(struct adapter
*adap
, u32
*la_buf
)
2217 for (i
= 0; i
< 8; i
++) {
2218 u32
*p
= la_buf
+ i
;
2220 t4_write_reg(adap
, ULP_RX_LA_CTL_A
, i
);
2221 j
= t4_read_reg(adap
, ULP_RX_LA_WRPTR_A
);
2222 t4_write_reg(adap
, ULP_RX_LA_RDPTR_A
, j
);
2223 for (j
= 0; j
< ULPRX_LA_SIZE
; j
++, p
+= 8)
2224 *p
= t4_read_reg(adap
, ULP_RX_LA_RDDATA_A
);
2228 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
2229 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
2233 * t4_link_start - apply link configuration to MAC/PHY
2234 * @phy: the PHY to setup
2235 * @mac: the MAC to setup
2236 * @lc: the requested link configuration
2238 * Set up a port's MAC and PHY according to a desired link configuration.
2239 * - If the PHY can auto-negotiate first decide what to advertise, then
2240 * enable/disable auto-negotiation as desired, and reset.
2241 * - If the PHY does not auto-negotiate just reset it.
2242 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
2243 * otherwise do it later based on the outcome of auto-negotiation.
2245 int t4_link_start(struct adapter
*adap
, unsigned int mbox
, unsigned int port
,
2246 struct link_config
*lc
)
2248 struct fw_port_cmd c
;
2249 unsigned int fc
= 0, mdi
= FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO
);
2252 if (lc
->requested_fc
& PAUSE_RX
)
2253 fc
|= FW_PORT_CAP_FC_RX
;
2254 if (lc
->requested_fc
& PAUSE_TX
)
2255 fc
|= FW_PORT_CAP_FC_TX
;
2257 memset(&c
, 0, sizeof(c
));
2258 c
.op_to_portid
= cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD
) |
2259 FW_CMD_REQUEST_F
| FW_CMD_EXEC_F
|
2260 FW_PORT_CMD_PORTID_V(port
));
2262 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG
) |
2265 if (!(lc
->supported
& FW_PORT_CAP_ANEG
)) {
2266 c
.u
.l1cfg
.rcap
= cpu_to_be32((lc
->supported
& ADVERT_MASK
) |
2268 lc
->fc
= lc
->requested_fc
& (PAUSE_RX
| PAUSE_TX
);
2269 } else if (lc
->autoneg
== AUTONEG_DISABLE
) {
2270 c
.u
.l1cfg
.rcap
= cpu_to_be32(lc
->requested_speed
| fc
| mdi
);
2271 lc
->fc
= lc
->requested_fc
& (PAUSE_RX
| PAUSE_TX
);
2273 c
.u
.l1cfg
.rcap
= cpu_to_be32(lc
->advertising
| fc
| mdi
);
2275 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
2279 * t4_restart_aneg - restart autonegotiation
2280 * @adap: the adapter
2281 * @mbox: mbox to use for the FW command
2282 * @port: the port id
2284 * Restarts autonegotiation for the selected port.
2286 int t4_restart_aneg(struct adapter
*adap
, unsigned int mbox
, unsigned int port
)
2288 struct fw_port_cmd c
;
2290 memset(&c
, 0, sizeof(c
));
2291 c
.op_to_portid
= cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD
) |
2292 FW_CMD_REQUEST_F
| FW_CMD_EXEC_F
|
2293 FW_PORT_CMD_PORTID_V(port
));
2295 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG
) |
2297 c
.u
.l1cfg
.rcap
= cpu_to_be32(FW_PORT_CAP_ANEG
);
2298 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
2301 typedef void (*int_handler_t
)(struct adapter
*adap
);
2304 unsigned int mask
; /* bits to check in interrupt status */
2305 const char *msg
; /* message to print or NULL */
2306 short stat_idx
; /* stat counter to increment or -1 */
2307 unsigned short fatal
; /* whether the condition reported is fatal */
2308 int_handler_t int_handler
; /* platform-specific int handler */
2312 * t4_handle_intr_status - table driven interrupt handler
2313 * @adapter: the adapter that generated the interrupt
2314 * @reg: the interrupt status register to process
2315 * @acts: table of interrupt actions
2317 * A table driven interrupt handler that applies a set of masks to an
2318 * interrupt status word and performs the corresponding actions if the
2319 * interrupts described by the mask have occurred. The actions include
2320 * optionally emitting a warning or alert message. The table is terminated
2321 * by an entry specifying mask 0. Returns the number of fatal interrupt
2324 static int t4_handle_intr_status(struct adapter
*adapter
, unsigned int reg
,
2325 const struct intr_info
*acts
)
2328 unsigned int mask
= 0;
2329 unsigned int status
= t4_read_reg(adapter
, reg
);
2331 for ( ; acts
->mask
; ++acts
) {
2332 if (!(status
& acts
->mask
))
2336 dev_alert(adapter
->pdev_dev
, "%s (0x%x)\n", acts
->msg
,
2337 status
& acts
->mask
);
2338 } else if (acts
->msg
&& printk_ratelimit())
2339 dev_warn(adapter
->pdev_dev
, "%s (0x%x)\n", acts
->msg
,
2340 status
& acts
->mask
);
2341 if (acts
->int_handler
)
2342 acts
->int_handler(adapter
);
2346 if (status
) /* clear processed interrupts */
2347 t4_write_reg(adapter
, reg
, status
);
2352 * Interrupt handler for the PCIE module.
2354 static void pcie_intr_handler(struct adapter
*adapter
)
2356 static const struct intr_info sysbus_intr_info
[] = {
2357 { RNPP_F
, "RXNP array parity error", -1, 1 },
2358 { RPCP_F
, "RXPC array parity error", -1, 1 },
2359 { RCIP_F
, "RXCIF array parity error", -1, 1 },
2360 { RCCP_F
, "Rx completions control array parity error", -1, 1 },
2361 { RFTP_F
, "RXFT array parity error", -1, 1 },
2364 static const struct intr_info pcie_port_intr_info
[] = {
2365 { TPCP_F
, "TXPC array parity error", -1, 1 },
2366 { TNPP_F
, "TXNP array parity error", -1, 1 },
2367 { TFTP_F
, "TXFT array parity error", -1, 1 },
2368 { TCAP_F
, "TXCA array parity error", -1, 1 },
2369 { TCIP_F
, "TXCIF array parity error", -1, 1 },
2370 { RCAP_F
, "RXCA array parity error", -1, 1 },
2371 { OTDD_F
, "outbound request TLP discarded", -1, 1 },
2372 { RDPE_F
, "Rx data parity error", -1, 1 },
2373 { TDUE_F
, "Tx uncorrectable data error", -1, 1 },
2376 static const struct intr_info pcie_intr_info
[] = {
2377 { MSIADDRLPERR_F
, "MSI AddrL parity error", -1, 1 },
2378 { MSIADDRHPERR_F
, "MSI AddrH parity error", -1, 1 },
2379 { MSIDATAPERR_F
, "MSI data parity error", -1, 1 },
2380 { MSIXADDRLPERR_F
, "MSI-X AddrL parity error", -1, 1 },
2381 { MSIXADDRHPERR_F
, "MSI-X AddrH parity error", -1, 1 },
2382 { MSIXDATAPERR_F
, "MSI-X data parity error", -1, 1 },
2383 { MSIXDIPERR_F
, "MSI-X DI parity error", -1, 1 },
2384 { PIOCPLPERR_F
, "PCI PIO completion FIFO parity error", -1, 1 },
2385 { PIOREQPERR_F
, "PCI PIO request FIFO parity error", -1, 1 },
2386 { TARTAGPERR_F
, "PCI PCI target tag FIFO parity error", -1, 1 },
2387 { CCNTPERR_F
, "PCI CMD channel count parity error", -1, 1 },
2388 { CREQPERR_F
, "PCI CMD channel request parity error", -1, 1 },
2389 { CRSPPERR_F
, "PCI CMD channel response parity error", -1, 1 },
2390 { DCNTPERR_F
, "PCI DMA channel count parity error", -1, 1 },
2391 { DREQPERR_F
, "PCI DMA channel request parity error", -1, 1 },
2392 { DRSPPERR_F
, "PCI DMA channel response parity error", -1, 1 },
2393 { HCNTPERR_F
, "PCI HMA channel count parity error", -1, 1 },
2394 { HREQPERR_F
, "PCI HMA channel request parity error", -1, 1 },
2395 { HRSPPERR_F
, "PCI HMA channel response parity error", -1, 1 },
2396 { CFGSNPPERR_F
, "PCI config snoop FIFO parity error", -1, 1 },
2397 { FIDPERR_F
, "PCI FID parity error", -1, 1 },
2398 { INTXCLRPERR_F
, "PCI INTx clear parity error", -1, 1 },
2399 { MATAGPERR_F
, "PCI MA tag parity error", -1, 1 },
2400 { PIOTAGPERR_F
, "PCI PIO tag parity error", -1, 1 },
2401 { RXCPLPERR_F
, "PCI Rx completion parity error", -1, 1 },
2402 { RXWRPERR_F
, "PCI Rx write parity error", -1, 1 },
2403 { RPLPERR_F
, "PCI replay buffer parity error", -1, 1 },
2404 { PCIESINT_F
, "PCI core secondary fault", -1, 1 },
2405 { PCIEPINT_F
, "PCI core primary fault", -1, 1 },
2406 { UNXSPLCPLERR_F
, "PCI unexpected split completion error",
2411 static struct intr_info t5_pcie_intr_info
[] = {
2412 { MSTGRPPERR_F
, "Master Response Read Queue parity error",
2414 { MSTTIMEOUTPERR_F
, "Master Timeout FIFO parity error", -1, 1 },
2415 { MSIXSTIPERR_F
, "MSI-X STI SRAM parity error", -1, 1 },
2416 { MSIXADDRLPERR_F
, "MSI-X AddrL parity error", -1, 1 },
2417 { MSIXADDRHPERR_F
, "MSI-X AddrH parity error", -1, 1 },
2418 { MSIXDATAPERR_F
, "MSI-X data parity error", -1, 1 },
2419 { MSIXDIPERR_F
, "MSI-X DI parity error", -1, 1 },
2420 { PIOCPLGRPPERR_F
, "PCI PIO completion Group FIFO parity error",
2422 { PIOREQGRPPERR_F
, "PCI PIO request Group FIFO parity error",
2424 { TARTAGPERR_F
, "PCI PCI target tag FIFO parity error", -1, 1 },
2425 { MSTTAGQPERR_F
, "PCI master tag queue parity error", -1, 1 },
2426 { CREQPERR_F
, "PCI CMD channel request parity error", -1, 1 },
2427 { CRSPPERR_F
, "PCI CMD channel response parity error", -1, 1 },
2428 { DREQWRPERR_F
, "PCI DMA channel write request parity error",
2430 { DREQPERR_F
, "PCI DMA channel request parity error", -1, 1 },
2431 { DRSPPERR_F
, "PCI DMA channel response parity error", -1, 1 },
2432 { HREQWRPERR_F
, "PCI HMA channel count parity error", -1, 1 },
2433 { HREQPERR_F
, "PCI HMA channel request parity error", -1, 1 },
2434 { HRSPPERR_F
, "PCI HMA channel response parity error", -1, 1 },
2435 { CFGSNPPERR_F
, "PCI config snoop FIFO parity error", -1, 1 },
2436 { FIDPERR_F
, "PCI FID parity error", -1, 1 },
2437 { VFIDPERR_F
, "PCI INTx clear parity error", -1, 1 },
2438 { MAGRPPERR_F
, "PCI MA group FIFO parity error", -1, 1 },
2439 { PIOTAGPERR_F
, "PCI PIO tag parity error", -1, 1 },
2440 { IPRXHDRGRPPERR_F
, "PCI IP Rx header group parity error",
2442 { IPRXDATAGRPPERR_F
, "PCI IP Rx data group parity error",
2444 { RPLPERR_F
, "PCI IP replay buffer parity error", -1, 1 },
2445 { IPSOTPERR_F
, "PCI IP SOT buffer parity error", -1, 1 },
2446 { TRGT1GRPPERR_F
, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2447 { READRSPERR_F
, "Outbound read error", -1, 0 },
2453 if (is_t4(adapter
->params
.chip
))
2454 fat
= t4_handle_intr_status(adapter
,
2455 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A
,
2457 t4_handle_intr_status(adapter
,
2458 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A
,
2459 pcie_port_intr_info
) +
2460 t4_handle_intr_status(adapter
, PCIE_INT_CAUSE_A
,
2463 fat
= t4_handle_intr_status(adapter
, PCIE_INT_CAUSE_A
,
2467 t4_fatal_err(adapter
);
2471 * TP interrupt handler.
2473 static void tp_intr_handler(struct adapter
*adapter
)
2475 static const struct intr_info tp_intr_info
[] = {
2476 { 0x3fffffff, "TP parity error", -1, 1 },
2477 { FLMTXFLSTEMPTY_F
, "TP out of Tx pages", -1, 1 },
2481 if (t4_handle_intr_status(adapter
, TP_INT_CAUSE_A
, tp_intr_info
))
2482 t4_fatal_err(adapter
);
2486 * SGE interrupt handler.
2488 static void sge_intr_handler(struct adapter
*adapter
)
2492 static const struct intr_info sge_intr_info
[] = {
2493 { ERR_CPL_EXCEED_IQE_SIZE_F
,
2494 "SGE received CPL exceeding IQE size", -1, 1 },
2495 { ERR_INVALID_CIDX_INC_F
,
2496 "SGE GTS CIDX increment too large", -1, 0 },
2497 { ERR_CPL_OPCODE_0_F
, "SGE received 0-length CPL", -1, 0 },
2498 { DBFIFO_LP_INT_F
, NULL
, -1, 0, t4_db_full
},
2499 { DBFIFO_HP_INT_F
, NULL
, -1, 0, t4_db_full
},
2500 { ERR_DROPPED_DB_F
, NULL
, -1, 0, t4_db_dropped
},
2501 { ERR_DATA_CPL_ON_HIGH_QID1_F
| ERR_DATA_CPL_ON_HIGH_QID0_F
,
2502 "SGE IQID > 1023 received CPL for FL", -1, 0 },
2503 { ERR_BAD_DB_PIDX3_F
, "SGE DBP 3 pidx increment too large", -1,
2505 { ERR_BAD_DB_PIDX2_F
, "SGE DBP 2 pidx increment too large", -1,
2507 { ERR_BAD_DB_PIDX1_F
, "SGE DBP 1 pidx increment too large", -1,
2509 { ERR_BAD_DB_PIDX0_F
, "SGE DBP 0 pidx increment too large", -1,
2511 { ERR_ING_CTXT_PRIO_F
,
2512 "SGE too many priority ingress contexts", -1, 0 },
2513 { ERR_EGR_CTXT_PRIO_F
,
2514 "SGE too many priority egress contexts", -1, 0 },
2515 { INGRESS_SIZE_ERR_F
, "SGE illegal ingress QID", -1, 0 },
2516 { EGRESS_SIZE_ERR_F
, "SGE illegal egress QID", -1, 0 },
2520 v
= (u64
)t4_read_reg(adapter
, SGE_INT_CAUSE1_A
) |
2521 ((u64
)t4_read_reg(adapter
, SGE_INT_CAUSE2_A
) << 32);
2523 dev_alert(adapter
->pdev_dev
, "SGE parity error (%#llx)\n",
2524 (unsigned long long)v
);
2525 t4_write_reg(adapter
, SGE_INT_CAUSE1_A
, v
);
2526 t4_write_reg(adapter
, SGE_INT_CAUSE2_A
, v
>> 32);
2529 if (t4_handle_intr_status(adapter
, SGE_INT_CAUSE3_A
, sge_intr_info
) ||
2531 t4_fatal_err(adapter
);
2534 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
2535 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
2536 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
2537 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
2540 * CIM interrupt handler.
2542 static void cim_intr_handler(struct adapter
*adapter
)
2544 static const struct intr_info cim_intr_info
[] = {
2545 { PREFDROPINT_F
, "CIM control register prefetch drop", -1, 1 },
2546 { CIM_OBQ_INTR
, "CIM OBQ parity error", -1, 1 },
2547 { CIM_IBQ_INTR
, "CIM IBQ parity error", -1, 1 },
2548 { MBUPPARERR_F
, "CIM mailbox uP parity error", -1, 1 },
2549 { MBHOSTPARERR_F
, "CIM mailbox host parity error", -1, 1 },
2550 { TIEQINPARERRINT_F
, "CIM TIEQ outgoing parity error", -1, 1 },
2551 { TIEQOUTPARERRINT_F
, "CIM TIEQ incoming parity error", -1, 1 },
2554 static const struct intr_info cim_upintr_info
[] = {
2555 { RSVDSPACEINT_F
, "CIM reserved space access", -1, 1 },
2556 { ILLTRANSINT_F
, "CIM illegal transaction", -1, 1 },
2557 { ILLWRINT_F
, "CIM illegal write", -1, 1 },
2558 { ILLRDINT_F
, "CIM illegal read", -1, 1 },
2559 { ILLRDBEINT_F
, "CIM illegal read BE", -1, 1 },
2560 { ILLWRBEINT_F
, "CIM illegal write BE", -1, 1 },
2561 { SGLRDBOOTINT_F
, "CIM single read from boot space", -1, 1 },
2562 { SGLWRBOOTINT_F
, "CIM single write to boot space", -1, 1 },
2563 { BLKWRBOOTINT_F
, "CIM block write to boot space", -1, 1 },
2564 { SGLRDFLASHINT_F
, "CIM single read from flash space", -1, 1 },
2565 { SGLWRFLASHINT_F
, "CIM single write to flash space", -1, 1 },
2566 { BLKWRFLASHINT_F
, "CIM block write to flash space", -1, 1 },
2567 { SGLRDEEPROMINT_F
, "CIM single EEPROM read", -1, 1 },
2568 { SGLWREEPROMINT_F
, "CIM single EEPROM write", -1, 1 },
2569 { BLKRDEEPROMINT_F
, "CIM block EEPROM read", -1, 1 },
2570 { BLKWREEPROMINT_F
, "CIM block EEPROM write", -1, 1 },
2571 { SGLRDCTLINT_F
, "CIM single read from CTL space", -1, 1 },
2572 { SGLWRCTLINT_F
, "CIM single write to CTL space", -1, 1 },
2573 { BLKRDCTLINT_F
, "CIM block read from CTL space", -1, 1 },
2574 { BLKWRCTLINT_F
, "CIM block write to CTL space", -1, 1 },
2575 { SGLRDPLINT_F
, "CIM single read from PL space", -1, 1 },
2576 { SGLWRPLINT_F
, "CIM single write to PL space", -1, 1 },
2577 { BLKRDPLINT_F
, "CIM block read from PL space", -1, 1 },
2578 { BLKWRPLINT_F
, "CIM block write to PL space", -1, 1 },
2579 { REQOVRLOOKUPINT_F
, "CIM request FIFO overwrite", -1, 1 },
2580 { RSPOVRLOOKUPINT_F
, "CIM response FIFO overwrite", -1, 1 },
2581 { TIMEOUTINT_F
, "CIM PIF timeout", -1, 1 },
2582 { TIMEOUTMAINT_F
, "CIM PIF MA timeout", -1, 1 },
2588 if (t4_read_reg(adapter
, PCIE_FW_A
) & PCIE_FW_ERR_F
)
2589 t4_report_fw_error(adapter
);
2591 fat
= t4_handle_intr_status(adapter
, CIM_HOST_INT_CAUSE_A
,
2593 t4_handle_intr_status(adapter
, CIM_HOST_UPACC_INT_CAUSE_A
,
2596 t4_fatal_err(adapter
);
2600 * ULP RX interrupt handler.
2602 static void ulprx_intr_handler(struct adapter
*adapter
)
2604 static const struct intr_info ulprx_intr_info
[] = {
2605 { 0x1800000, "ULPRX context error", -1, 1 },
2606 { 0x7fffff, "ULPRX parity error", -1, 1 },
2610 if (t4_handle_intr_status(adapter
, ULP_RX_INT_CAUSE_A
, ulprx_intr_info
))
2611 t4_fatal_err(adapter
);
2615 * ULP TX interrupt handler.
2617 static void ulptx_intr_handler(struct adapter
*adapter
)
2619 static const struct intr_info ulptx_intr_info
[] = {
2620 { PBL_BOUND_ERR_CH3_F
, "ULPTX channel 3 PBL out of bounds", -1,
2622 { PBL_BOUND_ERR_CH2_F
, "ULPTX channel 2 PBL out of bounds", -1,
2624 { PBL_BOUND_ERR_CH1_F
, "ULPTX channel 1 PBL out of bounds", -1,
2626 { PBL_BOUND_ERR_CH0_F
, "ULPTX channel 0 PBL out of bounds", -1,
2628 { 0xfffffff, "ULPTX parity error", -1, 1 },
2632 if (t4_handle_intr_status(adapter
, ULP_TX_INT_CAUSE_A
, ulptx_intr_info
))
2633 t4_fatal_err(adapter
);
2637 * PM TX interrupt handler.
2639 static void pmtx_intr_handler(struct adapter
*adapter
)
2641 static const struct intr_info pmtx_intr_info
[] = {
2642 { PCMD_LEN_OVFL0_F
, "PMTX channel 0 pcmd too large", -1, 1 },
2643 { PCMD_LEN_OVFL1_F
, "PMTX channel 1 pcmd too large", -1, 1 },
2644 { PCMD_LEN_OVFL2_F
, "PMTX channel 2 pcmd too large", -1, 1 },
2645 { ZERO_C_CMD_ERROR_F
, "PMTX 0-length pcmd", -1, 1 },
2646 { PMTX_FRAMING_ERROR_F
, "PMTX framing error", -1, 1 },
2647 { OESPI_PAR_ERROR_F
, "PMTX oespi parity error", -1, 1 },
2648 { DB_OPTIONS_PAR_ERROR_F
, "PMTX db_options parity error",
2650 { ICSPI_PAR_ERROR_F
, "PMTX icspi parity error", -1, 1 },
2651 { PMTX_C_PCMD_PAR_ERROR_F
, "PMTX c_pcmd parity error", -1, 1},
2655 if (t4_handle_intr_status(adapter
, PM_TX_INT_CAUSE_A
, pmtx_intr_info
))
2656 t4_fatal_err(adapter
);
2660 * PM RX interrupt handler.
2662 static void pmrx_intr_handler(struct adapter
*adapter
)
2664 static const struct intr_info pmrx_intr_info
[] = {
2665 { ZERO_E_CMD_ERROR_F
, "PMRX 0-length pcmd", -1, 1 },
2666 { PMRX_FRAMING_ERROR_F
, "PMRX framing error", -1, 1 },
2667 { OCSPI_PAR_ERROR_F
, "PMRX ocspi parity error", -1, 1 },
2668 { DB_OPTIONS_PAR_ERROR_F
, "PMRX db_options parity error",
2670 { IESPI_PAR_ERROR_F
, "PMRX iespi parity error", -1, 1 },
2671 { PMRX_E_PCMD_PAR_ERROR_F
, "PMRX e_pcmd parity error", -1, 1},
2675 if (t4_handle_intr_status(adapter
, PM_RX_INT_CAUSE_A
, pmrx_intr_info
))
2676 t4_fatal_err(adapter
);
2680 * CPL switch interrupt handler.
2682 static void cplsw_intr_handler(struct adapter
*adapter
)
2684 static const struct intr_info cplsw_intr_info
[] = {
2685 { CIM_OP_MAP_PERR_F
, "CPLSW CIM op_map parity error", -1, 1 },
2686 { CIM_OVFL_ERROR_F
, "CPLSW CIM overflow", -1, 1 },
2687 { TP_FRAMING_ERROR_F
, "CPLSW TP framing error", -1, 1 },
2688 { SGE_FRAMING_ERROR_F
, "CPLSW SGE framing error", -1, 1 },
2689 { CIM_FRAMING_ERROR_F
, "CPLSW CIM framing error", -1, 1 },
2690 { ZERO_SWITCH_ERROR_F
, "CPLSW no-switch error", -1, 1 },
2694 if (t4_handle_intr_status(adapter
, CPL_INTR_CAUSE_A
, cplsw_intr_info
))
2695 t4_fatal_err(adapter
);
2699 * LE interrupt handler.
2701 static void le_intr_handler(struct adapter
*adap
)
2703 static const struct intr_info le_intr_info
[] = {
2704 { LIPMISS_F
, "LE LIP miss", -1, 0 },
2705 { LIP0_F
, "LE 0 LIP error", -1, 0 },
2706 { PARITYERR_F
, "LE parity error", -1, 1 },
2707 { UNKNOWNCMD_F
, "LE unknown command", -1, 1 },
2708 { REQQPARERR_F
, "LE request queue parity error", -1, 1 },
2712 if (t4_handle_intr_status(adap
, LE_DB_INT_CAUSE_A
, le_intr_info
))
2717 * MPS interrupt handler.
2719 static void mps_intr_handler(struct adapter
*adapter
)
2721 static const struct intr_info mps_rx_intr_info
[] = {
2722 { 0xffffff, "MPS Rx parity error", -1, 1 },
2725 static const struct intr_info mps_tx_intr_info
[] = {
2726 { TPFIFO_V(TPFIFO_M
), "MPS Tx TP FIFO parity error", -1, 1 },
2727 { NCSIFIFO_F
, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2728 { TXDATAFIFO_V(TXDATAFIFO_M
), "MPS Tx data FIFO parity error",
2730 { TXDESCFIFO_V(TXDESCFIFO_M
), "MPS Tx desc FIFO parity error",
2732 { BUBBLE_F
, "MPS Tx underflow", -1, 1 },
2733 { SECNTERR_F
, "MPS Tx SOP/EOP error", -1, 1 },
2734 { FRMERR_F
, "MPS Tx framing error", -1, 1 },
2737 static const struct intr_info mps_trc_intr_info
[] = {
2738 { FILTMEM_V(FILTMEM_M
), "MPS TRC filter parity error", -1, 1 },
2739 { PKTFIFO_V(PKTFIFO_M
), "MPS TRC packet FIFO parity error",
2741 { MISCPERR_F
, "MPS TRC misc parity error", -1, 1 },
2744 static const struct intr_info mps_stat_sram_intr_info
[] = {
2745 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2748 static const struct intr_info mps_stat_tx_intr_info
[] = {
2749 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2752 static const struct intr_info mps_stat_rx_intr_info
[] = {
2753 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2756 static const struct intr_info mps_cls_intr_info
[] = {
2757 { MATCHSRAM_F
, "MPS match SRAM parity error", -1, 1 },
2758 { MATCHTCAM_F
, "MPS match TCAM parity error", -1, 1 },
2759 { HASHSRAM_F
, "MPS hash SRAM parity error", -1, 1 },
2765 fat
= t4_handle_intr_status(adapter
, MPS_RX_PERR_INT_CAUSE_A
,
2767 t4_handle_intr_status(adapter
, MPS_TX_INT_CAUSE_A
,
2769 t4_handle_intr_status(adapter
, MPS_TRC_INT_CAUSE_A
,
2770 mps_trc_intr_info
) +
2771 t4_handle_intr_status(adapter
, MPS_STAT_PERR_INT_CAUSE_SRAM_A
,
2772 mps_stat_sram_intr_info
) +
2773 t4_handle_intr_status(adapter
, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A
,
2774 mps_stat_tx_intr_info
) +
2775 t4_handle_intr_status(adapter
, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A
,
2776 mps_stat_rx_intr_info
) +
2777 t4_handle_intr_status(adapter
, MPS_CLS_INT_CAUSE_A
,
2780 t4_write_reg(adapter
, MPS_INT_CAUSE_A
, 0);
2781 t4_read_reg(adapter
, MPS_INT_CAUSE_A
); /* flush */
2783 t4_fatal_err(adapter
);
2786 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
2790 * EDC/MC interrupt handler.
2792 static void mem_intr_handler(struct adapter
*adapter
, int idx
)
2794 static const char name
[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
2796 unsigned int addr
, cnt_addr
, v
;
2798 if (idx
<= MEM_EDC1
) {
2799 addr
= EDC_REG(EDC_INT_CAUSE_A
, idx
);
2800 cnt_addr
= EDC_REG(EDC_ECC_STATUS_A
, idx
);
2801 } else if (idx
== MEM_MC
) {
2802 if (is_t4(adapter
->params
.chip
)) {
2803 addr
= MC_INT_CAUSE_A
;
2804 cnt_addr
= MC_ECC_STATUS_A
;
2806 addr
= MC_P_INT_CAUSE_A
;
2807 cnt_addr
= MC_P_ECC_STATUS_A
;
2810 addr
= MC_REG(MC_P_INT_CAUSE_A
, 1);
2811 cnt_addr
= MC_REG(MC_P_ECC_STATUS_A
, 1);
2814 v
= t4_read_reg(adapter
, addr
) & MEM_INT_MASK
;
2815 if (v
& PERR_INT_CAUSE_F
)
2816 dev_alert(adapter
->pdev_dev
, "%s FIFO parity error\n",
2818 if (v
& ECC_CE_INT_CAUSE_F
) {
2819 u32 cnt
= ECC_CECNT_G(t4_read_reg(adapter
, cnt_addr
));
2821 t4_write_reg(adapter
, cnt_addr
, ECC_CECNT_V(ECC_CECNT_M
));
2822 if (printk_ratelimit())
2823 dev_warn(adapter
->pdev_dev
,
2824 "%u %s correctable ECC data error%s\n",
2825 cnt
, name
[idx
], cnt
> 1 ? "s" : "");
2827 if (v
& ECC_UE_INT_CAUSE_F
)
2828 dev_alert(adapter
->pdev_dev
,
2829 "%s uncorrectable ECC data error\n", name
[idx
]);
2831 t4_write_reg(adapter
, addr
, v
);
2832 if (v
& (PERR_INT_CAUSE_F
| ECC_UE_INT_CAUSE_F
))
2833 t4_fatal_err(adapter
);
2837 * MA interrupt handler.
2839 static void ma_intr_handler(struct adapter
*adap
)
2841 u32 v
, status
= t4_read_reg(adap
, MA_INT_CAUSE_A
);
2843 if (status
& MEM_PERR_INT_CAUSE_F
) {
2844 dev_alert(adap
->pdev_dev
,
2845 "MA parity error, parity status %#x\n",
2846 t4_read_reg(adap
, MA_PARITY_ERROR_STATUS1_A
));
2847 if (is_t5(adap
->params
.chip
))
2848 dev_alert(adap
->pdev_dev
,
2849 "MA parity error, parity status %#x\n",
2851 MA_PARITY_ERROR_STATUS2_A
));
2853 if (status
& MEM_WRAP_INT_CAUSE_F
) {
2854 v
= t4_read_reg(adap
, MA_INT_WRAP_STATUS_A
);
2855 dev_alert(adap
->pdev_dev
, "MA address wrap-around error by "
2856 "client %u to address %#x\n",
2857 MEM_WRAP_CLIENT_NUM_G(v
),
2858 MEM_WRAP_ADDRESS_G(v
) << 4);
2860 t4_write_reg(adap
, MA_INT_CAUSE_A
, status
);
2865 * SMB interrupt handler.
2867 static void smb_intr_handler(struct adapter
*adap
)
2869 static const struct intr_info smb_intr_info
[] = {
2870 { MSTTXFIFOPARINT_F
, "SMB master Tx FIFO parity error", -1, 1 },
2871 { MSTRXFIFOPARINT_F
, "SMB master Rx FIFO parity error", -1, 1 },
2872 { SLVFIFOPARINT_F
, "SMB slave FIFO parity error", -1, 1 },
2876 if (t4_handle_intr_status(adap
, SMB_INT_CAUSE_A
, smb_intr_info
))
2881 * NC-SI interrupt handler.
2883 static void ncsi_intr_handler(struct adapter
*adap
)
2885 static const struct intr_info ncsi_intr_info
[] = {
2886 { CIM_DM_PRTY_ERR_F
, "NC-SI CIM parity error", -1, 1 },
2887 { MPS_DM_PRTY_ERR_F
, "NC-SI MPS parity error", -1, 1 },
2888 { TXFIFO_PRTY_ERR_F
, "NC-SI Tx FIFO parity error", -1, 1 },
2889 { RXFIFO_PRTY_ERR_F
, "NC-SI Rx FIFO parity error", -1, 1 },
2893 if (t4_handle_intr_status(adap
, NCSI_INT_CAUSE_A
, ncsi_intr_info
))
2898 * XGMAC interrupt handler.
2900 static void xgmac_intr_handler(struct adapter
*adap
, int port
)
2902 u32 v
, int_cause_reg
;
2904 if (is_t4(adap
->params
.chip
))
2905 int_cause_reg
= PORT_REG(port
, XGMAC_PORT_INT_CAUSE_A
);
2907 int_cause_reg
= T5_PORT_REG(port
, MAC_PORT_INT_CAUSE_A
);
2909 v
= t4_read_reg(adap
, int_cause_reg
);
2911 v
&= TXFIFO_PRTY_ERR_F
| RXFIFO_PRTY_ERR_F
;
2915 if (v
& TXFIFO_PRTY_ERR_F
)
2916 dev_alert(adap
->pdev_dev
, "XGMAC %d Tx FIFO parity error\n",
2918 if (v
& RXFIFO_PRTY_ERR_F
)
2919 dev_alert(adap
->pdev_dev
, "XGMAC %d Rx FIFO parity error\n",
2921 t4_write_reg(adap
, PORT_REG(port
, XGMAC_PORT_INT_CAUSE_A
), v
);
2926 * PL interrupt handler.
2928 static void pl_intr_handler(struct adapter
*adap
)
2930 static const struct intr_info pl_intr_info
[] = {
2931 { FATALPERR_F
, "T4 fatal parity error", -1, 1 },
2932 { PERRVFID_F
, "PL VFID_MAP parity error", -1, 1 },
2936 if (t4_handle_intr_status(adap
, PL_PL_INT_CAUSE_A
, pl_intr_info
))
2940 #define PF_INTR_MASK (PFSW_F)
2941 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
2942 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
2943 CPL_SWITCH_F | SGE_F | ULP_TX_F)
2946 * t4_slow_intr_handler - control path interrupt handler
2947 * @adapter: the adapter
2949 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
2950 * The designation 'slow' is because it involves register reads, while
2951 * data interrupts typically don't involve any MMIOs.
2953 int t4_slow_intr_handler(struct adapter
*adapter
)
2955 u32 cause
= t4_read_reg(adapter
, PL_INT_CAUSE_A
);
2957 if (!(cause
& GLBL_INTR_MASK
))
2960 cim_intr_handler(adapter
);
2962 mps_intr_handler(adapter
);
2964 ncsi_intr_handler(adapter
);
2966 pl_intr_handler(adapter
);
2968 smb_intr_handler(adapter
);
2969 if (cause
& XGMAC0_F
)
2970 xgmac_intr_handler(adapter
, 0);
2971 if (cause
& XGMAC1_F
)
2972 xgmac_intr_handler(adapter
, 1);
2973 if (cause
& XGMAC_KR0_F
)
2974 xgmac_intr_handler(adapter
, 2);
2975 if (cause
& XGMAC_KR1_F
)
2976 xgmac_intr_handler(adapter
, 3);
2978 pcie_intr_handler(adapter
);
2980 mem_intr_handler(adapter
, MEM_MC
);
2981 if (!is_t4(adapter
->params
.chip
) && (cause
& MC1_S
))
2982 mem_intr_handler(adapter
, MEM_MC1
);
2984 mem_intr_handler(adapter
, MEM_EDC0
);
2986 mem_intr_handler(adapter
, MEM_EDC1
);
2988 le_intr_handler(adapter
);
2990 tp_intr_handler(adapter
);
2992 ma_intr_handler(adapter
);
2993 if (cause
& PM_TX_F
)
2994 pmtx_intr_handler(adapter
);
2995 if (cause
& PM_RX_F
)
2996 pmrx_intr_handler(adapter
);
2997 if (cause
& ULP_RX_F
)
2998 ulprx_intr_handler(adapter
);
2999 if (cause
& CPL_SWITCH_F
)
3000 cplsw_intr_handler(adapter
);
3002 sge_intr_handler(adapter
);
3003 if (cause
& ULP_TX_F
)
3004 ulptx_intr_handler(adapter
);
3006 /* Clear the interrupts just processed for which we are the master. */
3007 t4_write_reg(adapter
, PL_INT_CAUSE_A
, cause
& GLBL_INTR_MASK
);
3008 (void)t4_read_reg(adapter
, PL_INT_CAUSE_A
); /* flush */
3013 * t4_intr_enable - enable interrupts
3014 * @adapter: the adapter whose interrupts should be enabled
3016 * Enable PF-specific interrupts for the calling function and the top-level
3017 * interrupt concentrator for global interrupts. Interrupts are already
3018 * enabled at each module, here we just enable the roots of the interrupt
3021 * Note: this function should be called only when the driver manages
3022 * non PF-specific interrupts from the various HW modules. Only one PCI
3023 * function at a time should be doing this.
3025 void t4_intr_enable(struct adapter
*adapter
)
3027 u32 pf
= SOURCEPF_G(t4_read_reg(adapter
, PL_WHOAMI_A
));
3029 t4_write_reg(adapter
, SGE_INT_ENABLE3_A
, ERR_CPL_EXCEED_IQE_SIZE_F
|
3030 ERR_INVALID_CIDX_INC_F
| ERR_CPL_OPCODE_0_F
|
3031 ERR_DROPPED_DB_F
| ERR_DATA_CPL_ON_HIGH_QID1_F
|
3032 ERR_DATA_CPL_ON_HIGH_QID0_F
| ERR_BAD_DB_PIDX3_F
|
3033 ERR_BAD_DB_PIDX2_F
| ERR_BAD_DB_PIDX1_F
|
3034 ERR_BAD_DB_PIDX0_F
| ERR_ING_CTXT_PRIO_F
|
3035 ERR_EGR_CTXT_PRIO_F
| INGRESS_SIZE_ERR_F
|
3036 DBFIFO_HP_INT_F
| DBFIFO_LP_INT_F
|
3038 t4_write_reg(adapter
, MYPF_REG(PL_PF_INT_ENABLE_A
), PF_INTR_MASK
);
3039 t4_set_reg_field(adapter
, PL_INT_MAP0_A
, 0, 1 << pf
);
3043 * t4_intr_disable - disable interrupts
3044 * @adapter: the adapter whose interrupts should be disabled
3046 * Disable interrupts. We only disable the top-level interrupt
3047 * concentrators. The caller must be a PCI function managing global
3050 void t4_intr_disable(struct adapter
*adapter
)
3052 u32 pf
= SOURCEPF_G(t4_read_reg(adapter
, PL_WHOAMI_A
));
3054 t4_write_reg(adapter
, MYPF_REG(PL_PF_INT_ENABLE_A
), 0);
3055 t4_set_reg_field(adapter
, PL_INT_MAP0_A
, 1 << pf
, 0);
3059 * hash_mac_addr - return the hash value of a MAC address
3060 * @addr: the 48-bit Ethernet MAC address
3062 * Hashes a MAC address according to the hash function used by HW inexact
3063 * (hash) address matching.
3065 static int hash_mac_addr(const u8
*addr
)
3067 u32 a
= ((u32
)addr
[0] << 16) | ((u32
)addr
[1] << 8) | addr
[2];
3068 u32 b
= ((u32
)addr
[3] << 16) | ((u32
)addr
[4] << 8) | addr
[5];
3076 * t4_config_rss_range - configure a portion of the RSS mapping table
3077 * @adapter: the adapter
3078 * @mbox: mbox to use for the FW command
3079 * @viid: virtual interface whose RSS subtable is to be written
3080 * @start: start entry in the table to write
3081 * @n: how many table entries to write
3082 * @rspq: values for the response queue lookup table
3083 * @nrspq: number of values in @rspq
3085 * Programs the selected part of the VI's RSS mapping table with the
3086 * provided values. If @nrspq < @n the supplied values are used repeatedly
3087 * until the full table range is populated.
3089 * The caller must ensure the values in @rspq are in the range allowed for
3092 int t4_config_rss_range(struct adapter
*adapter
, int mbox
, unsigned int viid
,
3093 int start
, int n
, const u16
*rspq
, unsigned int nrspq
)
3096 const u16
*rsp
= rspq
;
3097 const u16
*rsp_end
= rspq
+ nrspq
;
3098 struct fw_rss_ind_tbl_cmd cmd
;
3100 memset(&cmd
, 0, sizeof(cmd
));
3101 cmd
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD
) |
3102 FW_CMD_REQUEST_F
| FW_CMD_WRITE_F
|
3103 FW_RSS_IND_TBL_CMD_VIID_V(viid
));
3104 cmd
.retval_len16
= cpu_to_be32(FW_LEN16(cmd
));
3106 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
3108 int nq
= min(n
, 32);
3109 __be32
*qp
= &cmd
.iq0_to_iq2
;
3111 cmd
.niqid
= cpu_to_be16(nq
);
3112 cmd
.startidx
= cpu_to_be16(start
);
3120 v
= FW_RSS_IND_TBL_CMD_IQ0_V(*rsp
);
3121 if (++rsp
>= rsp_end
)
3123 v
|= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp
);
3124 if (++rsp
>= rsp_end
)
3126 v
|= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp
);
3127 if (++rsp
>= rsp_end
)
3130 *qp
++ = cpu_to_be32(v
);
3134 ret
= t4_wr_mbox(adapter
, mbox
, &cmd
, sizeof(cmd
), NULL
);
3142 * t4_config_glbl_rss - configure the global RSS mode
3143 * @adapter: the adapter
3144 * @mbox: mbox to use for the FW command
3145 * @mode: global RSS mode
3146 * @flags: mode-specific flags
3148 * Sets the global RSS mode.
3150 int t4_config_glbl_rss(struct adapter
*adapter
, int mbox
, unsigned int mode
,
3153 struct fw_rss_glb_config_cmd c
;
3155 memset(&c
, 0, sizeof(c
));
3156 c
.op_to_write
= cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD
) |
3157 FW_CMD_REQUEST_F
| FW_CMD_WRITE_F
);
3158 c
.retval_len16
= cpu_to_be32(FW_LEN16(c
));
3159 if (mode
== FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL
) {
3160 c
.u
.manual
.mode_pkd
=
3161 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode
));
3162 } else if (mode
== FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL
) {
3163 c
.u
.basicvirtual
.mode_pkd
=
3164 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode
));
3165 c
.u
.basicvirtual
.synmapen_to_hashtoeplitz
= cpu_to_be32(flags
);
3168 return t4_wr_mbox(adapter
, mbox
, &c
, sizeof(c
), NULL
);
3172 * t4_config_vi_rss - configure per VI RSS settings
3173 * @adapter: the adapter
3174 * @mbox: mbox to use for the FW command
3177 * @defq: id of the default RSS queue for the VI.
3179 * Configures VI-specific RSS properties.
3181 int t4_config_vi_rss(struct adapter
*adapter
, int mbox
, unsigned int viid
,
3182 unsigned int flags
, unsigned int defq
)
3184 struct fw_rss_vi_config_cmd c
;
3186 memset(&c
, 0, sizeof(c
));
3187 c
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD
) |
3188 FW_CMD_REQUEST_F
| FW_CMD_WRITE_F
|
3189 FW_RSS_VI_CONFIG_CMD_VIID_V(viid
));
3190 c
.retval_len16
= cpu_to_be32(FW_LEN16(c
));
3191 c
.u
.basicvirtual
.defaultq_to_udpen
= cpu_to_be32(flags
|
3192 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq
));
3193 return t4_wr_mbox(adapter
, mbox
, &c
, sizeof(c
), NULL
);
3196 /* Read an RSS table row */
3197 static int rd_rss_row(struct adapter
*adap
, int row
, u32
*val
)
3199 t4_write_reg(adap
, TP_RSS_LKP_TABLE_A
, 0xfff00000 | row
);
3200 return t4_wait_op_done_val(adap
, TP_RSS_LKP_TABLE_A
, LKPTBLROWVLD_F
, 1,
3205 * t4_read_rss - read the contents of the RSS mapping table
3206 * @adapter: the adapter
3207 * @map: holds the contents of the RSS mapping table
3209 * Reads the contents of the RSS hash->queue mapping table.
3211 int t4_read_rss(struct adapter
*adapter
, u16
*map
)
3216 for (i
= 0; i
< RSS_NENTRIES
/ 2; ++i
) {
3217 ret
= rd_rss_row(adapter
, i
, &val
);
3220 *map
++ = LKPTBLQUEUE0_G(val
);
3221 *map
++ = LKPTBLQUEUE1_G(val
);
3227 * t4_read_rss_key - read the global RSS key
3228 * @adap: the adapter
3229 * @key: 10-entry array holding the 320-bit RSS key
3231 * Reads the global 320-bit RSS key.
3233 void t4_read_rss_key(struct adapter
*adap
, u32
*key
)
3235 t4_read_indirect(adap
, TP_PIO_ADDR_A
, TP_PIO_DATA_A
, key
, 10,
3236 TP_RSS_SECRET_KEY0_A
);
3240 * t4_write_rss_key - program one of the RSS keys
3241 * @adap: the adapter
3242 * @key: 10-entry array holding the 320-bit RSS key
3243 * @idx: which RSS key to write
3245 * Writes one of the RSS keys with the given 320-bit value. If @idx is
3246 * 0..15 the corresponding entry in the RSS key table is written,
3247 * otherwise the global RSS key is written.
3249 void t4_write_rss_key(struct adapter
*adap
, const u32
*key
, int idx
)
3251 t4_write_indirect(adap
, TP_PIO_ADDR_A
, TP_PIO_DATA_A
, key
, 10,
3252 TP_RSS_SECRET_KEY0_A
);
3253 if (idx
>= 0 && idx
< 16)
3254 t4_write_reg(adap
, TP_RSS_CONFIG_VRT_A
,
3255 KEYWRADDR_V(idx
) | KEYWREN_F
);
3259 * t4_read_rss_pf_config - read PF RSS Configuration Table
3260 * @adapter: the adapter
3261 * @index: the entry in the PF RSS table to read
3262 * @valp: where to store the returned value
3264 * Reads the PF RSS Configuration Table at the specified index and returns
3265 * the value found there.
3267 void t4_read_rss_pf_config(struct adapter
*adapter
, unsigned int index
,
3270 t4_read_indirect(adapter
, TP_PIO_ADDR_A
, TP_PIO_DATA_A
,
3271 valp
, 1, TP_RSS_PF0_CONFIG_A
+ index
);
3275 * t4_read_rss_vf_config - read VF RSS Configuration Table
3276 * @adapter: the adapter
3277 * @index: the entry in the VF RSS table to read
3278 * @vfl: where to store the returned VFL
3279 * @vfh: where to store the returned VFH
3281 * Reads the VF RSS Configuration Table at the specified index and returns
3282 * the (VFL, VFH) values found there.
3284 void t4_read_rss_vf_config(struct adapter
*adapter
, unsigned int index
,
3287 u32 vrt
, mask
, data
;
3289 mask
= VFWRADDR_V(VFWRADDR_M
);
3290 data
= VFWRADDR_V(index
);
3292 /* Request that the index'th VF Table values be read into VFL/VFH.
3294 vrt
= t4_read_reg(adapter
, TP_RSS_CONFIG_VRT_A
);
3295 vrt
&= ~(VFRDRG_F
| VFWREN_F
| KEYWREN_F
| mask
);
3296 vrt
|= data
| VFRDEN_F
;
3297 t4_write_reg(adapter
, TP_RSS_CONFIG_VRT_A
, vrt
);
3299 /* Grab the VFL/VFH values ...
3301 t4_read_indirect(adapter
, TP_PIO_ADDR_A
, TP_PIO_DATA_A
,
3302 vfl
, 1, TP_RSS_VFL_CONFIG_A
);
3303 t4_read_indirect(adapter
, TP_PIO_ADDR_A
, TP_PIO_DATA_A
,
3304 vfh
, 1, TP_RSS_VFH_CONFIG_A
);
3308 * t4_read_rss_pf_map - read PF RSS Map
3309 * @adapter: the adapter
3311 * Reads the PF RSS Map register and returns its value.
3313 u32
t4_read_rss_pf_map(struct adapter
*adapter
)
3317 t4_read_indirect(adapter
, TP_PIO_ADDR_A
, TP_PIO_DATA_A
,
3318 &pfmap
, 1, TP_RSS_PF_MAP_A
);
3323 * t4_read_rss_pf_mask - read PF RSS Mask
3324 * @adapter: the adapter
3326 * Reads the PF RSS Mask register and returns its value.
3328 u32
t4_read_rss_pf_mask(struct adapter
*adapter
)
3332 t4_read_indirect(adapter
, TP_PIO_ADDR_A
, TP_PIO_DATA_A
,
3333 &pfmask
, 1, TP_RSS_PF_MSK_A
);
3338 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
3339 * @adap: the adapter
3340 * @v4: holds the TCP/IP counter values
3341 * @v6: holds the TCP/IPv6 counter values
3343 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3344 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3346 void t4_tp_get_tcp_stats(struct adapter
*adap
, struct tp_tcp_stats
*v4
,
3347 struct tp_tcp_stats
*v6
)
3349 u32 val
[TP_MIB_TCP_RXT_SEG_LO_A
- TP_MIB_TCP_OUT_RST_A
+ 1];
3351 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
3352 #define STAT(x) val[STAT_IDX(x)]
3353 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3356 t4_read_indirect(adap
, TP_MIB_INDEX_A
, TP_MIB_DATA_A
, val
,
3357 ARRAY_SIZE(val
), TP_MIB_TCP_OUT_RST_A
);
3358 v4
->tcpOutRsts
= STAT(OUT_RST
);
3359 v4
->tcpInSegs
= STAT64(IN_SEG
);
3360 v4
->tcpOutSegs
= STAT64(OUT_SEG
);
3361 v4
->tcpRetransSegs
= STAT64(RXT_SEG
);
3364 t4_read_indirect(adap
, TP_MIB_INDEX_A
, TP_MIB_DATA_A
, val
,
3365 ARRAY_SIZE(val
), TP_MIB_TCP_V6OUT_RST_A
);
3366 v6
->tcpOutRsts
= STAT(OUT_RST
);
3367 v6
->tcpInSegs
= STAT64(IN_SEG
);
3368 v6
->tcpOutSegs
= STAT64(OUT_SEG
);
3369 v6
->tcpRetransSegs
= STAT64(RXT_SEG
);
3377 * t4_read_mtu_tbl - returns the values in the HW path MTU table
3378 * @adap: the adapter
3379 * @mtus: where to store the MTU values
3380 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
3382 * Reads the HW path MTU table.
3384 void t4_read_mtu_tbl(struct adapter
*adap
, u16
*mtus
, u8
*mtu_log
)
3389 for (i
= 0; i
< NMTUS
; ++i
) {
3390 t4_write_reg(adap
, TP_MTU_TABLE_A
,
3391 MTUINDEX_V(0xff) | MTUVALUE_V(i
));
3392 v
= t4_read_reg(adap
, TP_MTU_TABLE_A
);
3393 mtus
[i
] = MTUVALUE_G(v
);
3395 mtu_log
[i
] = MTUWIDTH_G(v
);
3400 * t4_read_cong_tbl - reads the congestion control table
3401 * @adap: the adapter
3402 * @incr: where to store the alpha values
3404 * Reads the additive increments programmed into the HW congestion
3407 void t4_read_cong_tbl(struct adapter
*adap
, u16 incr
[NMTUS
][NCCTRL_WIN
])
3409 unsigned int mtu
, w
;
3411 for (mtu
= 0; mtu
< NMTUS
; ++mtu
)
3412 for (w
= 0; w
< NCCTRL_WIN
; ++w
) {
3413 t4_write_reg(adap
, TP_CCTRL_TABLE_A
,
3414 ROWINDEX_V(0xffff) | (mtu
<< 5) | w
);
3415 incr
[mtu
][w
] = (u16
)t4_read_reg(adap
,
3416 TP_CCTRL_TABLE_A
) & 0x1fff;
3421 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3422 * @adap: the adapter
3423 * @addr: the indirect TP register address
3424 * @mask: specifies the field within the register to modify
3425 * @val: new value for the field
3427 * Sets a field of an indirect TP register to the given value.
3429 void t4_tp_wr_bits_indirect(struct adapter
*adap
, unsigned int addr
,
3430 unsigned int mask
, unsigned int val
)
3432 t4_write_reg(adap
, TP_PIO_ADDR_A
, addr
);
3433 val
|= t4_read_reg(adap
, TP_PIO_DATA_A
) & ~mask
;
3434 t4_write_reg(adap
, TP_PIO_DATA_A
, val
);
/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Initialize the congestion control parameters.  Both arrays must have
 *	room for NCCTRL_WIN (32) entries.
 *	NOTE(review): entries a[9..31] and b[9..12], b[28..31] were not visible
 *	in the reviewed source; values reproduced from the standard table —
 *	confirm against the original file.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = b[30] = b[31] = 6;
}
3481 /* The minimum additive increment value for the congestion control table */
3482 #define CC_MIN_INCR 2U
3485 * t4_load_mtus - write the MTU and congestion control HW tables
3486 * @adap: the adapter
3487 * @mtus: the values for the MTU table
3488 * @alpha: the values for the congestion control alpha parameter
3489 * @beta: the values for the congestion control beta parameter
3491 * Write the HW MTU table with the supplied MTUs and the high-speed
3492 * congestion control table with the supplied alpha, beta, and MTUs.
3493 * We write the two tables together because the additive increments
3494 * depend on the MTUs.
3496 void t4_load_mtus(struct adapter
*adap
, const unsigned short *mtus
,
3497 const unsigned short *alpha
, const unsigned short *beta
)
3499 static const unsigned int avg_pkts
[NCCTRL_WIN
] = {
3500 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3501 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3502 28672, 40960, 57344, 81920, 114688, 163840, 229376
3507 for (i
= 0; i
< NMTUS
; ++i
) {
3508 unsigned int mtu
= mtus
[i
];
3509 unsigned int log2
= fls(mtu
);
3511 if (!(mtu
& ((1 << log2
) >> 2))) /* round */
3513 t4_write_reg(adap
, TP_MTU_TABLE_A
, MTUINDEX_V(i
) |
3514 MTUWIDTH_V(log2
) | MTUVALUE_V(mtu
));
3516 for (w
= 0; w
< NCCTRL_WIN
; ++w
) {
3519 inc
= max(((mtu
- 40) * alpha
[w
]) / avg_pkts
[w
],
3522 t4_write_reg(adap
, TP_CCTRL_TABLE_A
, (i
<< 21) |
3523 (w
<< 16) | (beta
[w
] << 13) | inc
);
3529 * t4_pmtx_get_stats - returns the HW stats from PMTX
3530 * @adap: the adapter
3531 * @cnt: where to store the count statistics
3532 * @cycles: where to store the cycle statistics
3534 * Returns performance statistics from PMTX.
3536 void t4_pmtx_get_stats(struct adapter
*adap
, u32 cnt
[], u64 cycles
[])
3541 for (i
= 0; i
< PM_NSTATS
; i
++) {
3542 t4_write_reg(adap
, PM_TX_STAT_CONFIG_A
, i
+ 1);
3543 cnt
[i
] = t4_read_reg(adap
, PM_TX_STAT_COUNT_A
);
3544 if (is_t4(adap
->params
.chip
)) {
3545 cycles
[i
] = t4_read_reg64(adap
, PM_TX_STAT_LSB_A
);
3547 t4_read_indirect(adap
, PM_TX_DBG_CTRL_A
,
3548 PM_TX_DBG_DATA_A
, data
, 2,
3549 PM_TX_DBG_STAT_MSB_A
);
3550 cycles
[i
] = (((u64
)data
[0] << 32) | data
[1]);
3556 * t4_pmrx_get_stats - returns the HW stats from PMRX
3557 * @adap: the adapter
3558 * @cnt: where to store the count statistics
3559 * @cycles: where to store the cycle statistics
3561 * Returns performance statistics from PMRX.
3563 void t4_pmrx_get_stats(struct adapter
*adap
, u32 cnt
[], u64 cycles
[])
3568 for (i
= 0; i
< PM_NSTATS
; i
++) {
3569 t4_write_reg(adap
, PM_RX_STAT_CONFIG_A
, i
+ 1);
3570 cnt
[i
] = t4_read_reg(adap
, PM_RX_STAT_COUNT_A
);
3571 if (is_t4(adap
->params
.chip
)) {
3572 cycles
[i
] = t4_read_reg64(adap
, PM_RX_STAT_LSB_A
);
3574 t4_read_indirect(adap
, PM_RX_DBG_CTRL_A
,
3575 PM_RX_DBG_DATA_A
, data
, 2,
3576 PM_RX_DBG_STAT_MSB_A
);
3577 cycles
[i
] = (((u64
)data
[0] << 32) | data
[1]);
3583 * t4_get_mps_bg_map - return the buffer groups associated with a port
3584 * @adap: the adapter
3585 * @idx: the port index
3587 * Returns a bitmap indicating which MPS buffer groups are associated
3588 * with the given port. Bit i is set if buffer group i is used by the
3591 unsigned int t4_get_mps_bg_map(struct adapter
*adap
, int idx
)
3593 u32 n
= NUMPORTS_G(t4_read_reg(adap
, MPS_CMN_CTL_A
));
3596 return idx
== 0 ? 0xf : 0;
3598 return idx
< 2 ? (3 << (2 * idx
)) : 0;
3603 * t4_get_port_type_description - return Port Type string description
3604 * @port_type: firmware Port Type enumeration
3606 const char *t4_get_port_type_description(enum fw_port_type port_type
)
3608 static const char *const port_type_description
[] = {
3627 if (port_type
< ARRAY_SIZE(port_type_description
))
3628 return port_type_description
[port_type
];
3633 * t4_get_port_stats - collect port statistics
3634 * @adap: the adapter
3635 * @idx: the port index
3636 * @p: the stats structure to fill
3638 * Collect statistics related to the given port from HW.
3640 void t4_get_port_stats(struct adapter
*adap
, int idx
, struct port_stats
*p
)
3642 u32 bgmap
= t4_get_mps_bg_map(adap
, idx
);
3644 #define GET_STAT(name) \
3645 t4_read_reg64(adap, \
3646 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
3647 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
3648 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
3650 p
->tx_octets
= GET_STAT(TX_PORT_BYTES
);
3651 p
->tx_frames
= GET_STAT(TX_PORT_FRAMES
);
3652 p
->tx_bcast_frames
= GET_STAT(TX_PORT_BCAST
);
3653 p
->tx_mcast_frames
= GET_STAT(TX_PORT_MCAST
);
3654 p
->tx_ucast_frames
= GET_STAT(TX_PORT_UCAST
);
3655 p
->tx_error_frames
= GET_STAT(TX_PORT_ERROR
);
3656 p
->tx_frames_64
= GET_STAT(TX_PORT_64B
);
3657 p
->tx_frames_65_127
= GET_STAT(TX_PORT_65B_127B
);
3658 p
->tx_frames_128_255
= GET_STAT(TX_PORT_128B_255B
);
3659 p
->tx_frames_256_511
= GET_STAT(TX_PORT_256B_511B
);
3660 p
->tx_frames_512_1023
= GET_STAT(TX_PORT_512B_1023B
);
3661 p
->tx_frames_1024_1518
= GET_STAT(TX_PORT_1024B_1518B
);
3662 p
->tx_frames_1519_max
= GET_STAT(TX_PORT_1519B_MAX
);
3663 p
->tx_drop
= GET_STAT(TX_PORT_DROP
);
3664 p
->tx_pause
= GET_STAT(TX_PORT_PAUSE
);
3665 p
->tx_ppp0
= GET_STAT(TX_PORT_PPP0
);
3666 p
->tx_ppp1
= GET_STAT(TX_PORT_PPP1
);
3667 p
->tx_ppp2
= GET_STAT(TX_PORT_PPP2
);
3668 p
->tx_ppp3
= GET_STAT(TX_PORT_PPP3
);
3669 p
->tx_ppp4
= GET_STAT(TX_PORT_PPP4
);
3670 p
->tx_ppp5
= GET_STAT(TX_PORT_PPP5
);
3671 p
->tx_ppp6
= GET_STAT(TX_PORT_PPP6
);
3672 p
->tx_ppp7
= GET_STAT(TX_PORT_PPP7
);
3674 p
->rx_octets
= GET_STAT(RX_PORT_BYTES
);
3675 p
->rx_frames
= GET_STAT(RX_PORT_FRAMES
);
3676 p
->rx_bcast_frames
= GET_STAT(RX_PORT_BCAST
);
3677 p
->rx_mcast_frames
= GET_STAT(RX_PORT_MCAST
);
3678 p
->rx_ucast_frames
= GET_STAT(RX_PORT_UCAST
);
3679 p
->rx_too_long
= GET_STAT(RX_PORT_MTU_ERROR
);
3680 p
->rx_jabber
= GET_STAT(RX_PORT_MTU_CRC_ERROR
);
3681 p
->rx_fcs_err
= GET_STAT(RX_PORT_CRC_ERROR
);
3682 p
->rx_len_err
= GET_STAT(RX_PORT_LEN_ERROR
);
3683 p
->rx_symbol_err
= GET_STAT(RX_PORT_SYM_ERROR
);
3684 p
->rx_runt
= GET_STAT(RX_PORT_LESS_64B
);
3685 p
->rx_frames_64
= GET_STAT(RX_PORT_64B
);
3686 p
->rx_frames_65_127
= GET_STAT(RX_PORT_65B_127B
);
3687 p
->rx_frames_128_255
= GET_STAT(RX_PORT_128B_255B
);
3688 p
->rx_frames_256_511
= GET_STAT(RX_PORT_256B_511B
);
3689 p
->rx_frames_512_1023
= GET_STAT(RX_PORT_512B_1023B
);
3690 p
->rx_frames_1024_1518
= GET_STAT(RX_PORT_1024B_1518B
);
3691 p
->rx_frames_1519_max
= GET_STAT(RX_PORT_1519B_MAX
);
3692 p
->rx_pause
= GET_STAT(RX_PORT_PAUSE
);
3693 p
->rx_ppp0
= GET_STAT(RX_PORT_PPP0
);
3694 p
->rx_ppp1
= GET_STAT(RX_PORT_PPP1
);
3695 p
->rx_ppp2
= GET_STAT(RX_PORT_PPP2
);
3696 p
->rx_ppp3
= GET_STAT(RX_PORT_PPP3
);
3697 p
->rx_ppp4
= GET_STAT(RX_PORT_PPP4
);
3698 p
->rx_ppp5
= GET_STAT(RX_PORT_PPP5
);
3699 p
->rx_ppp6
= GET_STAT(RX_PORT_PPP6
);
3700 p
->rx_ppp7
= GET_STAT(RX_PORT_PPP7
);
3702 p
->rx_ovflow0
= (bgmap
& 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME
) : 0;
3703 p
->rx_ovflow1
= (bgmap
& 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME
) : 0;
3704 p
->rx_ovflow2
= (bgmap
& 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME
) : 0;
3705 p
->rx_ovflow3
= (bgmap
& 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME
) : 0;
3706 p
->rx_trunc0
= (bgmap
& 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME
) : 0;
3707 p
->rx_trunc1
= (bgmap
& 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME
) : 0;
3708 p
->rx_trunc2
= (bgmap
& 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME
) : 0;
3709 p
->rx_trunc3
= (bgmap
& 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME
) : 0;
3716 * t4_wol_magic_enable - enable/disable magic packet WoL
3717 * @adap: the adapter
3718 * @port: the physical port index
3719 * @addr: MAC address expected in magic packets, %NULL to disable
3721 * Enables/disables magic packet wake-on-LAN for the selected port.
3723 void t4_wol_magic_enable(struct adapter
*adap
, unsigned int port
,
3726 u32 mag_id_reg_l
, mag_id_reg_h
, port_cfg_reg
;
3728 if (is_t4(adap
->params
.chip
)) {
3729 mag_id_reg_l
= PORT_REG(port
, XGMAC_PORT_MAGIC_MACID_LO
);
3730 mag_id_reg_h
= PORT_REG(port
, XGMAC_PORT_MAGIC_MACID_HI
);
3731 port_cfg_reg
= PORT_REG(port
, XGMAC_PORT_CFG2_A
);
3733 mag_id_reg_l
= T5_PORT_REG(port
, MAC_PORT_MAGIC_MACID_LO
);
3734 mag_id_reg_h
= T5_PORT_REG(port
, MAC_PORT_MAGIC_MACID_HI
);
3735 port_cfg_reg
= T5_PORT_REG(port
, MAC_PORT_CFG2_A
);
3739 t4_write_reg(adap
, mag_id_reg_l
,
3740 (addr
[2] << 24) | (addr
[3] << 16) |
3741 (addr
[4] << 8) | addr
[5]);
3742 t4_write_reg(adap
, mag_id_reg_h
,
3743 (addr
[0] << 8) | addr
[1]);
3745 t4_set_reg_field(adap
, port_cfg_reg
, MAGICEN_F
,
3746 addr
? MAGICEN_F
: 0);
3750 * t4_wol_pat_enable - enable/disable pattern-based WoL
3751 * @adap: the adapter
3752 * @port: the physical port index
3753 * @map: bitmap of which HW pattern filters to set
3754 * @mask0: byte mask for bytes 0-63 of a packet
3755 * @mask1: byte mask for bytes 64-127 of a packet
3756 * @crc: Ethernet CRC for selected bytes
3757 * @enable: enable/disable switch
3759 * Sets the pattern filters indicated in @map to mask out the bytes
3760 * specified in @mask0/@mask1 in received packets and compare the CRC of
3761 * the resulting packet against @crc. If @enable is %true pattern-based
3762 * WoL is enabled, otherwise disabled.
3764 int t4_wol_pat_enable(struct adapter
*adap
, unsigned int port
, unsigned int map
,
3765 u64 mask0
, u64 mask1
, unsigned int crc
, bool enable
)
3770 if (is_t4(adap
->params
.chip
))
3771 port_cfg_reg
= PORT_REG(port
, XGMAC_PORT_CFG2_A
);
3773 port_cfg_reg
= T5_PORT_REG(port
, MAC_PORT_CFG2_A
);
3776 t4_set_reg_field(adap
, port_cfg_reg
, PATEN_F
, 0);
3782 #define EPIO_REG(name) \
3783 (is_t4(adap->params.chip) ? \
3784 PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
3785 T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
3787 t4_write_reg(adap
, EPIO_REG(DATA1
), mask0
>> 32);
3788 t4_write_reg(adap
, EPIO_REG(DATA2
), mask1
);
3789 t4_write_reg(adap
, EPIO_REG(DATA3
), mask1
>> 32);
3791 for (i
= 0; i
< NWOL_PAT
; i
++, map
>>= 1) {
3795 /* write byte masks */
3796 t4_write_reg(adap
, EPIO_REG(DATA0
), mask0
);
3797 t4_write_reg(adap
, EPIO_REG(OP
), ADDRESS_V(i
) | EPIOWR_F
);
3798 t4_read_reg(adap
, EPIO_REG(OP
)); /* flush */
3799 if (t4_read_reg(adap
, EPIO_REG(OP
)) & SF_BUSY_F
)
3803 t4_write_reg(adap
, EPIO_REG(DATA0
), crc
);
3804 t4_write_reg(adap
, EPIO_REG(OP
), ADDRESS_V(i
+ 32) | EPIOWR_F
);
3805 t4_read_reg(adap
, EPIO_REG(OP
)); /* flush */
3806 if (t4_read_reg(adap
, EPIO_REG(OP
)) & SF_BUSY_F
)
3811 t4_set_reg_field(adap
, PORT_REG(port
, XGMAC_PORT_CFG2_A
), 0, PATEN_F
);
3815 /* t4_mk_filtdelwr - create a delete filter WR
3816 * @ftid: the filter ID
3817 * @wr: the filter work request to populate
3818 * @qid: ingress queue to receive the delete notification
3820 * Creates a filter work request to delete the supplied filter. If @qid is
3821 * negative the delete notification is suppressed.
3823 void t4_mk_filtdelwr(unsigned int ftid
, struct fw_filter_wr
*wr
, int qid
)
3825 memset(wr
, 0, sizeof(*wr
));
3826 wr
->op_pkd
= cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR
));
3827 wr
->len16_pkd
= cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr
) / 16));
3828 wr
->tid_to_iq
= cpu_to_be32(FW_FILTER_WR_TID_V(ftid
) |
3829 FW_FILTER_WR_NOREPLY_V(qid
< 0));
3830 wr
->del_filter_to_l2tix
= cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F
);
3832 wr
->rx_chan_rx_rpl_iq
=
3833 cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid
));
3836 #define INIT_CMD(var, cmd, rd_wr) do { \
3837 (var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
3838 FW_CMD_REQUEST_F | \
3839 FW_CMD_##rd_wr##_F); \
3840 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
3843 int t4_fwaddrspace_write(struct adapter
*adap
, unsigned int mbox
,
3847 struct fw_ldst_cmd c
;
3849 memset(&c
, 0, sizeof(c
));
3850 ldst_addrspace
= FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE
);
3851 c
.op_to_addrspace
= cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD
) |
3855 c
.cycles_to_len16
= cpu_to_be32(FW_LEN16(c
));
3856 c
.u
.addrval
.addr
= cpu_to_be32(addr
);
3857 c
.u
.addrval
.val
= cpu_to_be32(val
);
3859 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
3863 * t4_mdio_rd - read a PHY register through MDIO
3864 * @adap: the adapter
3865 * @mbox: mailbox to use for the FW command
3866 * @phy_addr: the PHY address
3867 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
3868 * @reg: the register to read
3869 * @valp: where to store the value
3871 * Issues a FW command through the given mailbox to read a PHY register.
3873 int t4_mdio_rd(struct adapter
*adap
, unsigned int mbox
, unsigned int phy_addr
,
3874 unsigned int mmd
, unsigned int reg
, u16
*valp
)
3878 struct fw_ldst_cmd c
;
3880 memset(&c
, 0, sizeof(c
));
3881 ldst_addrspace
= FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO
);
3882 c
.op_to_addrspace
= cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD
) |
3883 FW_CMD_REQUEST_F
| FW_CMD_READ_F
|
3885 c
.cycles_to_len16
= cpu_to_be32(FW_LEN16(c
));
3886 c
.u
.mdio
.paddr_mmd
= cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr
) |
3887 FW_LDST_CMD_MMD_V(mmd
));
3888 c
.u
.mdio
.raddr
= cpu_to_be16(reg
);
3890 ret
= t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), &c
);
3892 *valp
= be16_to_cpu(c
.u
.mdio
.rval
);
3897 * t4_mdio_wr - write a PHY register through MDIO
3898 * @adap: the adapter
3899 * @mbox: mailbox to use for the FW command
3900 * @phy_addr: the PHY address
3901 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
3902 * @reg: the register to write
3903 * @valp: value to write
3905 * Issues a FW command through the given mailbox to write a PHY register.
3907 int t4_mdio_wr(struct adapter
*adap
, unsigned int mbox
, unsigned int phy_addr
,
3908 unsigned int mmd
, unsigned int reg
, u16 val
)
3911 struct fw_ldst_cmd c
;
3913 memset(&c
, 0, sizeof(c
));
3914 ldst_addrspace
= FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO
);
3915 c
.op_to_addrspace
= cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD
) |
3916 FW_CMD_REQUEST_F
| FW_CMD_WRITE_F
|
3918 c
.cycles_to_len16
= cpu_to_be32(FW_LEN16(c
));
3919 c
.u
.mdio
.paddr_mmd
= cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr
) |
3920 FW_LDST_CMD_MMD_V(mmd
));
3921 c
.u
.mdio
.raddr
= cpu_to_be16(reg
);
3922 c
.u
.mdio
.rval
= cpu_to_be16(val
);
3924 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
3928 * t4_sge_decode_idma_state - decode the idma state
3929 * @adap: the adapter
3930 * @state: the state idma is stuck in
3932 void t4_sge_decode_idma_state(struct adapter
*adapter
, int state
)
3934 static const char * const t4_decode
[] = {
3936 "IDMA_PUSH_MORE_CPL_FIFO",
3937 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
3939 "IDMA_PHYSADDR_SEND_PCIEHDR",
3940 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
3941 "IDMA_PHYSADDR_SEND_PAYLOAD",
3942 "IDMA_SEND_FIFO_TO_IMSG",
3943 "IDMA_FL_REQ_DATA_FL_PREP",
3944 "IDMA_FL_REQ_DATA_FL",
3946 "IDMA_FL_H_REQ_HEADER_FL",
3947 "IDMA_FL_H_SEND_PCIEHDR",
3948 "IDMA_FL_H_PUSH_CPL_FIFO",
3949 "IDMA_FL_H_SEND_CPL",
3950 "IDMA_FL_H_SEND_IP_HDR_FIRST",
3951 "IDMA_FL_H_SEND_IP_HDR",
3952 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
3953 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
3954 "IDMA_FL_H_SEND_IP_HDR_PADDING",
3955 "IDMA_FL_D_SEND_PCIEHDR",
3956 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
3957 "IDMA_FL_D_REQ_NEXT_DATA_FL",
3958 "IDMA_FL_SEND_PCIEHDR",
3959 "IDMA_FL_PUSH_CPL_FIFO",
3961 "IDMA_FL_SEND_PAYLOAD_FIRST",
3962 "IDMA_FL_SEND_PAYLOAD",
3963 "IDMA_FL_REQ_NEXT_DATA_FL",
3964 "IDMA_FL_SEND_NEXT_PCIEHDR",
3965 "IDMA_FL_SEND_PADDING",
3966 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
3967 "IDMA_FL_SEND_FIFO_TO_IMSG",
3968 "IDMA_FL_REQ_DATAFL_DONE",
3969 "IDMA_FL_REQ_HEADERFL_DONE",
3971 static const char * const t5_decode
[] = {
3974 "IDMA_PUSH_MORE_CPL_FIFO",
3975 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
3976 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
3977 "IDMA_PHYSADDR_SEND_PCIEHDR",
3978 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
3979 "IDMA_PHYSADDR_SEND_PAYLOAD",
3980 "IDMA_SEND_FIFO_TO_IMSG",
3981 "IDMA_FL_REQ_DATA_FL",
3983 "IDMA_FL_DROP_SEND_INC",
3984 "IDMA_FL_H_REQ_HEADER_FL",
3985 "IDMA_FL_H_SEND_PCIEHDR",
3986 "IDMA_FL_H_PUSH_CPL_FIFO",
3987 "IDMA_FL_H_SEND_CPL",
3988 "IDMA_FL_H_SEND_IP_HDR_FIRST",
3989 "IDMA_FL_H_SEND_IP_HDR",
3990 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
3991 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
3992 "IDMA_FL_H_SEND_IP_HDR_PADDING",
3993 "IDMA_FL_D_SEND_PCIEHDR",
3994 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
3995 "IDMA_FL_D_REQ_NEXT_DATA_FL",
3996 "IDMA_FL_SEND_PCIEHDR",
3997 "IDMA_FL_PUSH_CPL_FIFO",
3999 "IDMA_FL_SEND_PAYLOAD_FIRST",
4000 "IDMA_FL_SEND_PAYLOAD",
4001 "IDMA_FL_REQ_NEXT_DATA_FL",
4002 "IDMA_FL_SEND_NEXT_PCIEHDR",
4003 "IDMA_FL_SEND_PADDING",
4004 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
4006 static const u32 sge_regs
[] = {
4007 SGE_DEBUG_DATA_LOW_INDEX_2_A
,
4008 SGE_DEBUG_DATA_LOW_INDEX_3_A
,
4009 SGE_DEBUG_DATA_HIGH_INDEX_10_A
,
4011 const char **sge_idma_decode
;
4012 int sge_idma_decode_nstates
;
4015 if (is_t4(adapter
->params
.chip
)) {
4016 sge_idma_decode
= (const char **)t4_decode
;
4017 sge_idma_decode_nstates
= ARRAY_SIZE(t4_decode
);
4019 sge_idma_decode
= (const char **)t5_decode
;
4020 sge_idma_decode_nstates
= ARRAY_SIZE(t5_decode
);
4023 if (state
< sge_idma_decode_nstates
)
4024 CH_WARN(adapter
, "idma state %s\n", sge_idma_decode
[state
]);
4026 CH_WARN(adapter
, "idma state %d unknown\n", state
);
4028 for (i
= 0; i
< ARRAY_SIZE(sge_regs
); i
++)
4029 CH_WARN(adapter
, "SGE register %#x value %#x\n",
4030 sge_regs
[i
], t4_read_reg(adapter
, sge_regs
[i
]));
4034 * t4_fw_hello - establish communication with FW
4035 * @adap: the adapter
4036 * @mbox: mailbox to use for the FW command
4037 * @evt_mbox: mailbox to receive async FW events
4038 * @master: specifies the caller's willingness to be the device master
4039 * @state: returns the current device state (if non-NULL)
4041 * Issues a command to establish communication with FW. Returns either
4042 * an error (negative integer) or the mailbox of the Master PF.
4044 int t4_fw_hello(struct adapter
*adap
, unsigned int mbox
, unsigned int evt_mbox
,
4045 enum dev_master master
, enum dev_state
*state
)
4048 struct fw_hello_cmd c
;
4050 unsigned int master_mbox
;
4051 int retries
= FW_CMD_HELLO_RETRIES
;
4054 memset(&c
, 0, sizeof(c
));
4055 INIT_CMD(c
, HELLO
, WRITE
);
4056 c
.err_to_clearinit
= cpu_to_be32(
4057 FW_HELLO_CMD_MASTERDIS_V(master
== MASTER_CANT
) |
4058 FW_HELLO_CMD_MASTERFORCE_V(master
== MASTER_MUST
) |
4059 FW_HELLO_CMD_MBMASTER_V(master
== MASTER_MUST
?
4060 mbox
: FW_HELLO_CMD_MBMASTER_M
) |
4061 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox
) |
4062 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os
) |
4063 FW_HELLO_CMD_CLEARINIT_F
);
4066 * Issue the HELLO command to the firmware. If it's not successful
4067 * but indicates that we got a "busy" or "timeout" condition, retry
4068 * the HELLO until we exhaust our retry limit. If we do exceed our
4069 * retry limit, check to see if the firmware left us any error
4070 * information and report that if so.
4072 ret
= t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), &c
);
4074 if ((ret
== -EBUSY
|| ret
== -ETIMEDOUT
) && retries
-- > 0)
4076 if (t4_read_reg(adap
, PCIE_FW_A
) & PCIE_FW_ERR_F
)
4077 t4_report_fw_error(adap
);
4081 v
= be32_to_cpu(c
.err_to_clearinit
);
4082 master_mbox
= FW_HELLO_CMD_MBMASTER_G(v
);
4084 if (v
& FW_HELLO_CMD_ERR_F
)
4085 *state
= DEV_STATE_ERR
;
4086 else if (v
& FW_HELLO_CMD_INIT_F
)
4087 *state
= DEV_STATE_INIT
;
4089 *state
= DEV_STATE_UNINIT
;
4093 * If we're not the Master PF then we need to wait around for the
4094 * Master PF Driver to finish setting up the adapter.
4096 * Note that we also do this wait if we're a non-Master-capable PF and
4097 * there is no current Master PF; a Master PF may show up momentarily
4098 * and we wouldn't want to fail pointlessly. (This can happen when an
4099 * OS loads lots of different drivers rapidly at the same time). In
4100 * this case, the Master PF returned by the firmware will be
4101 * PCIE_FW_MASTER_M so the test below will work ...
4103 if ((v
& (FW_HELLO_CMD_ERR_F
|FW_HELLO_CMD_INIT_F
)) == 0 &&
4104 master_mbox
!= mbox
) {
4105 int waiting
= FW_CMD_HELLO_TIMEOUT
;
4108 * Wait for the firmware to either indicate an error or
4109 * initialized state. If we see either of these we bail out
4110 * and report the issue to the caller. If we exhaust the
4111 * "hello timeout" and we haven't exhausted our retries, try
4112 * again. Otherwise bail with a timeout error.
4121 * If neither Error nor Initialialized are indicated
4122 * by the firmware keep waiting till we exaust our
4123 * timeout ... and then retry if we haven't exhausted
4126 pcie_fw
= t4_read_reg(adap
, PCIE_FW_A
);
4127 if (!(pcie_fw
& (PCIE_FW_ERR_F
|PCIE_FW_INIT_F
))) {
4138 * We either have an Error or Initialized condition
4139 * report errors preferentially.
4142 if (pcie_fw
& PCIE_FW_ERR_F
)
4143 *state
= DEV_STATE_ERR
;
4144 else if (pcie_fw
& PCIE_FW_INIT_F
)
4145 *state
= DEV_STATE_INIT
;
4149 * If we arrived before a Master PF was selected and
4150 * there's not a valid Master PF, grab its identity
4153 if (master_mbox
== PCIE_FW_MASTER_M
&&
4154 (pcie_fw
& PCIE_FW_MASTER_VLD_F
))
4155 master_mbox
= PCIE_FW_MASTER_G(pcie_fw
);
4164 * t4_fw_bye - end communication with FW
4165 * @adap: the adapter
4166 * @mbox: mailbox to use for the FW command
4168 * Issues a command to terminate communication with FW.
4170 int t4_fw_bye(struct adapter
*adap
, unsigned int mbox
)
4172 struct fw_bye_cmd c
;
4174 memset(&c
, 0, sizeof(c
));
4175 INIT_CMD(c
, BYE
, WRITE
);
4176 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
4180 * t4_init_cmd - ask FW to initialize the device
4181 * @adap: the adapter
4182 * @mbox: mailbox to use for the FW command
4184 * Issues a command to FW to partially initialize the device. This
4185 * performs initialization that generally doesn't depend on user input.
4187 int t4_early_init(struct adapter
*adap
, unsigned int mbox
)
4189 struct fw_initialize_cmd c
;
4191 memset(&c
, 0, sizeof(c
));
4192 INIT_CMD(c
, INITIALIZE
, WRITE
);
4193 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
4197 * t4_fw_reset - issue a reset to FW
4198 * @adap: the adapter
4199 * @mbox: mailbox to use for the FW command
4200 * @reset: specifies the type of reset to perform
4202 * Issues a reset command of the specified type to FW.
4204 int t4_fw_reset(struct adapter
*adap
, unsigned int mbox
, int reset
)
4206 struct fw_reset_cmd c
;
4208 memset(&c
, 0, sizeof(c
));
4209 INIT_CMD(c
, RESET
, WRITE
);
4210 c
.val
= cpu_to_be32(reset
);
4211 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
4215 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4216 * @adap: the adapter
4217 * @mbox: mailbox to use for the FW RESET command (if desired)
4218 * @force: force uP into RESET even if FW RESET command fails
4220 * Issues a RESET command to firmware (if desired) with a HALT indication
4221 * and then puts the microprocessor into RESET state. The RESET command
4222 * will only be issued if a legitimate mailbox is provided (mbox <=
4223 * PCIE_FW_MASTER_M).
4225 * This is generally used in order for the host to safely manipulate the
4226 * adapter without fear of conflicting with whatever the firmware might
4227 * be doing. The only way out of this state is to RESTART the firmware
4230 static int t4_fw_halt(struct adapter
*adap
, unsigned int mbox
, int force
)
4235 * If a legitimate mailbox is provided, issue a RESET command
4236 * with a HALT indication.
4238 if (mbox
<= PCIE_FW_MASTER_M
) {
4239 struct fw_reset_cmd c
;
4241 memset(&c
, 0, sizeof(c
));
4242 INIT_CMD(c
, RESET
, WRITE
);
4243 c
.val
= cpu_to_be32(PIORST_F
| PIORSTMODE_F
);
4244 c
.halt_pkd
= cpu_to_be32(FW_RESET_CMD_HALT_F
);
4245 ret
= t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
4249 * Normally we won't complete the operation if the firmware RESET
4250 * command fails but if our caller insists we'll go ahead and put the
4251 * uP into RESET. This can be useful if the firmware is hung or even
4252 * missing ... We'll have to take the risk of putting the uP into
4253 * RESET without the cooperation of firmware in that case.
4255 * We also force the firmware's HALT flag to be on in case we bypassed
4256 * the firmware RESET command above or we're dealing with old firmware
4257 * which doesn't have the HALT capability. This will serve as a flag
4258 * for the incoming firmware to know that it's coming out of a HALT
4259 * rather than a RESET ... if it's new enough to understand that ...
4261 if (ret
== 0 || force
) {
4262 t4_set_reg_field(adap
, CIM_BOOT_CFG_A
, UPCRST_F
, UPCRST_F
);
4263 t4_set_reg_field(adap
, PCIE_FW_A
, PCIE_FW_HALT_F
,
4268 * And we always return the result of the firmware RESET command
4269 * even when we force the uP into RESET ...
4275 * t4_fw_restart - restart the firmware by taking the uP out of RESET
4276 * @adap: the adapter
4277 * @reset: if we want to do a RESET to restart things
4279 * Restart firmware previously halted by t4_fw_halt(). On successful
4280 * return the previous PF Master remains as the new PF Master and there
4281 * is no need to issue a new HELLO command, etc.
4283 * We do this in two ways:
4285 * 1. If we're dealing with newer firmware we'll simply want to take
4286 * the chip's microprocessor out of RESET. This will cause the
4287 * firmware to start up from its start vector. And then we'll loop
4288 * until the firmware indicates it's started again (PCIE_FW.HALT
4289 * reset to 0) or we timeout.
4291 * 2. If we're dealing with older firmware then we'll need to RESET
4292 * the chip since older firmware won't recognize the PCIE_FW.HALT
4293 * flag and automatically RESET itself on startup.
4295 static int t4_fw_restart(struct adapter
*adap
, unsigned int mbox
, int reset
)
4299 * Since we're directing the RESET instead of the firmware
4300 * doing it automatically, we need to clear the PCIE_FW.HALT
4303 t4_set_reg_field(adap
, PCIE_FW_A
, PCIE_FW_HALT_F
, 0);
4306 * If we've been given a valid mailbox, first try to get the
4307 * firmware to do the RESET. If that works, great and we can
4308 * return success. Otherwise, if we haven't been given a
4309 * valid mailbox or the RESET command failed, fall back to
4310 * hitting the chip with a hammer.
4312 if (mbox
<= PCIE_FW_MASTER_M
) {
4313 t4_set_reg_field(adap
, CIM_BOOT_CFG_A
, UPCRST_F
, 0);
4315 if (t4_fw_reset(adap
, mbox
,
4316 PIORST_F
| PIORSTMODE_F
) == 0)
4320 t4_write_reg(adap
, PL_RST_A
, PIORST_F
| PIORSTMODE_F
);
4325 t4_set_reg_field(adap
, CIM_BOOT_CFG_A
, UPCRST_F
, 0);
4326 for (ms
= 0; ms
< FW_CMD_MAX_TIMEOUT
; ) {
4327 if (!(t4_read_reg(adap
, PCIE_FW_A
) & PCIE_FW_HALT_F
))
4338 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4339 * @adap: the adapter
4340 * @mbox: mailbox to use for the FW RESET command (if desired)
4341 * @fw_data: the firmware image to write
4343 * @force: force upgrade even if firmware doesn't cooperate
4345 * Perform all of the steps necessary for upgrading an adapter's
4346 * firmware image. Normally this requires the cooperation of the
4347 * existing firmware in order to halt all existing activities
4348 * but if an invalid mailbox token is passed in we skip that step
4349 * (though we'll still put the adapter microprocessor into RESET in
4352 * On successful return the new firmware will have been loaded and
4353 * the adapter will have been fully RESET losing all previous setup
4354 * state. On unsuccessful return the adapter may be completely hosed ...
4355 * positive errno indicates that the adapter is ~probably~ intact, a
4356 * negative errno indicates that things are looking bad ...
4358 int t4_fw_upgrade(struct adapter
*adap
, unsigned int mbox
,
4359 const u8
*fw_data
, unsigned int size
, int force
)
4361 const struct fw_hdr
*fw_hdr
= (const struct fw_hdr
*)fw_data
;
4364 if (!t4_fw_matches_chip(adap
, fw_hdr
))
4367 ret
= t4_fw_halt(adap
, mbox
, force
);
4368 if (ret
< 0 && !force
)
4371 ret
= t4_load_fw(adap
, fw_data
, size
);
4376 * Older versions of the firmware don't understand the new
4377 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4378 * restart. So for newly loaded older firmware we'll have to do the
4379 * RESET for it so it starts up on a clean slate. We can tell if
4380 * the newly loaded firmware will handle this right by checking
4381 * its header flags to see if it advertises the capability.
4383 reset
= ((be32_to_cpu(fw_hdr
->flags
) & FW_HDR_FLAGS_RESET_HALT
) == 0);
4384 return t4_fw_restart(adap
, mbox
, reset
);
4388 * t4_fixup_host_params - fix up host-dependent parameters
4389 * @adap: the adapter
4390 * @page_size: the host's Base Page Size
4391 * @cache_line_size: the host's Cache Line Size
4393 * Various registers in T4 contain values which are dependent on the
4394 * host's Base Page and Cache Line Sizes. This function will fix all of
4395 * those registers with the appropriate values as passed in ...
4397 int t4_fixup_host_params(struct adapter
*adap
, unsigned int page_size
,
4398 unsigned int cache_line_size
)
4400 unsigned int page_shift
= fls(page_size
) - 1;
4401 unsigned int sge_hps
= page_shift
- 10;
4402 unsigned int stat_len
= cache_line_size
> 64 ? 128 : 64;
4403 unsigned int fl_align
= cache_line_size
< 32 ? 32 : cache_line_size
;
4404 unsigned int fl_align_log
= fls(fl_align
) - 1;
4406 t4_write_reg(adap
, SGE_HOST_PAGE_SIZE_A
,
4407 HOSTPAGESIZEPF0_V(sge_hps
) |
4408 HOSTPAGESIZEPF1_V(sge_hps
) |
4409 HOSTPAGESIZEPF2_V(sge_hps
) |
4410 HOSTPAGESIZEPF3_V(sge_hps
) |
4411 HOSTPAGESIZEPF4_V(sge_hps
) |
4412 HOSTPAGESIZEPF5_V(sge_hps
) |
4413 HOSTPAGESIZEPF6_V(sge_hps
) |
4414 HOSTPAGESIZEPF7_V(sge_hps
));
4416 if (is_t4(adap
->params
.chip
)) {
4417 t4_set_reg_field(adap
, SGE_CONTROL_A
,
4418 INGPADBOUNDARY_V(INGPADBOUNDARY_M
) |
4419 EGRSTATUSPAGESIZE_F
,
4420 INGPADBOUNDARY_V(fl_align_log
-
4421 INGPADBOUNDARY_SHIFT_X
) |
4422 EGRSTATUSPAGESIZE_V(stat_len
!= 64));
4424 /* T5 introduced the separation of the Free List Padding and
4425 * Packing Boundaries. Thus, we can select a smaller Padding
4426 * Boundary to avoid uselessly chewing up PCIe Link and Memory
4427 * Bandwidth, and use a Packing Boundary which is large enough
4428 * to avoid false sharing between CPUs, etc.
4430 * For the PCI Link, the smaller the Padding Boundary the
4431 * better. For the Memory Controller, a smaller Padding
4432 * Boundary is better until we cross under the Memory Line
4433 * Size (the minimum unit of transfer to/from Memory). If we
4434 * have a Padding Boundary which is smaller than the Memory
4435 * Line Size, that'll involve a Read-Modify-Write cycle on the
4436 * Memory Controller which is never good. For T5 the smallest
4437 * Padding Boundary which we can select is 32 bytes which is
4438 * larger than any known Memory Controller Line Size so we'll
4441 * T5 has a different interpretation of the "0" value for the
4442 * Packing Boundary. This corresponds to 16 bytes instead of
4443 * the expected 32 bytes. We never have a Packing Boundary
4444 * less than 32 bytes so we can't use that special value but
4445 * on the other hand, if we wanted 32 bytes, the best we can
4446 * really do is 64 bytes.
4448 if (fl_align
<= 32) {
4452 t4_set_reg_field(adap
, SGE_CONTROL_A
,
4453 INGPADBOUNDARY_V(INGPADBOUNDARY_M
) |
4454 EGRSTATUSPAGESIZE_F
,
4455 INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X
) |
4456 EGRSTATUSPAGESIZE_V(stat_len
!= 64));
4457 t4_set_reg_field(adap
, SGE_CONTROL2_A
,
4458 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M
),
4459 INGPACKBOUNDARY_V(fl_align_log
-
4460 INGPACKBOUNDARY_SHIFT_X
));
4463 * Adjust various SGE Free List Host Buffer Sizes.
4465 * This is something of a crock since we're using fixed indices into
4466 * the array which are also known by the sge.c code and the T4
4467 * Firmware Configuration File. We need to come up with a much better
4468 * approach to managing this array. For now, the first four entries
4473 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
4474 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
4476 * For the single-MTU buffers in unpacked mode we need to include
4477 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
4478 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
4479 * Padding boundary. All of these are accommodated in the Factory
4480 * Default Firmware Configuration File but we need to adjust it for
4481 * this host's cache line size.
4483 t4_write_reg(adap
, SGE_FL_BUFFER_SIZE0_A
, page_size
);
4484 t4_write_reg(adap
, SGE_FL_BUFFER_SIZE2_A
,
4485 (t4_read_reg(adap
, SGE_FL_BUFFER_SIZE2_A
) + fl_align
-1)
4487 t4_write_reg(adap
, SGE_FL_BUFFER_SIZE3_A
,
4488 (t4_read_reg(adap
, SGE_FL_BUFFER_SIZE3_A
) + fl_align
-1)
4491 t4_write_reg(adap
, ULP_RX_TDDP_PSZ_A
, HPZ0_V(page_shift
- 12));
4497 * t4_fw_initialize - ask FW to initialize the device
4498 * @adap: the adapter
4499 * @mbox: mailbox to use for the FW command
4501 * Issues a command to FW to partially initialize the device. This
4502 * performs initialization that generally doesn't depend on user input.
4504 int t4_fw_initialize(struct adapter
*adap
, unsigned int mbox
)
4506 struct fw_initialize_cmd c
;
4508 memset(&c
, 0, sizeof(c
));
4509 INIT_CMD(c
, INITIALIZE
, WRITE
);
4510 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
4514 * t4_query_params_rw - query FW or device parameters
4515 * @adap: the adapter
4516 * @mbox: mailbox to use for the FW command
4519 * @nparams: the number of parameters
4520 * @params: the parameter names
4521 * @val: the parameter values
4522 * @rw: Write and read flag
4524 * Reads the value of FW or device parameters. Up to 7 parameters can be
4527 int t4_query_params_rw(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
4528 unsigned int vf
, unsigned int nparams
, const u32
*params
,
4532 struct fw_params_cmd c
;
4533 __be32
*p
= &c
.param
[0].mnem
;
4538 memset(&c
, 0, sizeof(c
));
4539 c
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD
) |
4540 FW_CMD_REQUEST_F
| FW_CMD_READ_F
|
4541 FW_PARAMS_CMD_PFN_V(pf
) |
4542 FW_PARAMS_CMD_VFN_V(vf
));
4543 c
.retval_len16
= cpu_to_be32(FW_LEN16(c
));
4545 for (i
= 0; i
< nparams
; i
++) {
4546 *p
++ = cpu_to_be32(*params
++);
4548 *p
= cpu_to_be32(*(val
+ i
));
4552 ret
= t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), &c
);
4554 for (i
= 0, p
= &c
.param
[0].val
; i
< nparams
; i
++, p
+= 2)
4555 *val
++ = be32_to_cpu(*p
);
4559 int t4_query_params(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
4560 unsigned int vf
, unsigned int nparams
, const u32
*params
,
4563 return t4_query_params_rw(adap
, mbox
, pf
, vf
, nparams
, params
, val
, 0);
4567 * t4_set_params_timeout - sets FW or device parameters
4568 * @adap: the adapter
4569 * @mbox: mailbox to use for the FW command
4572 * @nparams: the number of parameters
4573 * @params: the parameter names
4574 * @val: the parameter values
4575 * @timeout: the timeout time
4577 * Sets the value of FW or device parameters. Up to 7 parameters can be
4578 * specified at once.
4580 int t4_set_params_timeout(struct adapter
*adap
, unsigned int mbox
,
4581 unsigned int pf
, unsigned int vf
,
4582 unsigned int nparams
, const u32
*params
,
4583 const u32
*val
, int timeout
)
4585 struct fw_params_cmd c
;
4586 __be32
*p
= &c
.param
[0].mnem
;
4591 memset(&c
, 0, sizeof(c
));
4592 c
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD
) |
4593 FW_CMD_REQUEST_F
| FW_CMD_WRITE_F
|
4594 FW_PARAMS_CMD_PFN_V(pf
) |
4595 FW_PARAMS_CMD_VFN_V(vf
));
4596 c
.retval_len16
= cpu_to_be32(FW_LEN16(c
));
4599 *p
++ = cpu_to_be32(*params
++);
4600 *p
++ = cpu_to_be32(*val
++);
4603 return t4_wr_mbox_timeout(adap
, mbox
, &c
, sizeof(c
), NULL
, timeout
);
4607 * t4_set_params - sets FW or device parameters
4608 * @adap: the adapter
4609 * @mbox: mailbox to use for the FW command
4612 * @nparams: the number of parameters
4613 * @params: the parameter names
4614 * @val: the parameter values
4616 * Sets the value of FW or device parameters. Up to 7 parameters can be
4617 * specified at once.
4619 int t4_set_params(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
4620 unsigned int vf
, unsigned int nparams
, const u32
*params
,
4623 return t4_set_params_timeout(adap
, mbox
, pf
, vf
, nparams
, params
, val
,
4624 FW_CMD_MAX_TIMEOUT
);
4628 * t4_cfg_pfvf - configure PF/VF resource limits
4629 * @adap: the adapter
4630 * @mbox: mailbox to use for the FW command
4631 * @pf: the PF being configured
4632 * @vf: the VF being configured
4633 * @txq: the max number of egress queues
4634 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
4635 * @rxqi: the max number of interrupt-capable ingress queues
4636 * @rxq: the max number of interruptless ingress queues
4637 * @tc: the PCI traffic class
4638 * @vi: the max number of virtual interfaces
4639 * @cmask: the channel access rights mask for the PF/VF
4640 * @pmask: the port access rights mask for the PF/VF
4641 * @nexact: the maximum number of exact MPS filters
4642 * @rcaps: read capabilities
4643 * @wxcaps: write/execute capabilities
4645 * Configures resource limits and capabilities for a physical or virtual
4648 int t4_cfg_pfvf(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
4649 unsigned int vf
, unsigned int txq
, unsigned int txq_eth_ctrl
,
4650 unsigned int rxqi
, unsigned int rxq
, unsigned int tc
,
4651 unsigned int vi
, unsigned int cmask
, unsigned int pmask
,
4652 unsigned int nexact
, unsigned int rcaps
, unsigned int wxcaps
)
4654 struct fw_pfvf_cmd c
;
4656 memset(&c
, 0, sizeof(c
));
4657 c
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD
) | FW_CMD_REQUEST_F
|
4658 FW_CMD_WRITE_F
| FW_PFVF_CMD_PFN_V(pf
) |
4659 FW_PFVF_CMD_VFN_V(vf
));
4660 c
.retval_len16
= cpu_to_be32(FW_LEN16(c
));
4661 c
.niqflint_niq
= cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi
) |
4662 FW_PFVF_CMD_NIQ_V(rxq
));
4663 c
.type_to_neq
= cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask
) |
4664 FW_PFVF_CMD_PMASK_V(pmask
) |
4665 FW_PFVF_CMD_NEQ_V(txq
));
4666 c
.tc_to_nexactf
= cpu_to_be32(FW_PFVF_CMD_TC_V(tc
) |
4667 FW_PFVF_CMD_NVI_V(vi
) |
4668 FW_PFVF_CMD_NEXACTF_V(nexact
));
4669 c
.r_caps_to_nethctrl
= cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps
) |
4670 FW_PFVF_CMD_WX_CAPS_V(wxcaps
) |
4671 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl
));
4672 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
4676 * t4_alloc_vi - allocate a virtual interface
4677 * @adap: the adapter
4678 * @mbox: mailbox to use for the FW command
4679 * @port: physical port associated with the VI
4680 * @pf: the PF owning the VI
4681 * @vf: the VF owning the VI
4682 * @nmac: number of MAC addresses needed (1 to 5)
4683 * @mac: the MAC addresses of the VI
4684 * @rss_size: size of RSS table slice associated with this VI
4686 * Allocates a virtual interface for the given physical port. If @mac is
4687 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
4688 * @mac should be large enough to hold @nmac Ethernet addresses, they are
4689 * stored consecutively so the space needed is @nmac * 6 bytes.
4690 * Returns a negative error number or the non-negative VI id.
4692 int t4_alloc_vi(struct adapter
*adap
, unsigned int mbox
, unsigned int port
,
4693 unsigned int pf
, unsigned int vf
, unsigned int nmac
, u8
*mac
,
4694 unsigned int *rss_size
)
4699 memset(&c
, 0, sizeof(c
));
4700 c
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD
) | FW_CMD_REQUEST_F
|
4701 FW_CMD_WRITE_F
| FW_CMD_EXEC_F
|
4702 FW_VI_CMD_PFN_V(pf
) | FW_VI_CMD_VFN_V(vf
));
4703 c
.alloc_to_len16
= cpu_to_be32(FW_VI_CMD_ALLOC_F
| FW_LEN16(c
));
4704 c
.portid_pkd
= FW_VI_CMD_PORTID_V(port
);
4707 ret
= t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), &c
);
4712 memcpy(mac
, c
.mac
, sizeof(c
.mac
));
4715 memcpy(mac
+ 24, c
.nmac3
, sizeof(c
.nmac3
));
4717 memcpy(mac
+ 18, c
.nmac2
, sizeof(c
.nmac2
));
4719 memcpy(mac
+ 12, c
.nmac1
, sizeof(c
.nmac1
));
4721 memcpy(mac
+ 6, c
.nmac0
, sizeof(c
.nmac0
));
4725 *rss_size
= FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c
.rsssize_pkd
));
4726 return FW_VI_CMD_VIID_G(be16_to_cpu(c
.type_viid
));
4730 * t4_set_rxmode - set Rx properties of a virtual interface
4731 * @adap: the adapter
4732 * @mbox: mailbox to use for the FW command
4734 * @mtu: the new MTU or -1
4735 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4736 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4737 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4738 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
4739 * @sleep_ok: if true we may sleep while awaiting command completion
4741 * Sets Rx properties of a virtual interface.
4743 int t4_set_rxmode(struct adapter
*adap
, unsigned int mbox
, unsigned int viid
,
4744 int mtu
, int promisc
, int all_multi
, int bcast
, int vlanex
,
4747 struct fw_vi_rxmode_cmd c
;
4749 /* convert to FW values */
4751 mtu
= FW_RXMODE_MTU_NO_CHG
;
4753 promisc
= FW_VI_RXMODE_CMD_PROMISCEN_M
;
4755 all_multi
= FW_VI_RXMODE_CMD_ALLMULTIEN_M
;
4757 bcast
= FW_VI_RXMODE_CMD_BROADCASTEN_M
;
4759 vlanex
= FW_VI_RXMODE_CMD_VLANEXEN_M
;
4761 memset(&c
, 0, sizeof(c
));
4762 c
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD
) |
4763 FW_CMD_REQUEST_F
| FW_CMD_WRITE_F
|
4764 FW_VI_RXMODE_CMD_VIID_V(viid
));
4765 c
.retval_len16
= cpu_to_be32(FW_LEN16(c
));
4767 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu
) |
4768 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc
) |
4769 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi
) |
4770 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast
) |
4771 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex
));
4772 return t4_wr_mbox_meat(adap
, mbox
, &c
, sizeof(c
), NULL
, sleep_ok
);
4776 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4777 * @adap: the adapter
4778 * @mbox: mailbox to use for the FW command
4780 * @free: if true any existing filters for this VI id are first removed
4781 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
4782 * @addr: the MAC address(es)
4783 * @idx: where to store the index of each allocated filter
4784 * @hash: pointer to hash address filter bitmap
4785 * @sleep_ok: call is allowed to sleep
4787 * Allocates an exact-match filter for each of the supplied addresses and
4788 * sets it to the corresponding address. If @idx is not %NULL it should
4789 * have at least @naddr entries, each of which will be set to the index of
4790 * the filter allocated for the corresponding MAC address. If a filter
4791 * could not be allocated for an address its index is set to 0xffff.
4792 * If @hash is not %NULL addresses that fail to allocate an exact filter
4793 * are hashed and update the hash filter bitmap pointed at by @hash.
4795 * Returns a negative error number or the number of filters allocated.
4797 int t4_alloc_mac_filt(struct adapter
*adap
, unsigned int mbox
,
4798 unsigned int viid
, bool free
, unsigned int naddr
,
4799 const u8
**addr
, u16
*idx
, u64
*hash
, bool sleep_ok
)
4802 struct fw_vi_mac_cmd c
;
4803 struct fw_vi_mac_exact
*p
;
4804 unsigned int max_naddr
= is_t4(adap
->params
.chip
) ?
4805 NUM_MPS_CLS_SRAM_L_INSTANCES
:
4806 NUM_MPS_T5_CLS_SRAM_L_INSTANCES
;
4811 memset(&c
, 0, sizeof(c
));
4812 c
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD
) |
4813 FW_CMD_REQUEST_F
| FW_CMD_WRITE_F
|
4814 (free
? FW_CMD_EXEC_F
: 0) |
4815 FW_VI_MAC_CMD_VIID_V(viid
));
4816 c
.freemacs_to_len16
= cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free
) |
4817 FW_CMD_LEN16_V((naddr
+ 2) / 2));
4819 for (i
= 0, p
= c
.u
.exact
; i
< naddr
; i
++, p
++) {
4821 cpu_to_be16(FW_VI_MAC_CMD_VALID_F
|
4822 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC
));
4823 memcpy(p
->macaddr
, addr
[i
], sizeof(p
->macaddr
));
4826 ret
= t4_wr_mbox_meat(adap
, mbox
, &c
, sizeof(c
), &c
, sleep_ok
);
4830 for (i
= 0, p
= c
.u
.exact
; i
< naddr
; i
++, p
++) {
4831 u16 index
= FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p
->valid_to_idx
));
4834 idx
[i
] = index
>= max_naddr
? 0xffff : index
;
4835 if (index
< max_naddr
)
4838 *hash
|= (1ULL << hash_mac_addr(addr
[i
]));
4844 * t4_change_mac - modifies the exact-match filter for a MAC address
4845 * @adap: the adapter
4846 * @mbox: mailbox to use for the FW command
4848 * @idx: index of existing filter for old value of MAC address, or -1
4849 * @addr: the new MAC address value
4850 * @persist: whether a new MAC allocation should be persistent
4851 * @add_smt: if true also add the address to the HW SMT
4853 * Modifies an exact-match filter and sets it to the new MAC address.
4854 * Note that in general it is not possible to modify the value of a given
4855 * filter so the generic way to modify an address filter is to free the one
4856 * being used by the old address value and allocate a new filter for the
4857 * new address value. @idx can be -1 if the address is a new addition.
4859 * Returns a negative error number or the index of the filter with the new
4862 int t4_change_mac(struct adapter
*adap
, unsigned int mbox
, unsigned int viid
,
4863 int idx
, const u8
*addr
, bool persist
, bool add_smt
)
4866 struct fw_vi_mac_cmd c
;
4867 struct fw_vi_mac_exact
*p
= c
.u
.exact
;
4868 unsigned int max_mac_addr
= is_t4(adap
->params
.chip
) ?
4869 NUM_MPS_CLS_SRAM_L_INSTANCES
:
4870 NUM_MPS_T5_CLS_SRAM_L_INSTANCES
;
4872 if (idx
< 0) /* new allocation */
4873 idx
= persist
? FW_VI_MAC_ADD_PERSIST_MAC
: FW_VI_MAC_ADD_MAC
;
4874 mode
= add_smt
? FW_VI_MAC_SMT_AND_MPSTCAM
: FW_VI_MAC_MPS_TCAM_ENTRY
;
4876 memset(&c
, 0, sizeof(c
));
4877 c
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD
) |
4878 FW_CMD_REQUEST_F
| FW_CMD_WRITE_F
|
4879 FW_VI_MAC_CMD_VIID_V(viid
));
4880 c
.freemacs_to_len16
= cpu_to_be32(FW_CMD_LEN16_V(1));
4881 p
->valid_to_idx
= cpu_to_be16(FW_VI_MAC_CMD_VALID_F
|
4882 FW_VI_MAC_CMD_SMAC_RESULT_V(mode
) |
4883 FW_VI_MAC_CMD_IDX_V(idx
));
4884 memcpy(p
->macaddr
, addr
, sizeof(p
->macaddr
));
4886 ret
= t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), &c
);
4888 ret
= FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p
->valid_to_idx
));
4889 if (ret
>= max_mac_addr
)
4896 * t4_set_addr_hash - program the MAC inexact-match hash filter
4897 * @adap: the adapter
4898 * @mbox: mailbox to use for the FW command
4900 * @ucast: whether the hash filter should also match unicast addresses
4901 * @vec: the value to be written to the hash filter
4902 * @sleep_ok: call is allowed to sleep
4904 * Sets the 64-bit inexact-match hash filter for a virtual interface.
4906 int t4_set_addr_hash(struct adapter
*adap
, unsigned int mbox
, unsigned int viid
,
4907 bool ucast
, u64 vec
, bool sleep_ok
)
4909 struct fw_vi_mac_cmd c
;
4911 memset(&c
, 0, sizeof(c
));
4912 c
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD
) |
4913 FW_CMD_REQUEST_F
| FW_CMD_WRITE_F
|
4914 FW_VI_ENABLE_CMD_VIID_V(viid
));
4915 c
.freemacs_to_len16
= cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F
|
4916 FW_VI_MAC_CMD_HASHUNIEN_V(ucast
) |
4918 c
.u
.hash
.hashvec
= cpu_to_be64(vec
);
4919 return t4_wr_mbox_meat(adap
, mbox
, &c
, sizeof(c
), NULL
, sleep_ok
);
4923 * t4_enable_vi_params - enable/disable a virtual interface
4924 * @adap: the adapter
4925 * @mbox: mailbox to use for the FW command
4927 * @rx_en: 1=enable Rx, 0=disable Rx
4928 * @tx_en: 1=enable Tx, 0=disable Tx
4929 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
4931 * Enables/disables a virtual interface. Note that setting DCB Enable
4932 * only makes sense when enabling a Virtual Interface ...
4934 int t4_enable_vi_params(struct adapter
*adap
, unsigned int mbox
,
4935 unsigned int viid
, bool rx_en
, bool tx_en
, bool dcb_en
)
4937 struct fw_vi_enable_cmd c
;
4939 memset(&c
, 0, sizeof(c
));
4940 c
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD
) |
4941 FW_CMD_REQUEST_F
| FW_CMD_EXEC_F
|
4942 FW_VI_ENABLE_CMD_VIID_V(viid
));
4943 c
.ien_to_len16
= cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en
) |
4944 FW_VI_ENABLE_CMD_EEN_V(tx_en
) |
4945 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en
) |
4947 return t4_wr_mbox_ns(adap
, mbox
, &c
, sizeof(c
), NULL
);
4951 * t4_enable_vi - enable/disable a virtual interface
4952 * @adap: the adapter
4953 * @mbox: mailbox to use for the FW command
4955 * @rx_en: 1=enable Rx, 0=disable Rx
4956 * @tx_en: 1=enable Tx, 0=disable Tx
4958 * Enables/disables a virtual interface.
4960 int t4_enable_vi(struct adapter
*adap
, unsigned int mbox
, unsigned int viid
,
4961 bool rx_en
, bool tx_en
)
4963 return t4_enable_vi_params(adap
, mbox
, viid
, rx_en
, tx_en
, 0);
4967 * t4_identify_port - identify a VI's port by blinking its LED
4968 * @adap: the adapter
4969 * @mbox: mailbox to use for the FW command
4971 * @nblinks: how many times to blink LED at 2.5 Hz
4973 * Identifies a VI's port by blinking its LED.
4975 int t4_identify_port(struct adapter
*adap
, unsigned int mbox
, unsigned int viid
,
4976 unsigned int nblinks
)
4978 struct fw_vi_enable_cmd c
;
4980 memset(&c
, 0, sizeof(c
));
4981 c
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD
) |
4982 FW_CMD_REQUEST_F
| FW_CMD_EXEC_F
|
4983 FW_VI_ENABLE_CMD_VIID_V(viid
));
4984 c
.ien_to_len16
= cpu_to_be32(FW_VI_ENABLE_CMD_LED_F
| FW_LEN16(c
));
4985 c
.blinkdur
= cpu_to_be16(nblinks
);
4986 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
4990 * t4_iq_free - free an ingress queue and its FLs
4991 * @adap: the adapter
4992 * @mbox: mailbox to use for the FW command
4993 * @pf: the PF owning the queues
4994 * @vf: the VF owning the queues
4995 * @iqtype: the ingress queue type
4996 * @iqid: ingress queue id
4997 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4998 * @fl1id: FL1 queue id or 0xffff if no attached FL1
5000 * Frees an ingress queue and its associated FLs, if any.
5002 int t4_iq_free(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
5003 unsigned int vf
, unsigned int iqtype
, unsigned int iqid
,
5004 unsigned int fl0id
, unsigned int fl1id
)
5008 memset(&c
, 0, sizeof(c
));
5009 c
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD
) | FW_CMD_REQUEST_F
|
5010 FW_CMD_EXEC_F
| FW_IQ_CMD_PFN_V(pf
) |
5011 FW_IQ_CMD_VFN_V(vf
));
5012 c
.alloc_to_len16
= cpu_to_be32(FW_IQ_CMD_FREE_F
| FW_LEN16(c
));
5013 c
.type_to_iqandstindex
= cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype
));
5014 c
.iqid
= cpu_to_be16(iqid
);
5015 c
.fl0id
= cpu_to_be16(fl0id
);
5016 c
.fl1id
= cpu_to_be16(fl1id
);
5017 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
5021 * t4_eth_eq_free - free an Ethernet egress queue
5022 * @adap: the adapter
5023 * @mbox: mailbox to use for the FW command
5024 * @pf: the PF owning the queue
5025 * @vf: the VF owning the queue
5026 * @eqid: egress queue id
5028 * Frees an Ethernet egress queue.
5030 int t4_eth_eq_free(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
5031 unsigned int vf
, unsigned int eqid
)
5033 struct fw_eq_eth_cmd c
;
5035 memset(&c
, 0, sizeof(c
));
5036 c
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD
) |
5037 FW_CMD_REQUEST_F
| FW_CMD_EXEC_F
|
5038 FW_EQ_ETH_CMD_PFN_V(pf
) |
5039 FW_EQ_ETH_CMD_VFN_V(vf
));
5040 c
.alloc_to_len16
= cpu_to_be32(FW_EQ_ETH_CMD_FREE_F
| FW_LEN16(c
));
5041 c
.eqid_pkd
= cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid
));
5042 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
5046 * t4_ctrl_eq_free - free a control egress queue
5047 * @adap: the adapter
5048 * @mbox: mailbox to use for the FW command
5049 * @pf: the PF owning the queue
5050 * @vf: the VF owning the queue
5051 * @eqid: egress queue id
5053 * Frees a control egress queue.
5055 int t4_ctrl_eq_free(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
5056 unsigned int vf
, unsigned int eqid
)
5058 struct fw_eq_ctrl_cmd c
;
5060 memset(&c
, 0, sizeof(c
));
5061 c
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD
) |
5062 FW_CMD_REQUEST_F
| FW_CMD_EXEC_F
|
5063 FW_EQ_CTRL_CMD_PFN_V(pf
) |
5064 FW_EQ_CTRL_CMD_VFN_V(vf
));
5065 c
.alloc_to_len16
= cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F
| FW_LEN16(c
));
5066 c
.cmpliqid_eqid
= cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid
));
5067 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
5071 * t4_ofld_eq_free - free an offload egress queue
5072 * @adap: the adapter
5073 * @mbox: mailbox to use for the FW command
5074 * @pf: the PF owning the queue
5075 * @vf: the VF owning the queue
5076 * @eqid: egress queue id
5078 * Frees a control egress queue.
5080 int t4_ofld_eq_free(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
5081 unsigned int vf
, unsigned int eqid
)
5083 struct fw_eq_ofld_cmd c
;
5085 memset(&c
, 0, sizeof(c
));
5086 c
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD
) |
5087 FW_CMD_REQUEST_F
| FW_CMD_EXEC_F
|
5088 FW_EQ_OFLD_CMD_PFN_V(pf
) |
5089 FW_EQ_OFLD_CMD_VFN_V(vf
));
5090 c
.alloc_to_len16
= cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F
| FW_LEN16(c
));
5091 c
.eqid_pkd
= cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid
));
5092 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
5096 * t4_handle_fw_rpl - process a FW reply message
5097 * @adap: the adapter
5098 * @rpl: start of the FW message
5100 * Processes a FW message, such as link state change messages.
5102 int t4_handle_fw_rpl(struct adapter
*adap
, const __be64
*rpl
)
5104 u8 opcode
= *(const u8
*)rpl
;
5106 if (opcode
== FW_PORT_CMD
) { /* link/module state change message */
5107 int speed
= 0, fc
= 0;
5108 const struct fw_port_cmd
*p
= (void *)rpl
;
5109 int chan
= FW_PORT_CMD_PORTID_G(be32_to_cpu(p
->op_to_portid
));
5110 int port
= adap
->chan_map
[chan
];
5111 struct port_info
*pi
= adap2pinfo(adap
, port
);
5112 struct link_config
*lc
= &pi
->link_cfg
;
5113 u32 stat
= be32_to_cpu(p
->u
.info
.lstatus_to_modtype
);
5114 int link_ok
= (stat
& FW_PORT_CMD_LSTATUS_F
) != 0;
5115 u32 mod
= FW_PORT_CMD_MODTYPE_G(stat
);
5117 if (stat
& FW_PORT_CMD_RXPAUSE_F
)
5119 if (stat
& FW_PORT_CMD_TXPAUSE_F
)
5121 if (stat
& FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M
))
5123 else if (stat
& FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G
))
5125 else if (stat
& FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G
))
5127 else if (stat
& FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G
))
5130 if (link_ok
!= lc
->link_ok
|| speed
!= lc
->speed
||
5131 fc
!= lc
->fc
) { /* something changed */
5132 lc
->link_ok
= link_ok
;
5135 lc
->supported
= be16_to_cpu(p
->u
.info
.pcap
);
5136 t4_os_link_changed(adap
, port
, link_ok
);
5138 if (mod
!= pi
->mod_type
) {
5140 t4_os_portmod_changed(adap
, port
);
5146 static void get_pci_mode(struct adapter
*adapter
, struct pci_params
*p
)
5150 if (pci_is_pcie(adapter
->pdev
)) {
5151 pcie_capability_read_word(adapter
->pdev
, PCI_EXP_LNKSTA
, &val
);
5152 p
->speed
= val
& PCI_EXP_LNKSTA_CLS
;
5153 p
->width
= (val
& PCI_EXP_LNKSTA_NLW
) >> 4;
5158 * init_link_config - initialize a link's SW state
5159 * @lc: structure holding the link state
5160 * @caps: link capabilities
5162 * Initializes the SW state maintained for each link, including the link's
5163 * capabilities and default speed/flow-control/autonegotiation settings.
5165 static void init_link_config(struct link_config
*lc
, unsigned int caps
)
5167 lc
->supported
= caps
;
5168 lc
->requested_speed
= 0;
5170 lc
->requested_fc
= lc
->fc
= PAUSE_RX
| PAUSE_TX
;
5171 if (lc
->supported
& FW_PORT_CAP_ANEG
) {
5172 lc
->advertising
= lc
->supported
& ADVERT_MASK
;
5173 lc
->autoneg
= AUTONEG_ENABLE
;
5174 lc
->requested_fc
|= PAUSE_AUTONEG
;
5176 lc
->advertising
= 0;
5177 lc
->autoneg
= AUTONEG_DISABLE
;
5181 #define CIM_PF_NOACCESS 0xeeeeeeee
5183 int t4_wait_dev_ready(void __iomem
*regs
)
5187 whoami
= readl(regs
+ PL_WHOAMI_A
);
5188 if (whoami
!= 0xffffffff && whoami
!= CIM_PF_NOACCESS
)
5192 whoami
= readl(regs
+ PL_WHOAMI_A
);
5193 return (whoami
!= 0xffffffff && whoami
!= CIM_PF_NOACCESS
? 0 : -EIO
);
5197 u32 vendor_and_model_id
;
5201 static int get_flash_params(struct adapter
*adap
)
5203 /* Table for non-Numonix supported flash parts. Numonix parts are left
5204 * to the preexisting code. All flash parts have 64KB sectors.
5206 static struct flash_desc supported_flash
[] = {
5207 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
5213 ret
= sf1_write(adap
, 1, 1, 0, SF_RD_ID
);
5215 ret
= sf1_read(adap
, 3, 0, 1, &info
);
5216 t4_write_reg(adap
, SF_OP_A
, 0); /* unlock SF */
5220 for (ret
= 0; ret
< ARRAY_SIZE(supported_flash
); ++ret
)
5221 if (supported_flash
[ret
].vendor_and_model_id
== info
) {
5222 adap
->params
.sf_size
= supported_flash
[ret
].size_mb
;
5223 adap
->params
.sf_nsec
=
5224 adap
->params
.sf_size
/ SF_SEC_SIZE
;
5228 if ((info
& 0xff) != 0x20) /* not a Numonix flash */
5230 info
>>= 16; /* log2 of size */
5231 if (info
>= 0x14 && info
< 0x18)
5232 adap
->params
.sf_nsec
= 1 << (info
- 16);
5233 else if (info
== 0x18)
5234 adap
->params
.sf_nsec
= 64;
5237 adap
->params
.sf_size
= 1 << info
;
5238 adap
->params
.sf_fw_start
=
5239 t4_read_reg(adap
, CIM_BOOT_CFG_A
) & BOOTADDR_M
;
5241 if (adap
->params
.sf_size
< FLASH_MIN_SIZE
)
5242 dev_warn(adap
->pdev_dev
, "WARNING!!! FLASH size %#x < %#x!!!\n",
5243 adap
->params
.sf_size
, FLASH_MIN_SIZE
);
5248 * t4_prep_adapter - prepare SW and HW for operation
5249 * @adapter: the adapter
5250 * @reset: if true perform a HW reset
5252 * Initialize adapter SW state for the various HW modules, set initial
5253 * values for some adapter tunables, take PHYs out of reset, and
5254 * initialize the MDIO interface.
5256 int t4_prep_adapter(struct adapter
*adapter
)
5262 get_pci_mode(adapter
, &adapter
->params
.pci
);
5263 pl_rev
= REV_G(t4_read_reg(adapter
, PL_REV_A
));
5265 ret
= get_flash_params(adapter
);
5267 dev_err(adapter
->pdev_dev
, "error %d identifying flash\n", ret
);
5271 /* Retrieve adapter's device ID
5273 pci_read_config_word(adapter
->pdev
, PCI_DEVICE_ID
, &device_id
);
5274 ver
= device_id
>> 12;
5275 adapter
->params
.chip
= 0;
5278 adapter
->params
.chip
|= CHELSIO_CHIP_CODE(CHELSIO_T4
, pl_rev
);
5281 adapter
->params
.chip
|= CHELSIO_CHIP_CODE(CHELSIO_T5
, pl_rev
);
5284 dev_err(adapter
->pdev_dev
, "Device %d is not supported\n",
5289 adapter
->params
.cim_la_size
= CIMLA_SIZE
;
5290 init_cong_ctrl(adapter
->params
.a_wnd
, adapter
->params
.b_wnd
);
5293 * Default port for debugging in case we can't reach FW.
5295 adapter
->params
.nports
= 1;
5296 adapter
->params
.portvec
= 1;
5297 adapter
->params
.vpd
.cclk
= 50000;
5302 * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
5303 * @adapter: the adapter
5304 * @qid: the Queue ID
5305 * @qtype: the Ingress or Egress type for @qid
5306 * @pbar2_qoffset: BAR2 Queue Offset
5307 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
5309 * Returns the BAR2 SGE Queue Registers information associated with the
5310 * indicated Absolute Queue ID. These are passed back in return value
5311 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
5312 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
5314 * This may return an error which indicates that BAR2 SGE Queue
5315 * registers aren't available. If an error is not returned, then the
5316 * following values are returned:
5318 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
5319 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
5321 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
5322 * require the "Inferred Queue ID" ability may be used. E.g. the
5323 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
5324 * then these "Inferred Queue ID" register may not be used.
5326 int cxgb4_t4_bar2_sge_qregs(struct adapter
*adapter
,
5328 enum t4_bar2_qtype qtype
,
5330 unsigned int *pbar2_qid
)
5332 unsigned int page_shift
, page_size
, qpp_shift
, qpp_mask
;
5333 u64 bar2_page_offset
, bar2_qoffset
;
5334 unsigned int bar2_qid
, bar2_qid_offset
, bar2_qinferred
;
5336 /* T4 doesn't support BAR2 SGE Queue registers.
5338 if (is_t4(adapter
->params
.chip
))
5341 /* Get our SGE Page Size parameters.
5343 page_shift
= adapter
->params
.sge
.hps
+ 10;
5344 page_size
= 1 << page_shift
;
5346 /* Get the right Queues per Page parameters for our Queue.
5348 qpp_shift
= (qtype
== T4_BAR2_QTYPE_EGRESS
5349 ? adapter
->params
.sge
.eq_qpp
5350 : adapter
->params
.sge
.iq_qpp
);
5351 qpp_mask
= (1 << qpp_shift
) - 1;
5353 /* Calculate the basics of the BAR2 SGE Queue register area:
5354 * o The BAR2 page the Queue registers will be in.
5355 * o The BAR2 Queue ID.
5356 * o The BAR2 Queue ID Offset into the BAR2 page.
5358 bar2_page_offset
= ((qid
>> qpp_shift
) << page_shift
);
5359 bar2_qid
= qid
& qpp_mask
;
5360 bar2_qid_offset
= bar2_qid
* SGE_UDB_SIZE
;
5362 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
5363 * hardware will infer the Absolute Queue ID simply from the writes to
5364 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
5365 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
5366 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
5367 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
5368 * from the BAR2 Page and BAR2 Queue ID.
5370 * One important censequence of this is that some BAR2 SGE registers
5371 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
5372 * there. But other registers synthesize the SGE Queue ID purely
5373 * from the writes to the registers -- the Write Combined Doorbell
5374 * Buffer is a good example. These BAR2 SGE Registers are only
5375 * available for those BAR2 SGE Register areas where the SGE Absolute
5376 * Queue ID can be inferred from simple writes.
5378 bar2_qoffset
= bar2_page_offset
;
5379 bar2_qinferred
= (bar2_qid_offset
< page_size
);
5380 if (bar2_qinferred
) {
5381 bar2_qoffset
+= bar2_qid_offset
;
5385 *pbar2_qoffset
= bar2_qoffset
;
5386 *pbar2_qid
= bar2_qid
;
5391 * t4_init_devlog_params - initialize adapter->params.devlog
5392 * @adap: the adapter
5394 * Initialize various fields of the adapter's Firmware Device Log
5395 * Parameters structure.
5397 int t4_init_devlog_params(struct adapter
*adap
)
5399 struct devlog_params
*dparams
= &adap
->params
.devlog
;
5401 unsigned int devlog_meminfo
;
5402 struct fw_devlog_cmd devlog_cmd
;
5405 /* If we're dealing with newer firmware, the Device Log Paramerters
5406 * are stored in a designated register which allows us to access the
5407 * Device Log even if we can't talk to the firmware.
5410 t4_read_reg(adap
, PCIE_FW_REG(PCIE_FW_PF_A
, PCIE_FW_PF_DEVLOG
));
5412 unsigned int nentries
, nentries128
;
5414 dparams
->memtype
= PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams
);
5415 dparams
->start
= PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams
) << 4;
5417 nentries128
= PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams
);
5418 nentries
= (nentries128
+ 1) * 128;
5419 dparams
->size
= nentries
* sizeof(struct fw_devlog_e
);
5424 /* Otherwise, ask the firmware for it's Device Log Parameters.
5426 memset(&devlog_cmd
, 0, sizeof(devlog_cmd
));
5427 devlog_cmd
.op_to_write
= cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD
) |
5428 FW_CMD_REQUEST_F
| FW_CMD_READ_F
);
5429 devlog_cmd
.retval_len16
= cpu_to_be32(FW_LEN16(devlog_cmd
));
5430 ret
= t4_wr_mbox(adap
, adap
->mbox
, &devlog_cmd
, sizeof(devlog_cmd
),
5436 be32_to_cpu(devlog_cmd
.memtype_devlog_memaddr16_devlog
);
5437 dparams
->memtype
= FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo
);
5438 dparams
->start
= FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo
) << 4;
5439 dparams
->size
= be32_to_cpu(devlog_cmd
.memsize_devlog
);
5445 * t4_init_sge_params - initialize adap->params.sge
5446 * @adapter: the adapter
5448 * Initialize various fields of the adapter's SGE Parameters structure.
5450 int t4_init_sge_params(struct adapter
*adapter
)
5452 struct sge_params
*sge_params
= &adapter
->params
.sge
;
5454 unsigned int s_hps
, s_qpp
;
5456 /* Extract the SGE Page Size for our PF.
5458 hps
= t4_read_reg(adapter
, SGE_HOST_PAGE_SIZE_A
);
5459 s_hps
= (HOSTPAGESIZEPF0_S
+
5460 (HOSTPAGESIZEPF1_S
- HOSTPAGESIZEPF0_S
) * adapter
->fn
);
5461 sge_params
->hps
= ((hps
>> s_hps
) & HOSTPAGESIZEPF0_M
);
5463 /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
5465 s_qpp
= (QUEUESPERPAGEPF0_S
+
5466 (QUEUESPERPAGEPF1_S
- QUEUESPERPAGEPF0_S
) * adapter
->fn
);
5467 qpp
= t4_read_reg(adapter
, SGE_EGRESS_QUEUES_PER_PAGE_PF_A
);
5468 sge_params
->eq_qpp
= ((qpp
>> s_qpp
) & QUEUESPERPAGEPF0_M
);
5469 qpp
= t4_read_reg(adapter
, SGE_INGRESS_QUEUES_PER_PAGE_PF_A
);
5470 sge_params
->iq_qpp
= ((qpp
>> s_qpp
) & QUEUESPERPAGEPF0_M
);
5476 * t4_init_tp_params - initialize adap->params.tp
5477 * @adap: the adapter
5479 * Initialize various fields of the adapter's TP Parameters structure.
5481 int t4_init_tp_params(struct adapter
*adap
)
5486 v
= t4_read_reg(adap
, TP_TIMER_RESOLUTION_A
);
5487 adap
->params
.tp
.tre
= TIMERRESOLUTION_G(v
);
5488 adap
->params
.tp
.dack_re
= DELAYEDACKRESOLUTION_G(v
);
5490 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5491 for (chan
= 0; chan
< NCHAN
; chan
++)
5492 adap
->params
.tp
.tx_modq
[chan
] = chan
;
5494 /* Cache the adapter's Compressed Filter Mode and global Incress
5497 t4_read_indirect(adap
, TP_PIO_ADDR_A
, TP_PIO_DATA_A
,
5498 &adap
->params
.tp
.vlan_pri_map
, 1,
5500 t4_read_indirect(adap
, TP_PIO_ADDR_A
, TP_PIO_DATA_A
,
5501 &adap
->params
.tp
.ingress_config
, 1,
5502 TP_INGRESS_CONFIG_A
);
5504 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
5505 * shift positions of several elements of the Compressed Filter Tuple
5506 * for this adapter which we need frequently ...
5508 adap
->params
.tp
.vlan_shift
= t4_filter_field_shift(adap
, VLAN_F
);
5509 adap
->params
.tp
.vnic_shift
= t4_filter_field_shift(adap
, VNIC_ID_F
);
5510 adap
->params
.tp
.port_shift
= t4_filter_field_shift(adap
, PORT_F
);
5511 adap
->params
.tp
.protocol_shift
= t4_filter_field_shift(adap
,
5514 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
5515 * represents the presence of an Outer VLAN instead of a VNIC ID.
5517 if ((adap
->params
.tp
.ingress_config
& VNIC_F
) == 0)
5518 adap
->params
.tp
.vnic_shift
= -1;
5524 * t4_filter_field_shift - calculate filter field shift
5525 * @adap: the adapter
5526 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5528 * Return the shift position of a filter field within the Compressed
5529 * Filter Tuple. The filter field is specified via its selection bit
5530 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
5532 int t4_filter_field_shift(const struct adapter
*adap
, int filter_sel
)
5534 unsigned int filter_mode
= adap
->params
.tp
.vlan_pri_map
;
5538 if ((filter_mode
& filter_sel
) == 0)
5541 for (sel
= 1, field_shift
= 0; sel
< filter_sel
; sel
<<= 1) {
5542 switch (filter_mode
& sel
) {
5544 field_shift
+= FT_FCOE_W
;
5547 field_shift
+= FT_PORT_W
;
5550 field_shift
+= FT_VNIC_ID_W
;
5553 field_shift
+= FT_VLAN_W
;
5556 field_shift
+= FT_TOS_W
;
5559 field_shift
+= FT_PROTOCOL_W
;
5562 field_shift
+= FT_ETHERTYPE_W
;
5565 field_shift
+= FT_MACMATCH_W
;
5568 field_shift
+= FT_MPSHITTYPE_W
;
5570 case FRAGMENTATION_F
:
5571 field_shift
+= FT_FRAGMENTATION_W
;
5578 int t4_init_rss_mode(struct adapter
*adap
, int mbox
)
5581 struct fw_rss_vi_config_cmd rvc
;
5583 memset(&rvc
, 0, sizeof(rvc
));
5585 for_each_port(adap
, i
) {
5586 struct port_info
*p
= adap2pinfo(adap
, i
);
5589 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD
) |
5590 FW_CMD_REQUEST_F
| FW_CMD_READ_F
|
5591 FW_RSS_VI_CONFIG_CMD_VIID_V(p
->viid
));
5592 rvc
.retval_len16
= cpu_to_be32(FW_LEN16(rvc
));
5593 ret
= t4_wr_mbox(adap
, mbox
, &rvc
, sizeof(rvc
), &rvc
);
5596 p
->rss_mode
= be32_to_cpu(rvc
.u
.basicvirtual
.defaultq_to_udpen
);
5601 int t4_port_init(struct adapter
*adap
, int mbox
, int pf
, int vf
)
5605 struct fw_port_cmd c
;
5606 struct fw_rss_vi_config_cmd rvc
;
5608 memset(&c
, 0, sizeof(c
));
5609 memset(&rvc
, 0, sizeof(rvc
));
5611 for_each_port(adap
, i
) {
5612 unsigned int rss_size
;
5613 struct port_info
*p
= adap2pinfo(adap
, i
);
5615 while ((adap
->params
.portvec
& (1 << j
)) == 0)
5618 c
.op_to_portid
= cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD
) |
5619 FW_CMD_REQUEST_F
| FW_CMD_READ_F
|
5620 FW_PORT_CMD_PORTID_V(j
));
5621 c
.action_to_len16
= cpu_to_be32(
5622 FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO
) |
5624 ret
= t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), &c
);
5628 ret
= t4_alloc_vi(adap
, mbox
, j
, pf
, vf
, 1, addr
, &rss_size
);
5635 p
->rss_size
= rss_size
;
5636 memcpy(adap
->port
[i
]->dev_addr
, addr
, ETH_ALEN
);
5637 adap
->port
[i
]->dev_port
= j
;
5639 ret
= be32_to_cpu(c
.u
.info
.lstatus_to_modtype
);
5640 p
->mdio_addr
= (ret
& FW_PORT_CMD_MDIOCAP_F
) ?
5641 FW_PORT_CMD_MDIOADDR_G(ret
) : -1;
5642 p
->port_type
= FW_PORT_CMD_PTYPE_G(ret
);
5643 p
->mod_type
= FW_PORT_MOD_TYPE_NA
;
5646 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD
) |
5647 FW_CMD_REQUEST_F
| FW_CMD_READ_F
|
5648 FW_RSS_VI_CONFIG_CMD_VIID(p
->viid
));
5649 rvc
.retval_len16
= cpu_to_be32(FW_LEN16(rvc
));
5650 ret
= t4_wr_mbox(adap
, mbox
, &rvc
, sizeof(rvc
), &rvc
);
5653 p
->rss_mode
= be32_to_cpu(rvc
.u
.basicvirtual
.defaultq_to_udpen
);
5655 init_link_config(&p
->link_cfg
, be16_to_cpu(c
.u
.info
.pcap
));
5662 * t4_read_cimq_cfg - read CIM queue configuration
5663 * @adap: the adapter
5664 * @base: holds the queue base addresses in bytes
5665 * @size: holds the queue sizes in bytes
5666 * @thres: holds the queue full thresholds in bytes
5668 * Returns the current configuration of the CIM queues, starting with
5669 * the IBQs, then the OBQs.
5671 void t4_read_cimq_cfg(struct adapter
*adap
, u16
*base
, u16
*size
, u16
*thres
)
5674 int cim_num_obq
= is_t4(adap
->params
.chip
) ?
5675 CIM_NUM_OBQ
: CIM_NUM_OBQ_T5
;
5677 for (i
= 0; i
< CIM_NUM_IBQ
; i
++) {
5678 t4_write_reg(adap
, CIM_QUEUE_CONFIG_REF_A
, IBQSELECT_F
|
5680 v
= t4_read_reg(adap
, CIM_QUEUE_CONFIG_CTRL_A
);
5681 /* value is in 256-byte units */
5682 *base
++ = CIMQBASE_G(v
) * 256;
5683 *size
++ = CIMQSIZE_G(v
) * 256;
5684 *thres
++ = QUEFULLTHRSH_G(v
) * 8; /* 8-byte unit */
5686 for (i
= 0; i
< cim_num_obq
; i
++) {
5687 t4_write_reg(adap
, CIM_QUEUE_CONFIG_REF_A
, OBQSELECT_F
|
5689 v
= t4_read_reg(adap
, CIM_QUEUE_CONFIG_CTRL_A
);
5690 /* value is in 256-byte units */
5691 *base
++ = CIMQBASE_G(v
) * 256;
5692 *size
++ = CIMQSIZE_G(v
) * 256;
5697 * t4_read_cim_ibq - read the contents of a CIM inbound queue
5698 * @adap: the adapter
5699 * @qid: the queue index
5700 * @data: where to store the queue contents
5701 * @n: capacity of @data in 32-bit words
5703 * Reads the contents of the selected CIM queue starting at address 0 up
5704 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
5705 * error and the number of 32-bit words actually read on success.
5707 int t4_read_cim_ibq(struct adapter
*adap
, unsigned int qid
, u32
*data
, size_t n
)
5709 int i
, err
, attempts
;
5711 const unsigned int nwords
= CIM_IBQ_SIZE
* 4;
5713 if (qid
> 5 || (n
& 3))
5716 addr
= qid
* nwords
;
5720 /* It might take 3-10ms before the IBQ debug read access is allowed.
5721 * Wait for 1 Sec with a delay of 1 usec.
5725 for (i
= 0; i
< n
; i
++, addr
++) {
5726 t4_write_reg(adap
, CIM_IBQ_DBG_CFG_A
, IBQDBGADDR_V(addr
) |
5728 err
= t4_wait_op_done(adap
, CIM_IBQ_DBG_CFG_A
, IBQDBGBUSY_F
, 0,
5732 *data
++ = t4_read_reg(adap
, CIM_IBQ_DBG_DATA_A
);
5734 t4_write_reg(adap
, CIM_IBQ_DBG_CFG_A
, 0);
5739 * t4_read_cim_obq - read the contents of a CIM outbound queue
5740 * @adap: the adapter
5741 * @qid: the queue index
5742 * @data: where to store the queue contents
5743 * @n: capacity of @data in 32-bit words
5745 * Reads the contents of the selected CIM queue starting at address 0 up
5746 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
5747 * error and the number of 32-bit words actually read on success.
5749 int t4_read_cim_obq(struct adapter
*adap
, unsigned int qid
, u32
*data
, size_t n
)
5752 unsigned int addr
, v
, nwords
;
5753 int cim_num_obq
= is_t4(adap
->params
.chip
) ?
5754 CIM_NUM_OBQ
: CIM_NUM_OBQ_T5
;
5756 if ((qid
> (cim_num_obq
- 1)) || (n
& 3))
5759 t4_write_reg(adap
, CIM_QUEUE_CONFIG_REF_A
, OBQSELECT_F
|
5760 QUENUMSELECT_V(qid
));
5761 v
= t4_read_reg(adap
, CIM_QUEUE_CONFIG_CTRL_A
);
5763 addr
= CIMQBASE_G(v
) * 64; /* muliple of 256 -> muliple of 4 */
5764 nwords
= CIMQSIZE_G(v
) * 64; /* same */
5768 for (i
= 0; i
< n
; i
++, addr
++) {
5769 t4_write_reg(adap
, CIM_OBQ_DBG_CFG_A
, OBQDBGADDR_V(addr
) |
5771 err
= t4_wait_op_done(adap
, CIM_OBQ_DBG_CFG_A
, OBQDBGBUSY_F
, 0,
5775 *data
++ = t4_read_reg(adap
, CIM_OBQ_DBG_DATA_A
);
5777 t4_write_reg(adap
, CIM_OBQ_DBG_CFG_A
, 0);
5782 * t4_cim_read - read a block from CIM internal address space
5783 * @adap: the adapter
5784 * @addr: the start address within the CIM address space
5785 * @n: number of words to read
5786 * @valp: where to store the result
5788 * Reads a block of 4-byte words from the CIM intenal address space.
5790 int t4_cim_read(struct adapter
*adap
, unsigned int addr
, unsigned int n
,
5795 if (t4_read_reg(adap
, CIM_HOST_ACC_CTRL_A
) & HOSTBUSY_F
)
5798 for ( ; !ret
&& n
--; addr
+= 4) {
5799 t4_write_reg(adap
, CIM_HOST_ACC_CTRL_A
, addr
);
5800 ret
= t4_wait_op_done(adap
, CIM_HOST_ACC_CTRL_A
, HOSTBUSY_F
,
5803 *valp
++ = t4_read_reg(adap
, CIM_HOST_ACC_DATA_A
);
5809 * t4_cim_write - write a block into CIM internal address space
5810 * @adap: the adapter
5811 * @addr: the start address within the CIM address space
5812 * @n: number of words to write
5813 * @valp: set of values to write
5815 * Writes a block of 4-byte words into the CIM intenal address space.
5817 int t4_cim_write(struct adapter
*adap
, unsigned int addr
, unsigned int n
,
5818 const unsigned int *valp
)
5822 if (t4_read_reg(adap
, CIM_HOST_ACC_CTRL_A
) & HOSTBUSY_F
)
5825 for ( ; !ret
&& n
--; addr
+= 4) {
5826 t4_write_reg(adap
, CIM_HOST_ACC_DATA_A
, *valp
++);
5827 t4_write_reg(adap
, CIM_HOST_ACC_CTRL_A
, addr
| HOSTWRITE_F
);
5828 ret
= t4_wait_op_done(adap
, CIM_HOST_ACC_CTRL_A
, HOSTBUSY_F
,
/* Convenience wrapper: write a single word into CIM internal address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
5841 * t4_cim_read_la - read CIM LA capture buffer
5842 * @adap: the adapter
5843 * @la_buf: where to store the LA data
5844 * @wrptr: the HW write pointer within the capture buffer
5846 * Reads the contents of the CIM LA buffer with the most recent entry at
5847 * the end of the returned data and with the entry at @wrptr first.
5848 * We try to leave the LA in the running state we find it in.
5850 int t4_cim_read_la(struct adapter
*adap
, u32
*la_buf
, unsigned int *wrptr
)
5853 unsigned int cfg
, val
, idx
;
5855 ret
= t4_cim_read(adap
, UP_UP_DBG_LA_CFG_A
, 1, &cfg
);
5859 if (cfg
& UPDBGLAEN_F
) { /* LA is running, freeze it */
5860 ret
= t4_cim_write1(adap
, UP_UP_DBG_LA_CFG_A
, 0);
5865 ret
= t4_cim_read(adap
, UP_UP_DBG_LA_CFG_A
, 1, &val
);
5869 idx
= UPDBGLAWRPTR_G(val
);
5873 for (i
= 0; i
< adap
->params
.cim_la_size
; i
++) {
5874 ret
= t4_cim_write1(adap
, UP_UP_DBG_LA_CFG_A
,
5875 UPDBGLARDPTR_V(idx
) | UPDBGLARDEN_F
);
5878 ret
= t4_cim_read(adap
, UP_UP_DBG_LA_CFG_A
, 1, &val
);
5881 if (val
& UPDBGLARDEN_F
) {
5885 ret
= t4_cim_read(adap
, UP_UP_DBG_LA_DATA_A
, 1, &la_buf
[i
]);
5888 idx
= (idx
+ 1) & UPDBGLARDPTR_M
;
5891 if (cfg
& UPDBGLAEN_F
) {
5892 int r
= t4_cim_write1(adap
, UP_UP_DBG_LA_CFG_A
,
5893 cfg
& ~UPDBGLARDEN_F
);
5901 * t4_tp_read_la - read TP LA capture buffer
5902 * @adap: the adapter
5903 * @la_buf: where to store the LA data
5904 * @wrptr: the HW write pointer within the capture buffer
5906 * Reads the contents of the TP LA buffer with the most recent entry at
5907 * the end of the returned data and with the entry at @wrptr first.
5908 * We leave the LA in the running state we find it in.
5910 void t4_tp_read_la(struct adapter
*adap
, u64
*la_buf
, unsigned int *wrptr
)
5912 bool last_incomplete
;
5913 unsigned int i
, cfg
, val
, idx
;
5915 cfg
= t4_read_reg(adap
, TP_DBG_LA_CONFIG_A
) & 0xffff;
5916 if (cfg
& DBGLAENABLE_F
) /* freeze LA */
5917 t4_write_reg(adap
, TP_DBG_LA_CONFIG_A
,
5918 adap
->params
.tp
.la_mask
| (cfg
^ DBGLAENABLE_F
));
5920 val
= t4_read_reg(adap
, TP_DBG_LA_CONFIG_A
);
5921 idx
= DBGLAWPTR_G(val
);
5922 last_incomplete
= DBGLAMODE_G(val
) >= 2 && (val
& DBGLAWHLF_F
) == 0;
5923 if (last_incomplete
)
5924 idx
= (idx
+ 1) & DBGLARPTR_M
;
5929 val
&= ~DBGLARPTR_V(DBGLARPTR_M
);
5930 val
|= adap
->params
.tp
.la_mask
;
5932 for (i
= 0; i
< TPLA_SIZE
; i
++) {
5933 t4_write_reg(adap
, TP_DBG_LA_CONFIG_A
, DBGLARPTR_V(idx
) | val
);
5934 la_buf
[i
] = t4_read_reg64(adap
, TP_DBG_LA_DATAL_A
);
5935 idx
= (idx
+ 1) & DBGLARPTR_M
;
5938 /* Wipe out last entry if it isn't valid */
5939 if (last_incomplete
)
5940 la_buf
[TPLA_SIZE
- 1] = ~0ULL;
5942 if (cfg
& DBGLAENABLE_F
) /* restore running state */
5943 t4_write_reg(adap
, TP_DBG_LA_CONFIG_A
,
5944 cfg
| adap
->params
.tp
.la_mask
);
/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1
#define SGE_IDMA_WARN_REPEAT 300
5958 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
5959 * @adapter: the adapter
5960 * @idma: the adapter IDMA Monitor state
5962 * Initialize the state of an SGE Ingress DMA Monitor.
5964 void t4_idma_monitor_init(struct adapter
*adapter
,
5965 struct sge_idma_monitor_state
*idma
)
5967 /* Initialize the state variables for detecting an SGE Ingress DMA
5968 * hang. The SGE has internal counters which count up on each clock
5969 * tick whenever the SGE finds its Ingress DMA State Engines in the
5970 * same state they were on the previous clock tick. The clock used is
5971 * the Core Clock so we have a limit on the maximum "time" they can
5972 * record; typically a very small number of seconds. For instance,
5973 * with a 600MHz Core Clock, we can only count up to a bit more than
5974 * 7s. So we'll synthesize a larger counter in order to not run the
5975 * risk of having the "timers" overflow and give us the flexibility to
5976 * maintain a Hung SGE State Machine of our own which operates across
5977 * a longer time frame.
5979 idma
->idma_1s_thresh
= core_ticks_per_usec(adapter
) * 1000000; /* 1s */
5980 idma
->idma_stalled
[0] = 0;
5981 idma
->idma_stalled
[1] = 0;
5985 * t4_idma_monitor - monitor SGE Ingress DMA state
5986 * @adapter: the adapter
5987 * @idma: the adapter IDMA Monitor state
5988 * @hz: number of ticks/second
5989 * @ticks: number of ticks since the last IDMA Monitor call
5991 void t4_idma_monitor(struct adapter
*adapter
,
5992 struct sge_idma_monitor_state
*idma
,
5995 int i
, idma_same_state_cnt
[2];
5997 /* Read the SGE Debug Ingress DMA Same State Count registers. These
5998 * are counters inside the SGE which count up on each clock when the
5999 * SGE finds its Ingress DMA State Engines in the same states they
6000 * were in the previous clock. The counters will peg out at
6001 * 0xffffffff without wrapping around so once they pass the 1s
6002 * threshold they'll stay above that till the IDMA state changes.
6004 t4_write_reg(adapter
, SGE_DEBUG_INDEX_A
, 13);
6005 idma_same_state_cnt
[0] = t4_read_reg(adapter
, SGE_DEBUG_DATA_HIGH_A
);
6006 idma_same_state_cnt
[1] = t4_read_reg(adapter
, SGE_DEBUG_DATA_LOW_A
);
6008 for (i
= 0; i
< 2; i
++) {
6009 u32 debug0
, debug11
;
6011 /* If the Ingress DMA Same State Counter ("timer") is less
6012 * than 1s, then we can reset our synthesized Stall Timer and
6013 * continue. If we have previously emitted warnings about a
6014 * potential stalled Ingress Queue, issue a note indicating
6015 * that the Ingress Queue has resumed forward progress.
6017 if (idma_same_state_cnt
[i
] < idma
->idma_1s_thresh
) {
6018 if (idma
->idma_stalled
[i
] >= SGE_IDMA_WARN_THRESH
* hz
)
6019 dev_warn(adapter
->pdev_dev
, "SGE idma%d, queue %u, "
6020 "resumed after %d seconds\n",
6021 i
, idma
->idma_qid
[i
],
6022 idma
->idma_stalled
[i
] / hz
);
6023 idma
->idma_stalled
[i
] = 0;
6027 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
6028 * domain. The first time we get here it'll be because we
6029 * passed the 1s Threshold; each additional time it'll be
6030 * because the RX Timer Callback is being fired on its regular
6033 * If the stall is below our Potential Hung Ingress Queue
6034 * Warning Threshold, continue.
6036 if (idma
->idma_stalled
[i
] == 0) {
6037 idma
->idma_stalled
[i
] = hz
;
6038 idma
->idma_warn
[i
] = 0;
6040 idma
->idma_stalled
[i
] += ticks
;
6041 idma
->idma_warn
[i
] -= ticks
;
6044 if (idma
->idma_stalled
[i
] < SGE_IDMA_WARN_THRESH
* hz
)
6047 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
6049 if (idma
->idma_warn
[i
] > 0)
6051 idma
->idma_warn
[i
] = SGE_IDMA_WARN_REPEAT
* hz
;
6053 /* Read and save the SGE IDMA State and Queue ID information.
6054 * We do this every time in case it changes across time ...
6055 * can't be too careful ...
6057 t4_write_reg(adapter
, SGE_DEBUG_INDEX_A
, 0);
6058 debug0
= t4_read_reg(adapter
, SGE_DEBUG_DATA_LOW_A
);
6059 idma
->idma_state
[i
] = (debug0
>> (i
* 9)) & 0x3f;
6061 t4_write_reg(adapter
, SGE_DEBUG_INDEX_A
, 11);
6062 debug11
= t4_read_reg(adapter
, SGE_DEBUG_DATA_LOW_A
);
6063 idma
->idma_qid
[i
] = (debug11
>> (i
* 16)) & 0xffff;
6065 dev_warn(adapter
->pdev_dev
, "SGE idma%u, queue %u, potentially stuck in "
6066 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
6067 i
, idma
->idma_qid
[i
], idma
->idma_state
[i
],
6068 idma
->idma_stalled
[i
] / hz
,
6070 t4_sge_decode_idma_state(adapter
, idma
->idma_state
[i
]);