1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Chelsio Communications.
6 #include <netinet/in.h>
8 #include <rte_interrupts.h>
10 #include <rte_debug.h>
12 #include <rte_atomic.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_memory.h>
15 #include <rte_tailq.h>
17 #include <rte_alarm.h>
18 #include <rte_ether.h>
19 #include <rte_ethdev_driver.h>
20 #include <rte_malloc.h>
21 #include <rte_random.h>
23 #include <rte_byteorder.h>
27 #include "t4_regs_values.h"
28 #include "t4fw_interface.h"
31 * t4_read_mtu_tbl - returns the values in the HW path MTU table
33 * @mtus: where to store the MTU values
34 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
36 * Reads the HW path MTU table.
38 void t4_read_mtu_tbl(struct adapter
*adap
, u16
*mtus
, u8
*mtu_log
)
43 for (i
= 0; i
< NMTUS
; ++i
) {
44 t4_write_reg(adap
, A_TP_MTU_TABLE
,
45 V_MTUINDEX(0xff) | V_MTUVALUE(i
));
46 v
= t4_read_reg(adap
, A_TP_MTU_TABLE
);
47 mtus
[i
] = G_MTUVALUE(v
);
49 mtu_log
[i
] = G_MTUWIDTH(v
);
54 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
56 * @addr: the indirect TP register address
57 * @mask: specifies the field within the register to modify
58 * @val: new value for the field
60 * Sets a field of an indirect TP register to the given value.
62 void t4_tp_wr_bits_indirect(struct adapter
*adap
, unsigned int addr
,
63 unsigned int mask
, unsigned int val
)
65 t4_write_reg(adap
, A_TP_PIO_ADDR
, addr
);
66 val
|= t4_read_reg(adap
, A_TP_PIO_DATA
) & ~mask
;
67 t4_write_reg(adap
, A_TP_PIO_DATA
, val
);
70 /* The minimum additive increment value for the congestion control table */
71 #define CC_MIN_INCR 2U
74 * t4_load_mtus - write the MTU and congestion control HW tables
76 * @mtus: the values for the MTU table
77 * @alpha: the values for the congestion control alpha parameter
78 * @beta: the values for the congestion control beta parameter
80 * Write the HW MTU table with the supplied MTUs and the high-speed
81 * congestion control table with the supplied alpha, beta, and MTUs.
82 * We write the two tables together because the additive increments
85 void t4_load_mtus(struct adapter
*adap
, const unsigned short *mtus
,
86 const unsigned short *alpha
, const unsigned short *beta
)
88 static const unsigned int avg_pkts
[NCCTRL_WIN
] = {
89 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
90 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
91 28672, 40960, 57344, 81920, 114688, 163840, 229376
96 for (i
= 0; i
< NMTUS
; ++i
) {
97 unsigned int mtu
= mtus
[i
];
98 unsigned int log2
= cxgbe_fls(mtu
);
100 if (!(mtu
& ((1 << log2
) >> 2))) /* round */
102 t4_write_reg(adap
, A_TP_MTU_TABLE
, V_MTUINDEX(i
) |
103 V_MTUWIDTH(log2
) | V_MTUVALUE(mtu
));
105 for (w
= 0; w
< NCCTRL_WIN
; ++w
) {
108 inc
= max(((mtu
- 40) * alpha
[w
]) / avg_pkts
[w
],
111 t4_write_reg(adap
, A_TP_CCTRL_TABLE
, (i
<< 21) |
112 (w
<< 16) | (beta
[w
] << 13) | inc
);
118 * t4_wait_op_done_val - wait until an operation is completed
119 * @adapter: the adapter performing the operation
120 * @reg: the register to check for completion
121 * @mask: a single-bit field within @reg that indicates completion
122 * @polarity: the value of the field when the operation is completed
123 * @attempts: number of check iterations
124 * @delay: delay in usecs between iterations
125 * @valp: where to store the value of the register at completion time
127 * Wait until an operation is completed by checking a bit in a register
128 * up to @attempts times. If @valp is not NULL the value of the register
129 * at the time it indicated completion is stored there. Returns 0 if the
130 * operation completes and -EAGAIN otherwise.
132 int t4_wait_op_done_val(struct adapter
*adapter
, int reg
, u32 mask
,
133 int polarity
, int attempts
, int delay
, u32
*valp
)
136 u32 val
= t4_read_reg(adapter
, reg
);
138 if (!!(val
& mask
) == polarity
) {
151 * t4_set_reg_field - set a register field to a value
152 * @adapter: the adapter to program
153 * @addr: the register address
154 * @mask: specifies the portion of the register to modify
155 * @val: the new value for the register field
157 * Sets a register field specified by the supplied mask to the
160 void t4_set_reg_field(struct adapter
*adapter
, unsigned int addr
, u32 mask
,
163 u32 v
= t4_read_reg(adapter
, addr
) & ~mask
;
165 t4_write_reg(adapter
, addr
, v
| val
);
166 (void)t4_read_reg(adapter
, addr
); /* flush */
170 * t4_read_indirect - read indirectly addressed registers
172 * @addr_reg: register holding the indirect address
173 * @data_reg: register holding the value of the indirect register
174 * @vals: where the read register values are stored
175 * @nregs: how many indirect registers to read
176 * @start_idx: index of first indirect register to read
178 * Reads registers that are accessed indirectly through an address/data
181 void t4_read_indirect(struct adapter
*adap
, unsigned int addr_reg
,
182 unsigned int data_reg
, u32
*vals
, unsigned int nregs
,
183 unsigned int start_idx
)
186 t4_write_reg(adap
, addr_reg
, start_idx
);
187 *vals
++ = t4_read_reg(adap
, data_reg
);
193 * t4_write_indirect - write indirectly addressed registers
195 * @addr_reg: register holding the indirect addresses
196 * @data_reg: register holding the value for the indirect registers
197 * @vals: values to write
198 * @nregs: how many indirect registers to write
199 * @start_idx: address of first indirect register to write
201 * Writes a sequential block of registers that are accessed indirectly
202 * through an address/data register pair.
204 void t4_write_indirect(struct adapter
*adap
, unsigned int addr_reg
,
205 unsigned int data_reg
, const u32
*vals
,
206 unsigned int nregs
, unsigned int start_idx
)
209 t4_write_reg(adap
, addr_reg
, start_idx
++);
210 t4_write_reg(adap
, data_reg
, *vals
++);
215 * t4_report_fw_error - report firmware error
218 * The adapter firmware can indicate error conditions to the host.
219 * If the firmware has indicated an error, print out the reason for
220 * the firmware error.
222 static void t4_report_fw_error(struct adapter
*adap
)
224 static const char * const reason
[] = {
225 "Crash", /* PCIE_FW_EVAL_CRASH */
226 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
227 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
228 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
229 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
230 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
231 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
232 "Reserved", /* reserved */
236 pcie_fw
= t4_read_reg(adap
, A_PCIE_FW
);
237 if (pcie_fw
& F_PCIE_FW_ERR
)
238 pr_err("%s: Firmware reports adapter error: %s\n",
239 __func__
, reason
[G_PCIE_FW_EVAL(pcie_fw
)]);
243 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
245 static void get_mbox_rpl(struct adapter
*adap
, __be64
*rpl
, int nflit
,
248 for ( ; nflit
; nflit
--, mbox_addr
+= 8)
249 *rpl
++ = cpu_to_be64(t4_read_reg64(adap
, mbox_addr
));
253 * Handle a FW assertion reported in a mailbox.
255 static void fw_asrt(struct adapter
*adap
, u32 mbox_addr
)
257 struct fw_debug_cmd asrt
;
259 get_mbox_rpl(adap
, (__be64
*)&asrt
, sizeof(asrt
) / 8, mbox_addr
);
260 pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
261 asrt
.u
.assert.filename_0_7
, be32_to_cpu(asrt
.u
.assert.line
),
262 be32_to_cpu(asrt
.u
.assert.x
), be32_to_cpu(asrt
.u
.assert.y
));
265 #define X_CIM_PF_NOACCESS 0xeeeeeeee
268 * If the Host OS Driver needs locking arround accesses to the mailbox, this
269 * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
271 /* makes single-statement usage a bit cleaner ... */
272 #ifdef T4_OS_NEEDS_MBOX_LOCKING
273 #define T4_OS_MBOX_LOCKING(x) x
275 #define T4_OS_MBOX_LOCKING(x) do {} while (0)
279 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
281 * @mbox: index of the mailbox to use
282 * @cmd: the command to write
283 * @size: command length in bytes
284 * @rpl: where to optionally store the reply
285 * @sleep_ok: if true we may sleep while awaiting command completion
286 * @timeout: time to wait for command to finish before timing out
287 * (negative implies @sleep_ok=false)
289 * Sends the given command to FW through the selected mailbox and waits
290 * for the FW to execute the command. If @rpl is not %NULL it is used to
291 * store the FW's reply to the command. The command and its optional
292 * reply are of the same length. Some FW commands like RESET and
293 * INITIALIZE can take a considerable amount of time to execute.
294 * @sleep_ok determines whether we may sleep while awaiting the response.
295 * If sleeping is allowed we use progressive backoff otherwise we spin.
296 * Note that passing in a negative @timeout is an alternate mechanism
297 * for specifying @sleep_ok=false. This is useful when a higher level
298 * interface allows for specification of @timeout but not @sleep_ok ...
300 * Returns 0 on success or a negative errno on failure. A
301 * failure can happen either because we are not able to execute the
302 * command or FW executes it but signals an error. In the latter case
303 * the return value is the error code indicated by FW (negated).
305 int t4_wr_mbox_meat_timeout(struct adapter
*adap
, int mbox
,
306 const void __attribute__((__may_alias__
)) *cmd
,
307 int size
, void *rpl
, bool sleep_ok
, int timeout
)
310 * We delay in small increments at first in an effort to maintain
311 * responsiveness for simple, fast executing commands but then back
312 * off to larger delays to a maximum retry delay.
314 static const int delay
[] = {
315 1, 1, 3, 5, 10, 10, 20, 50, 100
321 unsigned int delay_idx
;
322 __be64
*temp
= (__be64
*)malloc(size
* sizeof(char));
324 u32 data_reg
= PF_REG(mbox
, A_CIM_PF_MAILBOX_DATA
);
325 u32 ctl_reg
= PF_REG(mbox
, A_CIM_PF_MAILBOX_CTRL
);
327 struct mbox_entry entry
;
333 if ((size
& 15) || size
> MBOX_LEN
) {
339 memcpy(p
, (const __be64
*)cmd
, size
);
342 * If we have a negative timeout, that implies that we can't sleep.
349 #ifdef T4_OS_NEEDS_MBOX_LOCKING
351 * Queue ourselves onto the mailbox access list. When our entry is at
352 * the front of the list, we have rights to access the mailbox. So we
353 * wait [for a while] till we're at the front [or bail out with an
356 t4_os_atomic_add_tail(&entry
, &adap
->mbox_list
, &adap
->mbox_lock
);
361 for (i
= 0; ; i
+= ms
) {
363 * If we've waited too long, return a busy indication. This
364 * really ought to be based on our initial position in the
365 * mailbox access list but this is a start. We very rarely
366 * contend on access to the mailbox ... Also check for a
367 * firmware error which we'll report as a device error.
369 pcie_fw
= t4_read_reg(adap
, A_PCIE_FW
);
370 if (i
> 4 * timeout
|| (pcie_fw
& F_PCIE_FW_ERR
)) {
371 t4_os_atomic_list_del(&entry
, &adap
->mbox_list
,
373 t4_report_fw_error(adap
);
375 return (pcie_fw
& F_PCIE_FW_ERR
) ? -ENXIO
: -EBUSY
;
379 * If we're at the head, break out and start the mailbox
382 if (t4_os_list_first_entry(&adap
->mbox_list
) == &entry
)
386 * Delay for a bit before checking again ...
389 ms
= delay
[delay_idx
]; /* last element may repeat */
390 if (delay_idx
< ARRAY_SIZE(delay
) - 1)
397 #endif /* T4_OS_NEEDS_MBOX_LOCKING */
400 * Attempt to gain access to the mailbox.
402 for (i
= 0; i
< 4; i
++) {
403 ctl
= t4_read_reg(adap
, ctl_reg
);
405 if (v
!= X_MBOWNER_NONE
)
410 * If we were unable to gain access, dequeue ourselves from the
411 * mailbox atomic access list and report the error to our caller.
413 if (v
!= X_MBOWNER_PL
) {
414 T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry
,
417 t4_report_fw_error(adap
);
419 return (v
== X_MBOWNER_FW
? -EBUSY
: -ETIMEDOUT
);
423 * If we gain ownership of the mailbox and there's a "valid" message
424 * in it, this is likely an asynchronous error message from the
425 * firmware. So we'll report that and then proceed on with attempting
426 * to issue our own command ... which may well fail if the error
427 * presaged the firmware crashing ...
429 if (ctl
& F_MBMSGVALID
) {
430 dev_err(adap
, "found VALID command in mbox %u: "
431 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox
,
432 (unsigned long long)t4_read_reg64(adap
, data_reg
),
433 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 8),
434 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 16),
435 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 24),
436 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 32),
437 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 40),
438 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 48),
439 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 56));
443 * Copy in the new mailbox command and send it on its way ...
445 for (i
= 0; i
< size
; i
+= 8, p
++)
446 t4_write_reg64(adap
, data_reg
+ i
, be64_to_cpu(*p
));
448 CXGBE_DEBUG_MBOX(adap
, "%s: mbox %u: %016llx %016llx %016llx %016llx "
449 "%016llx %016llx %016llx %016llx\n", __func__
, (mbox
),
450 (unsigned long long)t4_read_reg64(adap
, data_reg
),
451 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 8),
452 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 16),
453 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 24),
454 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 32),
455 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 40),
456 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 48),
457 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 56));
459 t4_write_reg(adap
, ctl_reg
, F_MBMSGVALID
| V_MBOWNER(X_MBOWNER_FW
));
460 t4_read_reg(adap
, ctl_reg
); /* flush write */
466 * Loop waiting for the reply; bail out if we time out or the firmware
469 pcie_fw
= t4_read_reg(adap
, A_PCIE_FW
);
470 for (i
= 0; i
< timeout
&& !(pcie_fw
& F_PCIE_FW_ERR
); i
+= ms
) {
472 ms
= delay
[delay_idx
]; /* last element may repeat */
473 if (delay_idx
< ARRAY_SIZE(delay
) - 1)
480 pcie_fw
= t4_read_reg(adap
, A_PCIE_FW
);
481 v
= t4_read_reg(adap
, ctl_reg
);
482 if (v
== X_CIM_PF_NOACCESS
)
484 if (G_MBOWNER(v
) == X_MBOWNER_PL
) {
485 if (!(v
& F_MBMSGVALID
)) {
486 t4_write_reg(adap
, ctl_reg
,
487 V_MBOWNER(X_MBOWNER_NONE
));
491 CXGBE_DEBUG_MBOX(adap
,
492 "%s: mbox %u: %016llx %016llx %016llx %016llx "
493 "%016llx %016llx %016llx %016llx\n", __func__
, (mbox
),
494 (unsigned long long)t4_read_reg64(adap
, data_reg
),
495 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 8),
496 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 16),
497 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 24),
498 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 32),
499 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 40),
500 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 48),
501 (unsigned long long)t4_read_reg64(adap
, data_reg
+ 56));
503 CXGBE_DEBUG_MBOX(adap
,
504 "command %#x completed in %d ms (%ssleeping)\n",
506 i
+ ms
, sleep_ok
? "" : "non-");
508 res
= t4_read_reg64(adap
, data_reg
);
509 if (G_FW_CMD_OP(res
>> 32) == FW_DEBUG_CMD
) {
510 fw_asrt(adap
, data_reg
);
511 res
= V_FW_CMD_RETVAL(EIO
);
513 get_mbox_rpl(adap
, rpl
, size
/ 8, data_reg
);
515 t4_write_reg(adap
, ctl_reg
, V_MBOWNER(X_MBOWNER_NONE
));
517 t4_os_atomic_list_del(&entry
, &adap
->mbox_list
,
520 return -G_FW_CMD_RETVAL((int)res
);
525 * We timed out waiting for a reply to our mailbox command. Report
526 * the error and also check to see if the firmware reported any
529 dev_err(adap
, "command %#x in mailbox %d timed out\n",
530 *(const u8
*)cmd
, mbox
);
531 T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry
,
534 t4_report_fw_error(adap
);
536 return (pcie_fw
& F_PCIE_FW_ERR
) ? -ENXIO
: -ETIMEDOUT
;
539 int t4_wr_mbox_meat(struct adapter
*adap
, int mbox
, const void *cmd
, int size
,
540 void *rpl
, bool sleep_ok
)
542 return t4_wr_mbox_meat_timeout(adap
, mbox
, cmd
, size
, rpl
, sleep_ok
,
547 * t4_get_regs_len - return the size of the chips register set
548 * @adapter: the adapter
550 * Returns the size of the chip's BAR0 register space.
552 unsigned int t4_get_regs_len(struct adapter
*adapter
)
554 unsigned int chip_version
= CHELSIO_CHIP_VERSION(adapter
->params
.chip
);
556 switch (chip_version
) {
559 return T5_REGMAP_SIZE
;
563 "Unsupported chip version %d\n", chip_version
);
/*
 * NOTE(review): this block is truncated in the capture — the doc-comment
 * opener, the full t5/t6 register-range tables (original lines 580-1913),
 * the switch case labels, the default branch, and the closing braces are
 * missing.  Code is left byte-identical; comments only.
 */
568 * t4_get_regs - read chip registers into provided buffer
570 * @buf: register buffer
571 * @buf_size: size (in bytes) of register buffer
573 * If the provided register buffer isn't large enough for the chip's
574 * full register range, the register dump will be truncated to the
575 * register buffer's size.
577 void t4_get_regs(struct adapter
*adap
, void *buf
, size_t buf_size
)
/* Register ranges are (start, last) pairs; the table contents are elided
 * from this capture.
 */
579 static const unsigned int t5_reg_ranges
[] = {
1354 static const unsigned int t6_reg_ranges
[] = {
1915 u32
*buf_end
= (u32
*)((char *)buf
+ buf_size
);
1916 const unsigned int *reg_ranges
;
1917 int reg_ranges_size
, range
;
1918 unsigned int chip_version
= CHELSIO_CHIP_VERSION(adap
->params
.chip
);
1920 /* Select the right set of register ranges to dump depending on the
1921 * adapter chip type.
1923 switch (chip_version
) {
1925 reg_ranges
= t5_reg_ranges
;
1926 reg_ranges_size
= ARRAY_SIZE(t5_reg_ranges
);
1930 reg_ranges
= t6_reg_ranges
;
1931 reg_ranges_size
= ARRAY_SIZE(t6_reg_ranges
);
1936 "Unsupported chip version %d\n", chip_version
);
1940 /* Clear the register buffer and insert the appropriate register
1941 * values selected by the above register ranges.
1943 memset(buf
, 0, buf_size
);
1944 for (range
= 0; range
< reg_ranges_size
; range
+= 2) {
1945 unsigned int reg
= reg_ranges
[range
];
1946 unsigned int last_reg
= reg_ranges
[range
+ 1];
1947 u32
*bufp
= (u32
*)((char *)buf
+ reg
);
1949 /* Iterate across the register range filling in the register
1950 * buffer but don't write past the end of the register buffer.
1952 while (reg
<= last_reg
&& bufp
< buf_end
) {
1953 *bufp
++ = t4_read_reg(adap
, reg
);
1959 /* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
1960 #define EEPROM_DELAY 10 /* 10us per poll spin */
1961 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
1963 #define EEPROM_STAT_ADDR 0x7bfc
1966 * Small utility function to wait till any outstanding VPD Access is complete.
1967 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
1968 * VPD Access in flight. This allows us to handle the problem of having a
1969 * previous VPD Access time out and prevent an attempt to inject a new VPD
1970 * Request before any in-flight VPD request has completed.
1972 static int t4_seeprom_wait(struct adapter
*adapter
)
1974 unsigned int base
= adapter
->params
.pci
.vpd_cap_addr
;
1977 /* If no VPD Access is in flight, we can just return success right
1980 if (!adapter
->vpd_busy
)
1983 /* Poll the VPD Capability Address/Flag register waiting for it
1984 * to indicate that the operation is complete.
1986 max_poll
= EEPROM_MAX_POLL
;
1990 udelay(EEPROM_DELAY
);
1991 t4_os_pci_read_cfg2(adapter
, base
+ PCI_VPD_ADDR
, &val
);
1993 /* If the operation is complete, mark the VPD as no longer
1994 * busy and return success.
1996 if ((val
& PCI_VPD_ADDR_F
) == adapter
->vpd_flag
) {
1997 adapter
->vpd_busy
= 0;
2000 } while (--max_poll
);
2002 /* Failure! Note that we leave the VPD Busy status set in order to
2003 * avoid pushing a new VPD Access request into the VPD Capability till
2004 * the current operation eventually succeeds. It's a bug to issue a
2005 * new request when an existing request is in flight and will result
2006 * in corrupt hardware state.
2012 * t4_seeprom_read - read a serial EEPROM location
2013 * @adapter: adapter to read
2014 * @addr: EEPROM virtual address
2015 * @data: where to store the read data
2017 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2018 * VPD capability. Note that this function must be called with a virtual
2021 int t4_seeprom_read(struct adapter
*adapter
, u32 addr
, u32
*data
)
2023 unsigned int base
= adapter
->params
.pci
.vpd_cap_addr
;
2026 /* VPD Accesses must alway be 4-byte aligned!
2028 if (addr
>= EEPROMVSIZE
|| (addr
& 3))
2031 /* Wait for any previous operation which may still be in flight to
2034 ret
= t4_seeprom_wait(adapter
);
2036 dev_err(adapter
, "VPD still busy from previous operation\n");
2040 /* Issue our new VPD Read request, mark the VPD as being busy and wait
2041 * for our request to complete. If it doesn't complete, note the
2042 * error and return it to our caller. Note that we do not reset the
2045 t4_os_pci_write_cfg2(adapter
, base
+ PCI_VPD_ADDR
, (u16
)addr
);
2046 adapter
->vpd_busy
= 1;
2047 adapter
->vpd_flag
= PCI_VPD_ADDR_F
;
2048 ret
= t4_seeprom_wait(adapter
);
2050 dev_err(adapter
, "VPD read of address %#x failed\n", addr
);
2054 /* Grab the returned data, swizzle it into our endianness and
2057 t4_os_pci_read_cfg4(adapter
, base
+ PCI_VPD_DATA
, data
);
2058 *data
= le32_to_cpu(*data
);
2063 * t4_seeprom_write - write a serial EEPROM location
2064 * @adapter: adapter to write
2065 * @addr: virtual EEPROM address
2066 * @data: value to write
2068 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2069 * VPD capability. Note that this function must be called with a virtual
2072 int t4_seeprom_write(struct adapter
*adapter
, u32 addr
, u32 data
)
2074 unsigned int base
= adapter
->params
.pci
.vpd_cap_addr
;
2079 /* VPD Accesses must alway be 4-byte aligned!
2081 if (addr
>= EEPROMVSIZE
|| (addr
& 3))
2084 /* Wait for any previous operation which may still be in flight to
2087 ret
= t4_seeprom_wait(adapter
);
2089 dev_err(adapter
, "VPD still busy from previous operation\n");
2093 /* Issue our new VPD Read request, mark the VPD as being busy and wait
2094 * for our request to complete. If it doesn't complete, note the
2095 * error and return it to our caller. Note that we do not reset the
2098 t4_os_pci_write_cfg4(adapter
, base
+ PCI_VPD_DATA
,
2100 t4_os_pci_write_cfg2(adapter
, base
+ PCI_VPD_ADDR
,
2101 (u16
)addr
| PCI_VPD_ADDR_F
);
2102 adapter
->vpd_busy
= 1;
2103 adapter
->vpd_flag
= 0;
2104 ret
= t4_seeprom_wait(adapter
);
2106 dev_err(adapter
, "VPD write of address %#x failed\n", addr
);
2110 /* Reset PCI_VPD_DATA register after a transaction and wait for our
2111 * request to complete. If it doesn't complete, return error.
2113 t4_os_pci_write_cfg4(adapter
, base
+ PCI_VPD_DATA
, 0);
2114 max_poll
= EEPROM_MAX_POLL
;
2116 udelay(EEPROM_DELAY
);
2117 t4_seeprom_read(adapter
, EEPROM_STAT_ADDR
, &stats_reg
);
2118 } while ((stats_reg
& 0x1) && --max_poll
);
2122 /* Return success! */
2127 * t4_seeprom_wp - enable/disable EEPROM write protection
2128 * @adapter: the adapter
2129 * @enable: whether to enable or disable write protection
2131 * Enables or disables write protection on the serial EEPROM.
2133 int t4_seeprom_wp(struct adapter
*adapter
, int enable
)
2135 return t4_seeprom_write(adapter
, EEPROM_STAT_ADDR
, enable
? 0xc : 0);
2139 * t4_fw_tp_pio_rw - Access TP PIO through LDST
2140 * @adap: the adapter
2141 * @vals: where the indirect register values are stored/written
2142 * @nregs: how many indirect registers to read/write
2143 * @start_idx: index of first indirect register to read/write
2144 * @rw: Read (1) or Write (0)
2146 * Access TP PIO registers through LDST
2148 void t4_fw_tp_pio_rw(struct adapter
*adap
, u32
*vals
, unsigned int nregs
,
2149 unsigned int start_index
, unsigned int rw
)
2151 int cmd
= FW_LDST_ADDRSPC_TP_PIO
;
2152 struct fw_ldst_cmd c
;
2156 for (i
= 0 ; i
< nregs
; i
++) {
2157 memset(&c
, 0, sizeof(c
));
2158 c
.op_to_addrspace
= cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD
) |
2160 (rw
? F_FW_CMD_READ
:
2162 V_FW_LDST_CMD_ADDRSPACE(cmd
));
2163 c
.cycles_to_len16
= cpu_to_be32(FW_LEN16(c
));
2165 c
.u
.addrval
.addr
= cpu_to_be32(start_index
+ i
);
2166 c
.u
.addrval
.val
= rw
? 0 : cpu_to_be32(vals
[i
]);
2167 ret
= t4_wr_mbox(adap
, adap
->mbox
, &c
, sizeof(c
), &c
);
2170 vals
[i
] = be32_to_cpu(c
.u
.addrval
.val
);
2176 * t4_read_rss_key - read the global RSS key
2177 * @adap: the adapter
2178 * @key: 10-entry array holding the 320-bit RSS key
2180 * Reads the global 320-bit RSS key.
2182 void t4_read_rss_key(struct adapter
*adap
, u32
*key
)
2184 t4_fw_tp_pio_rw(adap
, key
, 10, A_TP_RSS_SECRET_KEY0
, 1);
2188 * t4_write_rss_key - program one of the RSS keys
2189 * @adap: the adapter
2190 * @key: 10-entry array holding the 320-bit RSS key
2191 * @idx: which RSS key to write
2193 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2194 * 0..15 the corresponding entry in the RSS key table is written,
2195 * otherwise the global RSS key is written.
2197 void t4_write_rss_key(struct adapter
*adap
, u32
*key
, int idx
)
2199 u32 vrt
= t4_read_reg(adap
, A_TP_RSS_CONFIG_VRT
);
2200 u8 rss_key_addr_cnt
= 16;
2202 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
2203 * allows access to key addresses 16-63 by using KeyWrAddrX
2204 * as index[5:4](upper 2) into key table
2206 if ((CHELSIO_CHIP_VERSION(adap
->params
.chip
) > CHELSIO_T5
) &&
2207 (vrt
& F_KEYEXTEND
) && (G_KEYMODE(vrt
) == 3))
2208 rss_key_addr_cnt
= 32;
2210 t4_fw_tp_pio_rw(adap
, key
, 10, A_TP_RSS_SECRET_KEY0
, 0);
2212 if (idx
>= 0 && idx
< rss_key_addr_cnt
) {
2213 if (rss_key_addr_cnt
> 16)
2214 t4_write_reg(adap
, A_TP_RSS_CONFIG_VRT
,
2215 V_KEYWRADDRX(idx
>> 4) |
2216 V_T6_VFWRADDR(idx
) | F_KEYWREN
);
2218 t4_write_reg(adap
, A_TP_RSS_CONFIG_VRT
,
2219 V_KEYWRADDR(idx
) | F_KEYWREN
);
2224 * t4_config_rss_range - configure a portion of the RSS mapping table
2225 * @adapter: the adapter
2226 * @mbox: mbox to use for the FW command
2227 * @viid: virtual interface whose RSS subtable is to be written
2228 * @start: start entry in the table to write
2229 * @n: how many table entries to write
2230 * @rspq: values for the "response queue" (Ingress Queue) lookup table
2231 * @nrspq: number of values in @rspq
2233 * Programs the selected part of the VI's RSS mapping table with the
2234 * provided values. If @nrspq < @n the supplied values are used repeatedly
2235 * until the full table range is populated.
2237 * The caller must ensure the values in @rspq are in the range allowed for
2240 int t4_config_rss_range(struct adapter
*adapter
, int mbox
, unsigned int viid
,
2241 int start
, int n
, const u16
*rspq
, unsigned int nrspq
)
2244 const u16
*rsp
= rspq
;
2245 const u16
*rsp_end
= rspq
+ nrspq
;
2246 struct fw_rss_ind_tbl_cmd cmd
;
2248 memset(&cmd
, 0, sizeof(cmd
));
2249 cmd
.op_to_viid
= cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD
) |
2250 F_FW_CMD_REQUEST
| F_FW_CMD_WRITE
|
2251 V_FW_RSS_IND_TBL_CMD_VIID(viid
));
2252 cmd
.retval_len16
= cpu_to_be32(FW_LEN16(cmd
));
2255 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2256 * Queue Identifiers. These Ingress Queue IDs are packed three to
2257 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2261 int nq
= min(n
, 32);
2263 __be32
*qp
= &cmd
.iq0_to_iq2
;
2266 * Set up the firmware RSS command header to send the next
2267 * "nq" Ingress Queue IDs to the firmware.
2269 cmd
.niqid
= cpu_to_be16(nq
);
2270 cmd
.startidx
= cpu_to_be16(start
);
2273 * "nq" more done for the start of the next loop.
2279 * While there are still Ingress Queue IDs to stuff into the
2280 * current firmware RSS command, retrieve them from the
2281 * Ingress Queue ID array and insert them into the command.
2285 * Grab up to the next 3 Ingress Queue IDs (wrapping
2286 * around the Ingress Queue ID array if necessary) and
2287 * insert them into the firmware RSS command at the
2288 * current 3-tuple position within the commad.
2292 int nqbuf
= min(3, nq
);
2298 while (nqbuf
&& nq_packed
< 32) {
2305 *qp
++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf
[0]) |
2306 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf
[1]) |
2307 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf
[2]));
2311 * Send this portion of the RRS table update to the firmware;
2312 * bail out on any errors.
2314 if (is_pf4(adapter
))
2315 ret
= t4_wr_mbox(adapter
, mbox
, &cmd
, sizeof(cmd
),
2318 ret
= t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), NULL
);
2327 * t4_config_vi_rss - configure per VI RSS settings
2328 * @adapter: the adapter
2329 * @mbox: mbox to use for the FW command
2332 * @defq: id of the default RSS queue for the VI.
2334 * Configures VI-specific RSS properties.
2336 int t4_config_vi_rss(struct adapter
*adapter
, int mbox
, unsigned int viid
,
2337 unsigned int flags
, unsigned int defq
)
2339 struct fw_rss_vi_config_cmd c
;
2341 memset(&c
, 0, sizeof(c
));
2342 c
.op_to_viid
= cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD
) |
2343 F_FW_CMD_REQUEST
| F_FW_CMD_WRITE
|
2344 V_FW_RSS_VI_CONFIG_CMD_VIID(viid
));
2345 c
.retval_len16
= cpu_to_be32(FW_LEN16(c
));
2346 c
.u
.basicvirtual
.defaultq_to_udpen
= cpu_to_be32(flags
|
2347 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq
));
2348 if (is_pf4(adapter
))
2349 return t4_wr_mbox(adapter
, mbox
, &c
, sizeof(c
), NULL
);
2351 return t4vf_wr_mbox(adapter
, &c
, sizeof(c
), NULL
);
2355 * t4_read_config_vi_rss - read the configured per VI RSS settings
2356 * @adapter: the adapter
2357 * @mbox: mbox to use for the FW command
2359 * @flags: where to place the configured flags
2360 * @defq: where to place the id of the default RSS queue for the VI.
2362 * Read configured VI-specific RSS properties.
2364 int t4_read_config_vi_rss(struct adapter
*adapter
, int mbox
, unsigned int viid
,
2365 u64
*flags
, unsigned int *defq
)
2367 struct fw_rss_vi_config_cmd c
;
2368 unsigned int result
;
2371 memset(&c
, 0, sizeof(c
));
2372 c
.op_to_viid
= cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD
) |
2373 F_FW_CMD_REQUEST
| F_FW_CMD_READ
|
2374 V_FW_RSS_VI_CONFIG_CMD_VIID(viid
));
2375 c
.retval_len16
= cpu_to_be32(FW_LEN16(c
));
2376 ret
= t4_wr_mbox(adapter
, mbox
, &c
, sizeof(c
), &c
);
2378 result
= be32_to_cpu(c
.u
.basicvirtual
.defaultq_to_udpen
);
2380 *defq
= G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result
);
2382 *flags
= result
& M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ
;
/*
 * NOTE(review): this block is truncated in the capture — the doc-comment
 * opener, the loop variable declaration, the a[]/b[] table assignments
 * (original lines ~2400-2450) and the closing braces are missing.
 * Code is left byte-identical; comments only.
 */
2389 * init_cong_ctrl - initialize congestion control parameters
2390 * @a: the alpha values for congestion control
2391 * @b: the beta values for congestion control
2393 * Initialize the congestion control parameters.
2395 static void init_cong_ctrl(unsigned short *a
, unsigned short *b
)
2399 for (i
= 0; i
< 9; i
++) {
2453 #define INIT_CMD(var, cmd, rd_wr) do { \
2454 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
2455 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
2456 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
2459 int t4_get_core_clock(struct adapter
*adapter
, struct vpd_params
*p
)
2461 u32 cclk_param
, cclk_val
;
2465 * Ask firmware for the Core Clock since it knows how to translate the
2466 * Reference Clock ('V2') VPD field into a Core Clock value ...
2468 cclk_param
= (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV
) |
2469 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK
));
2470 ret
= t4_query_params(adapter
, adapter
->mbox
, adapter
->pf
, 0,
2471 1, &cclk_param
, &cclk_val
);
2473 dev_err(adapter
, "%s: error in fetching from coreclock - %d\n",
2479 dev_debug(adapter
, "%s: p->cclk = %u\n", __func__
, p
->cclk
);
2484 * t4_get_pfres - retrieve VF resource limits
2485 * @adapter: the adapter
2487 * Retrieves configured resource limits and capabilities for a physical
2488 * function. The results are stored in @adapter->pfres.
2490 int t4_get_pfres(struct adapter
*adapter
)
2492 struct pf_resources
*pfres
= &adapter
->params
.pfres
;
2493 struct fw_pfvf_cmd cmd
, rpl
;
2498 * Execute PFVF Read command to get VF resource limits; bail out early
2499 * with error on command failure.
2501 memset(&cmd
, 0, sizeof(cmd
));
2502 cmd
.op_to_vfn
= cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD
) |
2505 V_FW_PFVF_CMD_PFN(adapter
->pf
) |
2506 V_FW_PFVF_CMD_VFN(0));
2507 cmd
.retval_len16
= cpu_to_be32(FW_LEN16(cmd
));
2508 v
= t4_wr_mbox(adapter
, adapter
->mbox
, &cmd
, sizeof(cmd
), &rpl
);
2509 if (v
!= FW_SUCCESS
)
2513 * Extract PF resource limits and return success.
2515 word
= be32_to_cpu(rpl
.niqflint_niq
);
2516 pfres
->niqflint
= G_FW_PFVF_CMD_NIQFLINT(word
);
2518 word
= be32_to_cpu(rpl
.type_to_neq
);
2519 pfres
->neq
= G_FW_PFVF_CMD_NEQ(word
);
2523 /* serial flash and firmware constants and flash config file constants */
2525 SF_ATTEMPTS
= 10, /* max retries for SF operations */
2527 /* flash command opcodes */
2528 SF_PROG_PAGE
= 2, /* program page */
2529 SF_WR_DISABLE
= 4, /* disable writes */
2530 SF_RD_STATUS
= 5, /* read status register */
2531 SF_WR_ENABLE
= 6, /* enable writes */
2532 SF_RD_DATA_FAST
= 0xb, /* read flash */
2533 SF_RD_ID
= 0x9f, /* read ID */
2534 SF_ERASE_SECTOR
= 0xd8, /* erase sector */
2538 * sf1_read - read data from the serial flash
2539 * @adapter: the adapter
2540 * @byte_cnt: number of bytes to read
2541 * @cont: whether another operation will be chained
2542 * @lock: whether to lock SF for PL access only
2543 * @valp: where to store the read data
2545 * Reads up to 4 bytes of data from the serial flash. The location of
2546 * the read needs to be specified prior to calling this by issuing the
2547 * appropriate commands to the serial flash.
2549 static int sf1_read(struct adapter
*adapter
, unsigned int byte_cnt
, int cont
,
2550 int lock
, u32
*valp
)
2554 if (!byte_cnt
|| byte_cnt
> 4)
2556 if (t4_read_reg(adapter
, A_SF_OP
) & F_BUSY
)
2558 t4_write_reg(adapter
, A_SF_OP
,
2559 V_SF_LOCK(lock
) | V_CONT(cont
) | V_BYTECNT(byte_cnt
- 1));
2560 ret
= t4_wait_op_done(adapter
, A_SF_OP
, F_BUSY
, 0, SF_ATTEMPTS
, 5);
2562 *valp
= t4_read_reg(adapter
, A_SF_DATA
);
2567 * sf1_write - write data to the serial flash
2568 * @adapter: the adapter
2569 * @byte_cnt: number of bytes to write
2570 * @cont: whether another operation will be chained
2571 * @lock: whether to lock SF for PL access only
2572 * @val: value to write
2574 * Writes up to 4 bytes of data to the serial flash. The location of
2575 * the write needs to be specified prior to calling this by issuing the
2576 * appropriate commands to the serial flash.
2578 static int sf1_write(struct adapter
*adapter
, unsigned int byte_cnt
, int cont
,
2581 if (!byte_cnt
|| byte_cnt
> 4)
2583 if (t4_read_reg(adapter
, A_SF_OP
) & F_BUSY
)
2585 t4_write_reg(adapter
, A_SF_DATA
, val
);
2586 t4_write_reg(adapter
, A_SF_OP
, V_SF_LOCK(lock
) |
2587 V_CONT(cont
) | V_BYTECNT(byte_cnt
- 1) | V_OP(1));
2588 return t4_wait_op_done(adapter
, A_SF_OP
, F_BUSY
, 0, SF_ATTEMPTS
, 5);
2592 * t4_read_flash - read words from serial flash
2593 * @adapter: the adapter
2594 * @addr: the start address for the read
2595 * @nwords: how many 32-bit words to read
2596 * @data: where to store the read data
2597 * @byte_oriented: whether to store data as bytes or as words
2599 * Read the specified number of 32-bit words from the serial flash.
2600 * If @byte_oriented is set the read data is stored as a byte array
2601 * (i.e., big-endian), otherwise as 32-bit words in the platform's
2602 * natural endianness.
2604 int t4_read_flash(struct adapter
*adapter
, unsigned int addr
,
2605 unsigned int nwords
, u32
*data
, int byte_oriented
)
2609 if (((addr
+ nwords
* sizeof(u32
)) > adapter
->params
.sf_size
) ||
2613 addr
= rte_constant_bswap32(addr
) | SF_RD_DATA_FAST
;
2615 ret
= sf1_write(adapter
, 4, 1, 0, addr
);
2619 ret
= sf1_read(adapter
, 1, 1, 0, data
);
2623 for ( ; nwords
; nwords
--, data
++) {
2624 ret
= sf1_read(adapter
, 4, nwords
> 1, nwords
== 1, data
);
2626 t4_write_reg(adapter
, A_SF_OP
, 0); /* unlock SF */
2630 *data
= cpu_to_be32(*data
);
2636 * t4_get_exprom_version - return the Expansion ROM version (if any)
2637 * @adapter: the adapter
2638 * @vers: where to place the version
2640 * Reads the Expansion ROM header from FLASH and returns the version
2641 * number (if present) through the @vers return value pointer. We return
2642 * this in the Firmware Version Format since it's convenient. Return
2643 * 0 on success, -ENOENT if no Expansion ROM is present.
2645 static int t4_get_exprom_version(struct adapter
*adapter
, u32
*vers
)
2647 struct exprom_header
{
2648 unsigned char hdr_arr
[16]; /* must start with 0x55aa */
2649 unsigned char hdr_ver
[4]; /* Expansion ROM version */
2651 u32 exprom_header_buf
[DIV_ROUND_UP(sizeof(struct exprom_header
),
2655 ret
= t4_read_flash(adapter
, FLASH_EXP_ROM_START
,
2656 ARRAY_SIZE(exprom_header_buf
),
2657 exprom_header_buf
, 0);
2661 hdr
= (struct exprom_header
*)exprom_header_buf
;
2662 if (hdr
->hdr_arr
[0] != 0x55 || hdr
->hdr_arr
[1] != 0xaa)
2665 *vers
= (V_FW_HDR_FW_VER_MAJOR(hdr
->hdr_ver
[0]) |
2666 V_FW_HDR_FW_VER_MINOR(hdr
->hdr_ver
[1]) |
2667 V_FW_HDR_FW_VER_MICRO(hdr
->hdr_ver
[2]) |
2668 V_FW_HDR_FW_VER_BUILD(hdr
->hdr_ver
[3]));
2673 * t4_get_fw_version - read the firmware version
2674 * @adapter: the adapter
2675 * @vers: where to place the version
2677 * Reads the FW version from flash.
2679 static int t4_get_fw_version(struct adapter
*adapter
, u32
*vers
)
2681 return t4_read_flash(adapter
, FLASH_FW_START
+
2682 offsetof(struct fw_hdr
, fw_ver
), 1, vers
, 0);
2686 * t4_get_bs_version - read the firmware bootstrap version
2687 * @adapter: the adapter
2688 * @vers: where to place the version
2690 * Reads the FW Bootstrap version from flash.
2692 static int t4_get_bs_version(struct adapter
*adapter
, u32
*vers
)
2694 return t4_read_flash(adapter
, FLASH_FWBOOTSTRAP_START
+
2695 offsetof(struct fw_hdr
, fw_ver
), 1,
2700 * t4_get_tp_version - read the TP microcode version
2701 * @adapter: the adapter
2702 * @vers: where to place the version
2704 * Reads the TP microcode version from flash.
2706 static int t4_get_tp_version(struct adapter
*adapter
, u32
*vers
)
2708 return t4_read_flash(adapter
, FLASH_FW_START
+
2709 offsetof(struct fw_hdr
, tp_microcode_ver
),
2714 * t4_get_version_info - extract various chip/firmware version information
2715 * @adapter: the adapter
2717 * Reads various chip/firmware version numbers and stores them into the
2718 * adapter Adapter Parameters structure. If any of the efforts fails
2719 * the first failure will be returned, but all of the version numbers
2722 int t4_get_version_info(struct adapter
*adapter
)
2726 #define FIRST_RET(__getvinfo) \
2728 int __ret = __getvinfo; \
2729 if (__ret && !ret) \
2733 FIRST_RET(t4_get_fw_version(adapter
, &adapter
->params
.fw_vers
));
2734 FIRST_RET(t4_get_bs_version(adapter
, &adapter
->params
.bs_vers
));
2735 FIRST_RET(t4_get_tp_version(adapter
, &adapter
->params
.tp_vers
));
2736 FIRST_RET(t4_get_exprom_version(adapter
, &adapter
->params
.er_vers
));
2744 * t4_dump_version_info - dump all of the adapter configuration IDs
2745 * @adapter: the adapter
2747 * Dumps all of the various bits of adapter configuration version/revision
2748 * IDs information. This is typically called at some point after
2749 * t4_get_version_info() has been called.
2751 void t4_dump_version_info(struct adapter
*adapter
)
2754 * Device information.
2756 dev_info(adapter
, "Chelsio rev %d\n",
2757 CHELSIO_CHIP_RELEASE(adapter
->params
.chip
));
2762 if (!adapter
->params
.fw_vers
)
2763 dev_warn(adapter
, "No firmware loaded\n");
2765 dev_info(adapter
, "Firmware version: %u.%u.%u.%u\n",
2766 G_FW_HDR_FW_VER_MAJOR(adapter
->params
.fw_vers
),
2767 G_FW_HDR_FW_VER_MINOR(adapter
->params
.fw_vers
),
2768 G_FW_HDR_FW_VER_MICRO(adapter
->params
.fw_vers
),
2769 G_FW_HDR_FW_VER_BUILD(adapter
->params
.fw_vers
));
2772 * Bootstrap Firmware Version.
2774 if (!adapter
->params
.bs_vers
)
2775 dev_warn(adapter
, "No bootstrap loaded\n");
2777 dev_info(adapter
, "Bootstrap version: %u.%u.%u.%u\n",
2778 G_FW_HDR_FW_VER_MAJOR(adapter
->params
.bs_vers
),
2779 G_FW_HDR_FW_VER_MINOR(adapter
->params
.bs_vers
),
2780 G_FW_HDR_FW_VER_MICRO(adapter
->params
.bs_vers
),
2781 G_FW_HDR_FW_VER_BUILD(adapter
->params
.bs_vers
));
2784 * TP Microcode Version.
2786 if (!adapter
->params
.tp_vers
)
2787 dev_warn(adapter
, "No TP Microcode loaded\n");
2789 dev_info(adapter
, "TP Microcode version: %u.%u.%u.%u\n",
2790 G_FW_HDR_FW_VER_MAJOR(adapter
->params
.tp_vers
),
2791 G_FW_HDR_FW_VER_MINOR(adapter
->params
.tp_vers
),
2792 G_FW_HDR_FW_VER_MICRO(adapter
->params
.tp_vers
),
2793 G_FW_HDR_FW_VER_BUILD(adapter
->params
.tp_vers
));
2796 * Expansion ROM version.
2798 if (!adapter
->params
.er_vers
)
2799 dev_info(adapter
, "No Expansion ROM loaded\n");
2801 dev_info(adapter
, "Expansion ROM version: %u.%u.%u.%u\n",
2802 G_FW_HDR_FW_VER_MAJOR(adapter
->params
.er_vers
),
2803 G_FW_HDR_FW_VER_MINOR(adapter
->params
.er_vers
),
2804 G_FW_HDR_FW_VER_MICRO(adapter
->params
.er_vers
),
2805 G_FW_HDR_FW_VER_BUILD(adapter
->params
.er_vers
));
2808 #define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \
2811 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
2812 * @caps16: a 16-bit Port Capabilities value
2814 * Returns the equivalent 32-bit Port Capabilities value.
2816 fw_port_cap32_t
fwcaps16_to_caps32(fw_port_cap16_t caps16
)
2818 fw_port_cap32_t caps32
= 0;
2820 #define CAP16_TO_CAP32(__cap) \
2822 if (caps16 & FW_PORT_CAP_##__cap) \
2823 caps32 |= FW_PORT_CAP32_##__cap; \
2826 CAP16_TO_CAP32(SPEED_100M
);
2827 CAP16_TO_CAP32(SPEED_1G
);
2828 CAP16_TO_CAP32(SPEED_25G
);
2829 CAP16_TO_CAP32(SPEED_10G
);
2830 CAP16_TO_CAP32(SPEED_40G
);
2831 CAP16_TO_CAP32(SPEED_100G
);
2832 CAP16_TO_CAP32(FC_RX
);
2833 CAP16_TO_CAP32(FC_TX
);
2834 CAP16_TO_CAP32(ANEG
);
2835 CAP16_TO_CAP32(MDIX
);
2836 CAP16_TO_CAP32(MDIAUTO
);
2837 CAP16_TO_CAP32(FEC_RS
);
2838 CAP16_TO_CAP32(FEC_BASER_RS
);
2839 CAP16_TO_CAP32(802_3_PAUSE
);
2840 CAP16_TO_CAP32(802_3_ASM_DIR
);
2842 #undef CAP16_TO_CAP32
2848 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
2849 * @caps32: a 32-bit Port Capabilities value
2851 * Returns the equivalent 16-bit Port Capabilities value. Note that
2852 * not all 32-bit Port Capabilities can be represented in the 16-bit
2853 * Port Capabilities and some fields/values may not make it.
2855 static fw_port_cap16_t
fwcaps32_to_caps16(fw_port_cap32_t caps32
)
2857 fw_port_cap16_t caps16
= 0;
2859 #define CAP32_TO_CAP16(__cap) \
2861 if (caps32 & FW_PORT_CAP32_##__cap) \
2862 caps16 |= FW_PORT_CAP_##__cap; \
2865 CAP32_TO_CAP16(SPEED_100M
);
2866 CAP32_TO_CAP16(SPEED_1G
);
2867 CAP32_TO_CAP16(SPEED_10G
);
2868 CAP32_TO_CAP16(SPEED_25G
);
2869 CAP32_TO_CAP16(SPEED_40G
);
2870 CAP32_TO_CAP16(SPEED_100G
);
2871 CAP32_TO_CAP16(FC_RX
);
2872 CAP32_TO_CAP16(FC_TX
);
2873 CAP32_TO_CAP16(802_3_PAUSE
);
2874 CAP32_TO_CAP16(802_3_ASM_DIR
);
2875 CAP32_TO_CAP16(ANEG
);
2876 CAP32_TO_CAP16(MDIX
);
2877 CAP32_TO_CAP16(MDIAUTO
);
2878 CAP32_TO_CAP16(FEC_RS
);
2879 CAP32_TO_CAP16(FEC_BASER_RS
);
2881 #undef CAP32_TO_CAP16
2886 /* Translate Firmware Pause specification to Common Code */
2887 static inline enum cc_pause
fwcap_to_cc_pause(fw_port_cap32_t fw_pause
)
2889 enum cc_pause cc_pause
= 0;
2891 if (fw_pause
& FW_PORT_CAP32_FC_RX
)
2892 cc_pause
|= PAUSE_RX
;
2893 if (fw_pause
& FW_PORT_CAP32_FC_TX
)
2894 cc_pause
|= PAUSE_TX
;
2899 /* Translate Common Code Pause Frame specification into Firmware */
2900 static inline fw_port_cap32_t
cc_to_fwcap_pause(enum cc_pause cc_pause
)
2902 fw_port_cap32_t fw_pause
= 0;
2904 if (cc_pause
& PAUSE_RX
)
2905 fw_pause
|= FW_PORT_CAP32_FC_RX
;
2906 if (cc_pause
& PAUSE_TX
)
2907 fw_pause
|= FW_PORT_CAP32_FC_TX
;
2912 /* Translate Firmware Forward Error Correction specification to Common Code */
2913 static inline enum cc_fec
fwcap_to_cc_fec(fw_port_cap32_t fw_fec
)
2915 enum cc_fec cc_fec
= 0;
2917 if (fw_fec
& FW_PORT_CAP32_FEC_RS
)
2919 if (fw_fec
& FW_PORT_CAP32_FEC_BASER_RS
)
2920 cc_fec
|= FEC_BASER_RS
;
2925 /* Translate Common Code Forward Error Correction specification to Firmware */
2926 static inline fw_port_cap32_t
cc_to_fwcap_fec(enum cc_fec cc_fec
)
2928 fw_port_cap32_t fw_fec
= 0;
2930 if (cc_fec
& FEC_RS
)
2931 fw_fec
|= FW_PORT_CAP32_FEC_RS
;
2932 if (cc_fec
& FEC_BASER_RS
)
2933 fw_fec
|= FW_PORT_CAP32_FEC_BASER_RS
;
2939 * t4_link_l1cfg - apply link configuration to MAC/PHY
2940 * @adapter: the adapter
2941 * @mbox: the Firmware Mailbox to use
2942 * @port: the Port ID
2943 * @lc: the Port's Link Configuration
2945 * Set up a port's MAC and PHY according to a desired link configuration.
2946 * - If the PHY can auto-negotiate first decide what to advertise, then
2947 * enable/disable auto-negotiation as desired, and reset.
2948 * - If the PHY does not auto-negotiate just reset it.
2949 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
2950 * otherwise do it later based on the outcome of auto-negotiation.
2952 int t4_link_l1cfg(struct adapter
*adap
, unsigned int mbox
, unsigned int port
,
2953 struct link_config
*lc
)
2955 unsigned int fw_mdi
= V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO
);
2956 unsigned int fw_caps
= adap
->params
.fw_caps_support
;
2957 fw_port_cap32_t fw_fc
, cc_fec
, fw_fec
, rcap
;
2958 struct fw_port_cmd cmd
;
2962 fw_fc
= cc_to_fwcap_pause(lc
->requested_fc
);
2964 /* Convert Common Code Forward Error Control settings into the
2965 * Firmware's API. If the current Requested FEC has "Automatic"
2966 * (IEEE 802.3) specified, then we use whatever the Firmware
2967 * sent us as part of it's IEEE 802.3-based interpratation of
2968 * the Transceiver Module EPROM FEC parameters. Otherwise we
2969 * use whatever is in the current Requested FEC settings.
2971 if (lc
->requested_fec
& FEC_AUTO
)
2972 cc_fec
= lc
->auto_fec
;
2974 cc_fec
= lc
->requested_fec
;
2975 fw_fec
= cc_to_fwcap_fec(cc_fec
);
2977 /* Figure out what our Requested Port Capabilities are going to be.
2979 if (!(lc
->pcaps
& FW_PORT_CAP32_ANEG
)) {
2980 rcap
= (lc
->pcaps
& ADVERT_MASK
) | fw_fc
| fw_fec
;
2981 lc
->fc
= lc
->requested_fc
& ~PAUSE_AUTONEG
;
2983 } else if (lc
->autoneg
== AUTONEG_DISABLE
) {
2984 rcap
= lc
->requested_speed
| fw_fc
| fw_fec
| fw_mdi
;
2985 lc
->fc
= lc
->requested_fc
& ~PAUSE_AUTONEG
;
2988 rcap
= lc
->acaps
| fw_fc
| fw_fec
| fw_mdi
;
2991 /* And send that on to the Firmware ...
2993 memset(&cmd
, 0, sizeof(cmd
));
2994 cmd
.op_to_portid
= cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD
) |
2995 F_FW_CMD_REQUEST
| F_FW_CMD_EXEC
|
2996 V_FW_PORT_CMD_PORTID(port
));
2997 cmd
.action_to_len16
=
2998 cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps
== FW_CAPS16
?
2999 FW_PORT_ACTION_L1_CFG
:
3000 FW_PORT_ACTION_L1_CFG32
) |
3003 if (fw_caps
== FW_CAPS16
)
3004 cmd
.u
.l1cfg
.rcap
= cpu_to_be32(fwcaps32_to_caps16(rcap
));
3006 cmd
.u
.l1cfg32
.rcap32
= cpu_to_be32(rcap
);
3008 return t4_wr_mbox(adap
, mbox
, &cmd
, sizeof(cmd
), NULL
);
3012 * t4_flash_cfg_addr - return the address of the flash configuration file
3013 * @adapter: the adapter
3015 * Return the address within the flash where the Firmware Configuration
3016 * File is stored, or an error if the device FLASH is too small to contain
3017 * a Firmware Configuration File.
3019 int t4_flash_cfg_addr(struct adapter
*adapter
)
3022 * If the device FLASH isn't large enough to hold a Firmware
3023 * Configuration File, return an error.
3025 if (adapter
->params
.sf_size
< FLASH_CFG_START
+ FLASH_CFG_MAX_SIZE
)
3028 return FLASH_CFG_START
;
3031 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
3034 * t4_intr_enable - enable interrupts
3035 * @adapter: the adapter whose interrupts should be enabled
3037 * Enable PF-specific interrupts for the calling function and the top-level
3038 * interrupt concentrator for global interrupts. Interrupts are already
3039 * enabled at each module, here we just enable the roots of the interrupt
3042 * Note: this function should be called only when the driver manages
3043 * non PF-specific interrupts from the various HW modules. Only one PCI
3044 * function at a time should be doing this.
3046 void t4_intr_enable(struct adapter
*adapter
)
3049 u32 whoami
= t4_read_reg(adapter
, A_PL_WHOAMI
);
3050 u32 pf
= CHELSIO_CHIP_VERSION(adapter
->params
.chip
) <= CHELSIO_T5
?
3051 G_SOURCEPF(whoami
) : G_T6_SOURCEPF(whoami
);
3053 if (CHELSIO_CHIP_VERSION(adapter
->params
.chip
) <= CHELSIO_T5
)
3054 val
= F_ERR_DROPPED_DB
| F_ERR_EGR_CTXT_PRIO
| F_DBFIFO_HP_INT
;
3055 t4_write_reg(adapter
, A_SGE_INT_ENABLE3
, F_ERR_CPL_EXCEED_IQE_SIZE
|
3056 F_ERR_INVALID_CIDX_INC
| F_ERR_CPL_OPCODE_0
|
3057 F_ERR_DATA_CPL_ON_HIGH_QID1
| F_INGRESS_SIZE_ERR
|
3058 F_ERR_DATA_CPL_ON_HIGH_QID0
| F_ERR_BAD_DB_PIDX3
|
3059 F_ERR_BAD_DB_PIDX2
| F_ERR_BAD_DB_PIDX1
|
3060 F_ERR_BAD_DB_PIDX0
| F_ERR_ING_CTXT_PRIO
|
3061 F_DBFIFO_LP_INT
| F_EGRESS_SIZE_ERR
| val
);
3062 t4_write_reg(adapter
, MYPF_REG(A_PL_PF_INT_ENABLE
), PF_INTR_MASK
);
3063 t4_set_reg_field(adapter
, A_PL_INT_MAP0
, 0, 1 << pf
);
3067 * t4_intr_disable - disable interrupts
3068 * @adapter: the adapter whose interrupts should be disabled
3070 * Disable interrupts. We only disable the top-level interrupt
3071 * concentrators. The caller must be a PCI function managing global
3074 void t4_intr_disable(struct adapter
*adapter
)
3076 u32 whoami
= t4_read_reg(adapter
, A_PL_WHOAMI
);
3077 u32 pf
= CHELSIO_CHIP_VERSION(adapter
->params
.chip
) <= CHELSIO_T5
?
3078 G_SOURCEPF(whoami
) : G_T6_SOURCEPF(whoami
);
3080 t4_write_reg(adapter
, MYPF_REG(A_PL_PF_INT_ENABLE
), 0);
3081 t4_set_reg_field(adapter
, A_PL_INT_MAP0
, 1 << pf
, 0);
3085 * t4_get_port_type_description - return Port Type string description
3086 * @port_type: firmware Port Type enumeration
3088 const char *t4_get_port_type_description(enum fw_port_type port_type
)
3090 static const char * const port_type_description
[] = {
3115 if (port_type
< ARRAY_SIZE(port_type_description
))
3116 return port_type_description
[port_type
];
3121 * t4_get_mps_bg_map - return the buffer groups associated with a port
3122 * @adap: the adapter
3123 * @pidx: the port index
3125 * Returns a bitmap indicating which MPS buffer groups are associated
3126 * with the given port. Bit i is set if buffer group i is used by the
3129 unsigned int t4_get_mps_bg_map(struct adapter
*adap
, unsigned int pidx
)
3131 unsigned int chip_version
= CHELSIO_CHIP_VERSION(adap
->params
.chip
);
3132 unsigned int nports
= 1 << G_NUMPORTS(t4_read_reg(adap
,
3135 if (pidx
>= nports
) {
3136 dev_warn(adap
, "MPS Port Index %d >= Nports %d\n",
3141 switch (chip_version
) {
3146 case 2: return 3 << (2 * pidx
);
3147 case 4: return 1 << pidx
;
3153 case 2: return 1 << (2 * pidx
);
3158 dev_err(adap
, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
3159 chip_version
, nports
);
3164 * t4_get_tp_ch_map - return TP ingress channels associated with a port
3165 * @adapter: the adapter
3166 * @pidx: the port index
3168 * Returns a bitmap indicating which TP Ingress Channels are associated with
3169 * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
3171 unsigned int t4_get_tp_ch_map(struct adapter
*adapter
, unsigned int pidx
)
3173 unsigned int chip_version
= CHELSIO_CHIP_VERSION(adapter
->params
.chip
);
3174 unsigned int nports
= 1 << G_NUMPORTS(t4_read_reg(adapter
,
3177 if (pidx
>= nports
) {
3178 dev_warn(adap
, "TP Port Index %d >= Nports %d\n",
3183 switch (chip_version
) {
3186 /* Note that this happens to be the same values as the MPS
3187 * Buffer Group Map for these Chips. But we replicate the code
3188 * here because they're really separate concepts.
3192 case 2: return 3 << (2 * pidx
);
3193 case 4: return 1 << pidx
;
3199 case 2: return 1 << pidx
;
3204 dev_err(adapter
, "Need TP Channel Map for Chip %0x, Nports %d\n",
3205 chip_version
, nports
);
3210 * t4_get_port_stats - collect port statistics
3211 * @adap: the adapter
3212 * @idx: the port index
3213 * @p: the stats structure to fill
3215 * Collect statistics related to the given port from HW.
3217 void t4_get_port_stats(struct adapter
*adap
, int idx
, struct port_stats
*p
)
3219 u32 bgmap
= t4_get_mps_bg_map(adap
, idx
);
3220 u32 stat_ctl
= t4_read_reg(adap
, A_MPS_STAT_CTL
);
3222 #define GET_STAT(name) \
3223 t4_read_reg64(adap, \
3224 (is_t4(adap->params.chip) ? \
3225 PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\
3226 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3227 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3229 p
->tx_octets
= GET_STAT(TX_PORT_BYTES
);
3230 p
->tx_frames
= GET_STAT(TX_PORT_FRAMES
);
3231 p
->tx_bcast_frames
= GET_STAT(TX_PORT_BCAST
);
3232 p
->tx_mcast_frames
= GET_STAT(TX_PORT_MCAST
);
3233 p
->tx_ucast_frames
= GET_STAT(TX_PORT_UCAST
);
3234 p
->tx_error_frames
= GET_STAT(TX_PORT_ERROR
);
3235 p
->tx_frames_64
= GET_STAT(TX_PORT_64B
);
3236 p
->tx_frames_65_127
= GET_STAT(TX_PORT_65B_127B
);
3237 p
->tx_frames_128_255
= GET_STAT(TX_PORT_128B_255B
);
3238 p
->tx_frames_256_511
= GET_STAT(TX_PORT_256B_511B
);
3239 p
->tx_frames_512_1023
= GET_STAT(TX_PORT_512B_1023B
);
3240 p
->tx_frames_1024_1518
= GET_STAT(TX_PORT_1024B_1518B
);
3241 p
->tx_frames_1519_max
= GET_STAT(TX_PORT_1519B_MAX
);
3242 p
->tx_drop
= GET_STAT(TX_PORT_DROP
);
3243 p
->tx_pause
= GET_STAT(TX_PORT_PAUSE
);
3244 p
->tx_ppp0
= GET_STAT(TX_PORT_PPP0
);
3245 p
->tx_ppp1
= GET_STAT(TX_PORT_PPP1
);
3246 p
->tx_ppp2
= GET_STAT(TX_PORT_PPP2
);
3247 p
->tx_ppp3
= GET_STAT(TX_PORT_PPP3
);
3248 p
->tx_ppp4
= GET_STAT(TX_PORT_PPP4
);
3249 p
->tx_ppp5
= GET_STAT(TX_PORT_PPP5
);
3250 p
->tx_ppp6
= GET_STAT(TX_PORT_PPP6
);
3251 p
->tx_ppp7
= GET_STAT(TX_PORT_PPP7
);
3253 if (CHELSIO_CHIP_VERSION(adap
->params
.chip
) >= CHELSIO_T5
) {
3254 if (stat_ctl
& F_COUNTPAUSESTATTX
) {
3255 p
->tx_frames
-= p
->tx_pause
;
3256 p
->tx_octets
-= p
->tx_pause
* 64;
3258 if (stat_ctl
& F_COUNTPAUSEMCTX
)
3259 p
->tx_mcast_frames
-= p
->tx_pause
;
3262 p
->rx_octets
= GET_STAT(RX_PORT_BYTES
);
3263 p
->rx_frames
= GET_STAT(RX_PORT_FRAMES
);
3264 p
->rx_bcast_frames
= GET_STAT(RX_PORT_BCAST
);
3265 p
->rx_mcast_frames
= GET_STAT(RX_PORT_MCAST
);
3266 p
->rx_ucast_frames
= GET_STAT(RX_PORT_UCAST
);
3267 p
->rx_too_long
= GET_STAT(RX_PORT_MTU_ERROR
);
3268 p
->rx_jabber
= GET_STAT(RX_PORT_MTU_CRC_ERROR
);
3269 p
->rx_fcs_err
= GET_STAT(RX_PORT_CRC_ERROR
);
3270 p
->rx_len_err
= GET_STAT(RX_PORT_LEN_ERROR
);
3271 p
->rx_symbol_err
= GET_STAT(RX_PORT_SYM_ERROR
);
3272 p
->rx_runt
= GET_STAT(RX_PORT_LESS_64B
);
3273 p
->rx_frames_64
= GET_STAT(RX_PORT_64B
);
3274 p
->rx_frames_65_127
= GET_STAT(RX_PORT_65B_127B
);
3275 p
->rx_frames_128_255
= GET_STAT(RX_PORT_128B_255B
);
3276 p
->rx_frames_256_511
= GET_STAT(RX_PORT_256B_511B
);
3277 p
->rx_frames_512_1023
= GET_STAT(RX_PORT_512B_1023B
);
3278 p
->rx_frames_1024_1518
= GET_STAT(RX_PORT_1024B_1518B
);
3279 p
->rx_frames_1519_max
= GET_STAT(RX_PORT_1519B_MAX
);
3280 p
->rx_pause
= GET_STAT(RX_PORT_PAUSE
);
3281 p
->rx_ppp0
= GET_STAT(RX_PORT_PPP0
);
3282 p
->rx_ppp1
= GET_STAT(RX_PORT_PPP1
);
3283 p
->rx_ppp2
= GET_STAT(RX_PORT_PPP2
);
3284 p
->rx_ppp3
= GET_STAT(RX_PORT_PPP3
);
3285 p
->rx_ppp4
= GET_STAT(RX_PORT_PPP4
);
3286 p
->rx_ppp5
= GET_STAT(RX_PORT_PPP5
);
3287 p
->rx_ppp6
= GET_STAT(RX_PORT_PPP6
);
3288 p
->rx_ppp7
= GET_STAT(RX_PORT_PPP7
);
3290 if (CHELSIO_CHIP_VERSION(adap
->params
.chip
) >= CHELSIO_T5
) {
3291 if (stat_ctl
& F_COUNTPAUSESTATRX
) {
3292 p
->rx_frames
-= p
->rx_pause
;
3293 p
->rx_octets
-= p
->rx_pause
* 64;
3295 if (stat_ctl
& F_COUNTPAUSEMCRX
)
3296 p
->rx_mcast_frames
-= p
->rx_pause
;
3299 p
->rx_ovflow0
= (bgmap
& 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME
) : 0;
3300 p
->rx_ovflow1
= (bgmap
& 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME
) : 0;
3301 p
->rx_ovflow2
= (bgmap
& 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME
) : 0;
3302 p
->rx_ovflow3
= (bgmap
& 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME
) : 0;
3303 p
->rx_trunc0
= (bgmap
& 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME
) : 0;
3304 p
->rx_trunc1
= (bgmap
& 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME
) : 0;
3305 p
->rx_trunc2
= (bgmap
& 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME
) : 0;
3306 p
->rx_trunc3
= (bgmap
& 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME
) : 0;
3313 * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
3314 * @adap: The adapter
3316 * @stats: Current stats to fill
3317 * @offset: Previous stats snapshot
3319 void t4_get_port_stats_offset(struct adapter
*adap
, int idx
,
3320 struct port_stats
*stats
,
3321 struct port_stats
*offset
)
3326 t4_get_port_stats(adap
, idx
, stats
);
3327 for (i
= 0, s
= (u64
*)stats
, o
= (u64
*)offset
;
3328 i
< (sizeof(struct port_stats
) / sizeof(u64
));
3334 * t4_clr_port_stats - clear port statistics
3335 * @adap: the adapter
3336 * @idx: the port index
3338 * Clear HW statistics for the given port.
3340 void t4_clr_port_stats(struct adapter
*adap
, int idx
)
3343 u32 bgmap
= t4_get_mps_bg_map(adap
, idx
);
3346 if (is_t4(adap
->params
.chip
))
3347 port_base_addr
= PORT_BASE(idx
);
3349 port_base_addr
= T5_PORT_BASE(idx
);
3351 for (i
= A_MPS_PORT_STAT_TX_PORT_BYTES_L
;
3352 i
<= A_MPS_PORT_STAT_TX_PORT_PPP7_H
; i
+= 8)
3353 t4_write_reg(adap
, port_base_addr
+ i
, 0);
3354 for (i
= A_MPS_PORT_STAT_RX_PORT_BYTES_L
;
3355 i
<= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H
; i
+= 8)
3356 t4_write_reg(adap
, port_base_addr
+ i
, 0);
3357 for (i
= 0; i
< 4; i
++)
3358 if (bgmap
& (1 << i
)) {
3360 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L
+
3363 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L
+
3369 * t4_fw_hello - establish communication with FW
3370 * @adap: the adapter
3371 * @mbox: mailbox to use for the FW command
3372 * @evt_mbox: mailbox to receive async FW events
3373 * @master: specifies the caller's willingness to be the device master
3374 * @state: returns the current device state (if non-NULL)
3376 * Issues a command to establish communication with FW. Returns either
3377 * an error (negative integer) or the mailbox of the Master PF.
3379 int t4_fw_hello(struct adapter
*adap
, unsigned int mbox
, unsigned int evt_mbox
,
3380 enum dev_master master
, enum dev_state
*state
)
3383 struct fw_hello_cmd c
;
3385 unsigned int master_mbox
;
3386 int retries
= FW_CMD_HELLO_RETRIES
;
3389 memset(&c
, 0, sizeof(c
));
3390 INIT_CMD(c
, HELLO
, WRITE
);
3391 c
.err_to_clearinit
= cpu_to_be32(
3392 V_FW_HELLO_CMD_MASTERDIS(master
== MASTER_CANT
) |
3393 V_FW_HELLO_CMD_MASTERFORCE(master
== MASTER_MUST
) |
3394 V_FW_HELLO_CMD_MBMASTER(master
== MASTER_MUST
? mbox
:
3395 M_FW_HELLO_CMD_MBMASTER
) |
3396 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox
) |
3397 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS
) |
3398 F_FW_HELLO_CMD_CLEARINIT
);
3401 * Issue the HELLO command to the firmware. If it's not successful
3402 * but indicates that we got a "busy" or "timeout" condition, retry
3403 * the HELLO until we exhaust our retry limit. If we do exceed our
3404 * retry limit, check to see if the firmware left us any error
3405 * information and report that if so ...
3407 ret
= t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), &c
);
3408 if (ret
!= FW_SUCCESS
) {
3409 if ((ret
== -EBUSY
|| ret
== -ETIMEDOUT
) && retries
-- > 0)
3411 if (t4_read_reg(adap
, A_PCIE_FW
) & F_PCIE_FW_ERR
)
3412 t4_report_fw_error(adap
);
3416 v
= be32_to_cpu(c
.err_to_clearinit
);
3417 master_mbox
= G_FW_HELLO_CMD_MBMASTER(v
);
3419 if (v
& F_FW_HELLO_CMD_ERR
)
3420 *state
= DEV_STATE_ERR
;
3421 else if (v
& F_FW_HELLO_CMD_INIT
)
3422 *state
= DEV_STATE_INIT
;
3424 *state
= DEV_STATE_UNINIT
;
3428 * If we're not the Master PF then we need to wait around for the
3429 * Master PF Driver to finish setting up the adapter.
3431 * Note that we also do this wait if we're a non-Master-capable PF and
3432 * there is no current Master PF; a Master PF may show up momentarily
3433 * and we wouldn't want to fail pointlessly. (This can happen when an
3434 * OS loads lots of different drivers rapidly at the same time). In
3435 * this case, the Master PF returned by the firmware will be
3436 * M_PCIE_FW_MASTER so the test below will work ...
3438 if ((v
& (F_FW_HELLO_CMD_ERR
| F_FW_HELLO_CMD_INIT
)) == 0 &&
3439 master_mbox
!= mbox
) {
3440 int waiting
= FW_CMD_HELLO_TIMEOUT
;
3443 * Wait for the firmware to either indicate an error or
3444 * initialized state. If we see either of these we bail out
3445 * and report the issue to the caller. If we exhaust the
3446 * "hello timeout" and we haven't exhausted our retries, try
3447 * again. Otherwise bail with a timeout error.
3456 * If neither Error nor Initialialized are indicated
3457 * by the firmware keep waiting till we exaust our
3458 * timeout ... and then retry if we haven't exhausted
3461 pcie_fw
= t4_read_reg(adap
, A_PCIE_FW
);
3462 if (!(pcie_fw
& (F_PCIE_FW_ERR
| F_PCIE_FW_INIT
))) {
3473 * We either have an Error or Initialized condition
3474 * report errors preferentially.
3477 if (pcie_fw
& F_PCIE_FW_ERR
)
3478 *state
= DEV_STATE_ERR
;
3479 else if (pcie_fw
& F_PCIE_FW_INIT
)
3480 *state
= DEV_STATE_INIT
;
3484 * If we arrived before a Master PF was selected and
3485 * there's not a valid Master PF, grab its identity
3488 if (master_mbox
== M_PCIE_FW_MASTER
&&
3489 (pcie_fw
& F_PCIE_FW_MASTER_VLD
))
3490 master_mbox
= G_PCIE_FW_MASTER(pcie_fw
);
3499 * t4_fw_bye - end communication with FW
3500 * @adap: the adapter
3501 * @mbox: mailbox to use for the FW command
3503 * Issues a command to terminate communication with FW.
3505 int t4_fw_bye(struct adapter
*adap
, unsigned int mbox
)
3507 struct fw_bye_cmd c
;
3509 memset(&c
, 0, sizeof(c
));
3510 INIT_CMD(c
, BYE
, WRITE
);
3511 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
3515 * t4_fw_reset - issue a reset to FW
3516 * @adap: the adapter
3517 * @mbox: mailbox to use for the FW command
3518 * @reset: specifies the type of reset to perform
3520 * Issues a reset command of the specified type to FW.
3522 int t4_fw_reset(struct adapter
*adap
, unsigned int mbox
, int reset
)
3524 struct fw_reset_cmd c
;
3526 memset(&c
, 0, sizeof(c
));
3527 INIT_CMD(c
, RESET
, WRITE
);
3528 c
.val
= cpu_to_be32(reset
);
3529 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
3533 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
3534 * @adap: the adapter
3535 * @mbox: mailbox to use for the FW RESET command (if desired)
3536 * @force: force uP into RESET even if FW RESET command fails
3538 * Issues a RESET command to firmware (if desired) with a HALT indication
3539 * and then puts the microprocessor into RESET state. The RESET command
3540 * will only be issued if a legitimate mailbox is provided (mbox <=
3541 * M_PCIE_FW_MASTER).
3543 * This is generally used in order for the host to safely manipulate the
3544 * adapter without fear of conflicting with whatever the firmware might
3545 * be doing. The only way out of this state is to RESTART the firmware
3548 int t4_fw_halt(struct adapter
*adap
, unsigned int mbox
, int force
)
3553 * If a legitimate mailbox is provided, issue a RESET command
3554 * with a HALT indication.
3556 if (mbox
<= M_PCIE_FW_MASTER
) {
3557 struct fw_reset_cmd c
;
3559 memset(&c
, 0, sizeof(c
));
3560 INIT_CMD(c
, RESET
, WRITE
);
3561 c
.val
= cpu_to_be32(F_PIORST
| F_PIORSTMODE
);
3562 c
.halt_pkd
= cpu_to_be32(F_FW_RESET_CMD_HALT
);
3563 ret
= t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
3567 * Normally we won't complete the operation if the firmware RESET
3568 * command fails but if our caller insists we'll go ahead and put the
3569 * uP into RESET. This can be useful if the firmware is hung or even
3570 * missing ... We'll have to take the risk of putting the uP into
3571 * RESET without the cooperation of firmware in that case.
3573 * We also force the firmware's HALT flag to be on in case we bypassed
3574 * the firmware RESET command above or we're dealing with old firmware
3575 * which doesn't have the HALT capability. This will serve as a flag
3576 * for the incoming firmware to know that it's coming out of a HALT
3577 * rather than a RESET ... if it's new enough to understand that ...
3579 if (ret
== 0 || force
) {
3580 t4_set_reg_field(adap
, A_CIM_BOOT_CFG
, F_UPCRST
, F_UPCRST
);
3581 t4_set_reg_field(adap
, A_PCIE_FW
, F_PCIE_FW_HALT
,
3586 * And we always return the result of the firmware RESET command
3587 * even when we force the uP into RESET ...
3593 * t4_fw_restart - restart the firmware by taking the uP out of RESET
3594 * @adap: the adapter
3595 * @mbox: mailbox to use for the FW RESET command (if desired)
3596 * @reset: if we want to do a RESET to restart things
3598 * Restart firmware previously halted by t4_fw_halt(). On successful
3599 * return the previous PF Master remains as the new PF Master and there
3600 * is no need to issue a new HELLO command, etc.
3602 * We do this in two ways:
3604 * 1. If we're dealing with newer firmware we'll simply want to take
3605 * the chip's microprocessor out of RESET. This will cause the
3606 * firmware to start up from its start vector. And then we'll loop
3607 * until the firmware indicates it's started again (PCIE_FW.HALT
3608 * reset to 0) or we timeout.
3610 * 2. If we're dealing with older firmware then we'll need to RESET
3611 * the chip since older firmware won't recognize the PCIE_FW.HALT
3612 * flag and automatically RESET itself on startup.
3614 int t4_fw_restart(struct adapter
*adap
, unsigned int mbox
, int reset
)
3618 * Since we're directing the RESET instead of the firmware
3619 * doing it automatically, we need to clear the PCIE_FW.HALT
3622 t4_set_reg_field(adap
, A_PCIE_FW
, F_PCIE_FW_HALT
, 0);
3625 * If we've been given a valid mailbox, first try to get the
3626 * firmware to do the RESET. If that works, great and we can
3627 * return success. Otherwise, if we haven't been given a
3628 * valid mailbox or the RESET command failed, fall back to
3629 * hitting the chip with a hammer.
3631 if (mbox
<= M_PCIE_FW_MASTER
) {
3632 t4_set_reg_field(adap
, A_CIM_BOOT_CFG
, F_UPCRST
, 0);
3634 if (t4_fw_reset(adap
, mbox
,
3635 F_PIORST
| F_PIORSTMODE
) == 0)
3639 t4_write_reg(adap
, A_PL_RST
, F_PIORST
| F_PIORSTMODE
);
3644 t4_set_reg_field(adap
, A_CIM_BOOT_CFG
, F_UPCRST
, 0);
3645 for (ms
= 0; ms
< FW_CMD_MAX_TIMEOUT
; ) {
3646 if (!(t4_read_reg(adap
, A_PCIE_FW
) & F_PCIE_FW_HALT
))
3657 * t4_fl_pkt_align - return the fl packet alignment
3658 * @adap: the adapter
3660 * T4 has a single field to specify the packing and padding boundary.
3661 * T5 onwards has separate fields for this and hence the alignment for
3662 * next packet offset is maximum of these two.
3664 int t4_fl_pkt_align(struct adapter
*adap
)
3666 u32 sge_control
, sge_control2
;
3667 unsigned int ingpadboundary
, ingpackboundary
, fl_align
, ingpad_shift
;
3669 sge_control
= t4_read_reg(adap
, A_SGE_CONTROL
);
3671 /* T4 uses a single control field to specify both the PCIe Padding and
3672 * Packing Boundary. T5 introduced the ability to specify these
3673 * separately. The actual Ingress Packet Data alignment boundary
3674 * within Packed Buffer Mode is the maximum of these two
3677 if (CHELSIO_CHIP_VERSION(adap
->params
.chip
) <= CHELSIO_T5
)
3678 ingpad_shift
= X_INGPADBOUNDARY_SHIFT
;
3680 ingpad_shift
= X_T6_INGPADBOUNDARY_SHIFT
;
3682 ingpadboundary
= 1 << (G_INGPADBOUNDARY(sge_control
) + ingpad_shift
);
3684 fl_align
= ingpadboundary
;
3685 if (!is_t4(adap
->params
.chip
)) {
3686 sge_control2
= t4_read_reg(adap
, A_SGE_CONTROL2
);
3687 ingpackboundary
= G_INGPACKBOUNDARY(sge_control2
);
3688 if (ingpackboundary
== X_INGPACKBOUNDARY_16B
)
3689 ingpackboundary
= 16;
3691 ingpackboundary
= 1 << (ingpackboundary
+
3692 X_INGPACKBOUNDARY_SHIFT
);
3694 fl_align
= max(ingpadboundary
, ingpackboundary
);
3700 * t4_fixup_host_params_compat - fix up host-dependent parameters
3701 * @adap: the adapter
3702 * @page_size: the host's Base Page Size
3703 * @cache_line_size: the host's Cache Line Size
3704 * @chip_compat: maintain compatibility with designated chip
3706 * Various registers in the chip contain values which are dependent on the
3707 * host's Base Page and Cache Line Sizes. This function will fix all of
3708 * those registers with the appropriate values as passed in ...
3710 * @chip_compat is used to limit the set of changes that are made
3711 * to be compatible with the indicated chip release. This is used by
3712 * drivers to maintain compatibility with chip register settings when
3713 * the drivers haven't [yet] been updated with new chip support.
3715 int t4_fixup_host_params_compat(struct adapter
*adap
,
3716 unsigned int page_size
,
3717 unsigned int cache_line_size
,
3718 enum chip_type chip_compat
)
3720 unsigned int page_shift
= cxgbe_fls(page_size
) - 1;
3721 unsigned int sge_hps
= page_shift
- 10;
3722 unsigned int stat_len
= cache_line_size
> 64 ? 128 : 64;
3723 unsigned int fl_align
= cache_line_size
< 32 ? 32 : cache_line_size
;
3724 unsigned int fl_align_log
= cxgbe_fls(fl_align
) - 1;
3726 t4_write_reg(adap
, A_SGE_HOST_PAGE_SIZE
,
3727 V_HOSTPAGESIZEPF0(sge_hps
) |
3728 V_HOSTPAGESIZEPF1(sge_hps
) |
3729 V_HOSTPAGESIZEPF2(sge_hps
) |
3730 V_HOSTPAGESIZEPF3(sge_hps
) |
3731 V_HOSTPAGESIZEPF4(sge_hps
) |
3732 V_HOSTPAGESIZEPF5(sge_hps
) |
3733 V_HOSTPAGESIZEPF6(sge_hps
) |
3734 V_HOSTPAGESIZEPF7(sge_hps
));
3736 if (is_t4(adap
->params
.chip
) || is_t4(chip_compat
))
3737 t4_set_reg_field(adap
, A_SGE_CONTROL
,
3738 V_INGPADBOUNDARY(M_INGPADBOUNDARY
) |
3739 F_EGRSTATUSPAGESIZE
,
3740 V_INGPADBOUNDARY(fl_align_log
-
3741 X_INGPADBOUNDARY_SHIFT
) |
3742 V_EGRSTATUSPAGESIZE(stat_len
!= 64));
3744 unsigned int pack_align
;
3745 unsigned int ingpad
, ingpack
;
3746 unsigned int pcie_cap
;
3749 * T5 introduced the separation of the Free List Padding and
3750 * Packing Boundaries. Thus, we can select a smaller Padding
3751 * Boundary to avoid uselessly chewing up PCIe Link and Memory
3752 * Bandwidth, and use a Packing Boundary which is large enough
3753 * to avoid false sharing between CPUs, etc.
3755 * For the PCI Link, the smaller the Padding Boundary the
3756 * better. For the Memory Controller, a smaller Padding
3757 * Boundary is better until we cross under the Memory Line
3758 * Size (the minimum unit of transfer to/from Memory). If we
3759 * have a Padding Boundary which is smaller than the Memory
3760 * Line Size, that'll involve a Read-Modify-Write cycle on the
3761 * Memory Controller which is never good.
3764 /* We want the Packing Boundary to be based on the Cache Line
3765 * Size in order to help avoid False Sharing performance
3766 * issues between CPUs, etc. We also want the Packing
3767 * Boundary to incorporate the PCI-E Maximum Payload Size. We
3768 * get best performance when the Packing Boundary is a
3769 * multiple of the Maximum Payload Size.
3771 pack_align
= fl_align
;
3772 pcie_cap
= t4_os_find_pci_capability(adap
, PCI_CAP_ID_EXP
);
3774 unsigned int mps
, mps_log
;
3777 /* The PCIe Device Control Maximum Payload Size field
3778 * [bits 7:5] encodes sizes as powers of 2 starting at
3781 t4_os_pci_read_cfg2(adap
, pcie_cap
+ PCI_EXP_DEVCTL
,
3783 mps_log
= ((devctl
& PCI_EXP_DEVCTL_PAYLOAD
) >> 5) + 7;
3785 if (mps
> pack_align
)
3790 * N.B. T5 has a different interpretation of the "0" value for
3791 * the Packing Boundary. This corresponds to 16 bytes instead
3792 * of the expected 32 bytes. We never have a Packing Boundary
3793 * less than 32 bytes so we can't use that special value but
3794 * on the other hand, if we wanted 32 bytes, the best we can
3795 * really do is 64 bytes ...
3797 if (pack_align
<= 16) {
3798 ingpack
= X_INGPACKBOUNDARY_16B
;
3800 } else if (pack_align
== 32) {
3801 ingpack
= X_INGPACKBOUNDARY_64B
;
3804 unsigned int pack_align_log
= cxgbe_fls(pack_align
) - 1;
3806 ingpack
= pack_align_log
- X_INGPACKBOUNDARY_SHIFT
;
3807 fl_align
= pack_align
;
3810 /* Use the smallest Ingress Padding which isn't smaller than
3811 * the Memory Controller Read/Write Size. We'll take that as
3812 * being 8 bytes since we don't know of any system with a
3813 * wider Memory Controller Bus Width.
3815 if (is_t5(adap
->params
.chip
))
3816 ingpad
= X_INGPADBOUNDARY_32B
;
3818 ingpad
= X_T6_INGPADBOUNDARY_8B
;
3819 t4_set_reg_field(adap
, A_SGE_CONTROL
,
3820 V_INGPADBOUNDARY(M_INGPADBOUNDARY
) |
3821 F_EGRSTATUSPAGESIZE
,
3822 V_INGPADBOUNDARY(ingpad
) |
3823 V_EGRSTATUSPAGESIZE(stat_len
!= 64));
3824 t4_set_reg_field(adap
, A_SGE_CONTROL2
,
3825 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY
),
3826 V_INGPACKBOUNDARY(ingpack
));
3830 * Adjust various SGE Free List Host Buffer Sizes.
3832 * The first four entries are:
3836 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3837 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3839 * For the single-MTU buffers in unpacked mode we need to include
3840 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3841 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3842 * Padding boundary. All of these are accommodated in the Factory
3843 * Default Firmware Configuration File but we need to adjust it for
3844 * this host's cache line size.
3846 t4_write_reg(adap
, A_SGE_FL_BUFFER_SIZE0
, page_size
);
3847 t4_write_reg(adap
, A_SGE_FL_BUFFER_SIZE2
,
3848 (t4_read_reg(adap
, A_SGE_FL_BUFFER_SIZE2
) + fl_align
- 1)
3850 t4_write_reg(adap
, A_SGE_FL_BUFFER_SIZE3
,
3851 (t4_read_reg(adap
, A_SGE_FL_BUFFER_SIZE3
) + fl_align
- 1)
3854 t4_write_reg(adap
, A_ULP_RX_TDDP_PSZ
, V_HPZ0(page_shift
- 12));
3860 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
3861 * @adap: the adapter
3862 * @page_size: the host's Base Page Size
3863 * @cache_line_size: the host's Cache Line Size
3865 * Various registers in T4 contain values which are dependent on the
3866 * host's Base Page and Cache Line Sizes. This function will fix all of
3867 * those registers with the appropriate values as passed in ...
3869 * This routine makes changes which are compatible with T4 chips.
3871 int t4_fixup_host_params(struct adapter
*adap
, unsigned int page_size
,
3872 unsigned int cache_line_size
)
3874 return t4_fixup_host_params_compat(adap
, page_size
, cache_line_size
,
3879 * t4_fw_initialize - ask FW to initialize the device
3880 * @adap: the adapter
3881 * @mbox: mailbox to use for the FW command
3883 * Issues a command to FW to partially initialize the device. This
3884 * performs initialization that generally doesn't depend on user input.
3886 int t4_fw_initialize(struct adapter
*adap
, unsigned int mbox
)
3888 struct fw_initialize_cmd c
;
3890 memset(&c
, 0, sizeof(c
));
3891 INIT_CMD(c
, INITIALIZE
, WRITE
);
3892 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
3896 * t4_query_params_rw - query FW or device parameters
3897 * @adap: the adapter
3898 * @mbox: mailbox to use for the FW command
3901 * @nparams: the number of parameters
3902 * @params: the parameter names
3903 * @val: the parameter values
3904 * @rw: Write and read flag
3906 * Reads the value of FW or device parameters. Up to 7 parameters can be
3909 static int t4_query_params_rw(struct adapter
*adap
, unsigned int mbox
,
3910 unsigned int pf
, unsigned int vf
,
3911 unsigned int nparams
, const u32
*params
,
3916 struct fw_params_cmd c
;
3917 __be32
*p
= &c
.param
[0].mnem
;
3922 memset(&c
, 0, sizeof(c
));
3923 c
.op_to_vfn
= cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD
) |
3924 F_FW_CMD_REQUEST
| F_FW_CMD_READ
|
3925 V_FW_PARAMS_CMD_PFN(pf
) |
3926 V_FW_PARAMS_CMD_VFN(vf
));
3927 c
.retval_len16
= cpu_to_be32(FW_LEN16(c
));
3929 for (i
= 0; i
< nparams
; i
++) {
3930 *p
++ = cpu_to_be32(*params
++);
3932 *p
= cpu_to_be32(*(val
+ i
));
3936 ret
= t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), &c
);
3938 for (i
= 0, p
= &c
.param
[0].val
; i
< nparams
; i
++, p
+= 2)
3939 *val
++ = be32_to_cpu(*p
);
3943 int t4_query_params(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
3944 unsigned int vf
, unsigned int nparams
, const u32
*params
,
3947 return t4_query_params_rw(adap
, mbox
, pf
, vf
, nparams
, params
, val
, 0);
3951 * t4_set_params_timeout - sets FW or device parameters
3952 * @adap: the adapter
3953 * @mbox: mailbox to use for the FW command
3956 * @nparams: the number of parameters
3957 * @params: the parameter names
3958 * @val: the parameter values
3959 * @timeout: the timeout time
3961 * Sets the value of FW or device parameters. Up to 7 parameters can be
3962 * specified at once.
3964 int t4_set_params_timeout(struct adapter
*adap
, unsigned int mbox
,
3965 unsigned int pf
, unsigned int vf
,
3966 unsigned int nparams
, const u32
*params
,
3967 const u32
*val
, int timeout
)
3969 struct fw_params_cmd c
;
3970 __be32
*p
= &c
.param
[0].mnem
;
3975 memset(&c
, 0, sizeof(c
));
3976 c
.op_to_vfn
= cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD
) |
3977 F_FW_CMD_REQUEST
| F_FW_CMD_WRITE
|
3978 V_FW_PARAMS_CMD_PFN(pf
) |
3979 V_FW_PARAMS_CMD_VFN(vf
));
3980 c
.retval_len16
= cpu_to_be32(FW_LEN16(c
));
3983 *p
++ = cpu_to_be32(*params
++);
3984 *p
++ = cpu_to_be32(*val
++);
3987 return t4_wr_mbox_timeout(adap
, mbox
, &c
, sizeof(c
), NULL
, timeout
);
3990 int t4_set_params(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
3991 unsigned int vf
, unsigned int nparams
, const u32
*params
,
3994 return t4_set_params_timeout(adap
, mbox
, pf
, vf
, nparams
, params
, val
,
3995 FW_CMD_MAX_TIMEOUT
);
3999 * t4_alloc_vi_func - allocate a virtual interface
4000 * @adap: the adapter
4001 * @mbox: mailbox to use for the FW command
4002 * @port: physical port associated with the VI
4003 * @pf: the PF owning the VI
4004 * @vf: the VF owning the VI
4005 * @nmac: number of MAC addresses needed (1 to 5)
4006 * @mac: the MAC addresses of the VI
4007 * @rss_size: size of RSS table slice associated with this VI
4008 * @portfunc: which Port Application Function MAC Address is desired
4009 * @idstype: Intrusion Detection Type
4011 * Allocates a virtual interface for the given physical port. If @mac is
4012 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
4013 * @mac should be large enough to hold @nmac Ethernet addresses, they are
4014 * stored consecutively so the space needed is @nmac * 6 bytes.
4015 * Returns a negative error number or the non-negative VI id.
4017 int t4_alloc_vi_func(struct adapter
*adap
, unsigned int mbox
,
4018 unsigned int port
, unsigned int pf
, unsigned int vf
,
4019 unsigned int nmac
, u8
*mac
, unsigned int *rss_size
,
4020 unsigned int portfunc
, unsigned int idstype
,
4026 memset(&c
, 0, sizeof(c
));
4027 c
.op_to_vfn
= cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD
) | F_FW_CMD_REQUEST
|
4028 F_FW_CMD_WRITE
| F_FW_CMD_EXEC
|
4029 V_FW_VI_CMD_PFN(pf
) | V_FW_VI_CMD_VFN(vf
));
4030 c
.alloc_to_len16
= cpu_to_be32(F_FW_VI_CMD_ALLOC
| FW_LEN16(c
));
4031 c
.type_to_viid
= cpu_to_be16(V_FW_VI_CMD_TYPE(idstype
) |
4032 V_FW_VI_CMD_FUNC(portfunc
));
4033 c
.portid_pkd
= V_FW_VI_CMD_PORTID(port
);
4036 ret
= t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), &c
);
4041 memcpy(mac
, c
.mac
, sizeof(c
.mac
));
4044 memcpy(mac
+ 24, c
.nmac3
, sizeof(c
.nmac3
));
4047 memcpy(mac
+ 18, c
.nmac2
, sizeof(c
.nmac2
));
4050 memcpy(mac
+ 12, c
.nmac1
, sizeof(c
.nmac1
));
4053 memcpy(mac
+ 6, c
.nmac0
, sizeof(c
.nmac0
));
4058 *rss_size
= G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c
.norss_rsssize
));
4060 *vivld
= G_FW_VI_CMD_VFVLD(be32_to_cpu(c
.alloc_to_len16
));
4062 *vin
= G_FW_VI_CMD_VIN(be32_to_cpu(c
.alloc_to_len16
));
4063 return G_FW_VI_CMD_VIID(cpu_to_be16(c
.type_to_viid
));
4067 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4068 * @adap: the adapter
4069 * @mbox: mailbox to use for the FW command
4070 * @port: physical port associated with the VI
4071 * @pf: the PF owning the VI
4072 * @vf: the VF owning the VI
4073 * @nmac: number of MAC addresses needed (1 to 5)
4074 * @mac: the MAC addresses of the VI
4075 * @rss_size: size of RSS table slice associated with this VI
4077 * Backwards compatible and convieniance routine to allocate a Virtual
4078 * Interface with a Ethernet Port Application Function and Intrustion
4079 * Detection System disabled.
4081 int t4_alloc_vi(struct adapter
*adap
, unsigned int mbox
, unsigned int port
,
4082 unsigned int pf
, unsigned int vf
, unsigned int nmac
, u8
*mac
,
4083 unsigned int *rss_size
, u8
*vivld
, u8
*vin
)
4085 return t4_alloc_vi_func(adap
, mbox
, port
, pf
, vf
, nmac
, mac
, rss_size
,
4086 FW_VI_FUNC_ETH
, 0, vivld
, vin
);
4090 * t4_free_vi - free a virtual interface
4091 * @adap: the adapter
4092 * @mbox: mailbox to use for the FW command
4093 * @pf: the PF owning the VI
4094 * @vf: the VF owning the VI
4095 * @viid: virtual interface identifiler
4097 * Free a previously allocated virtual interface.
4099 int t4_free_vi(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
4100 unsigned int vf
, unsigned int viid
)
4104 memset(&c
, 0, sizeof(c
));
4105 c
.op_to_vfn
= cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD
) | F_FW_CMD_REQUEST
|
4108 c
.op_to_vfn
|= cpu_to_be32(V_FW_VI_CMD_PFN(pf
) |
4109 V_FW_VI_CMD_VFN(vf
));
4110 c
.alloc_to_len16
= cpu_to_be32(F_FW_VI_CMD_FREE
| FW_LEN16(c
));
4111 c
.type_to_viid
= cpu_to_be16(V_FW_VI_CMD_VIID(viid
));
4114 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), &c
);
4116 return t4vf_wr_mbox(adap
, &c
, sizeof(c
), NULL
);
4120 * t4_set_rxmode - set Rx properties of a virtual interface
4121 * @adap: the adapter
4122 * @mbox: mailbox to use for the FW command
4124 * @mtu: the new MTU or -1
4125 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4126 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4127 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4128 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
4130 * @sleep_ok: if true we may sleep while awaiting command completion
4132 * Sets Rx properties of a virtual interface.
4134 int t4_set_rxmode(struct adapter
*adap
, unsigned int mbox
, unsigned int viid
,
4135 int mtu
, int promisc
, int all_multi
, int bcast
, int vlanex
,
4138 struct fw_vi_rxmode_cmd c
;
4140 /* convert to FW values */
4142 mtu
= M_FW_VI_RXMODE_CMD_MTU
;
4144 promisc
= M_FW_VI_RXMODE_CMD_PROMISCEN
;
4146 all_multi
= M_FW_VI_RXMODE_CMD_ALLMULTIEN
;
4148 bcast
= M_FW_VI_RXMODE_CMD_BROADCASTEN
;
4150 vlanex
= M_FW_VI_RXMODE_CMD_VLANEXEN
;
4152 memset(&c
, 0, sizeof(c
));
4153 c
.op_to_viid
= cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD
) |
4154 F_FW_CMD_REQUEST
| F_FW_CMD_WRITE
|
4155 V_FW_VI_RXMODE_CMD_VIID(viid
));
4156 c
.retval_len16
= cpu_to_be32(FW_LEN16(c
));
4157 c
.mtu_to_vlanexen
= cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu
) |
4158 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc
) |
4159 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi
) |
4160 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast
) |
4161 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex
));
4163 return t4_wr_mbox_meat(adap
, mbox
, &c
, sizeof(c
), NULL
,
4166 return t4vf_wr_mbox(adap
, &c
, sizeof(c
), NULL
);
4170 * t4_alloc_raw_mac_filt - Adds a raw mac entry in mps tcam
4171 * @adap: the adapter
4173 * @mac: the MAC address
4175 * @idx: index at which to add this entry
4176 * @port_id: the port index
4177 * @lookup_type: MAC address for inner (1) or outer (0) header
4178 * @sleep_ok: call is allowed to sleep
4180 * Adds the mac entry at the specified index using raw mac interface.
4182 * Returns a negative error number or the allocated index for this mac.
4184 int t4_alloc_raw_mac_filt(struct adapter
*adap
, unsigned int viid
,
4185 const u8
*addr
, const u8
*mask
, unsigned int idx
,
4186 u8 lookup_type
, u8 port_id
, bool sleep_ok
)
4189 struct fw_vi_mac_cmd c
;
4190 struct fw_vi_mac_raw
*p
= &c
.u
.raw
;
4193 memset(&c
, 0, sizeof(c
));
4194 c
.op_to_viid
= cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD
) |
4195 F_FW_CMD_REQUEST
| F_FW_CMD_WRITE
|
4196 V_FW_VI_MAC_CMD_VIID(viid
));
4197 val
= V_FW_CMD_LEN16(1) |
4198 V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW
);
4199 c
.freemacs_to_len16
= cpu_to_be32(val
);
4201 /* Specify that this is an inner mac address */
4202 p
->raw_idx_pkd
= cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx
));
4204 /* Lookup Type. Outer header: 0, Inner header: 1 */
4205 p
->data0_pkd
= cpu_to_be32(V_DATALKPTYPE(lookup_type
) |
4206 V_DATAPORTNUM(port_id
));
4207 /* Lookup mask and port mask */
4208 p
->data0m_pkd
= cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE
) |
4209 V_DATAPORTNUM(M_DATAPORTNUM
));
4211 /* Copy the address and the mask */
4212 memcpy((u8
*)&p
->data1
[0] + 2, addr
, ETHER_ADDR_LEN
);
4213 memcpy((u8
*)&p
->data1m
[0] + 2, mask
, ETHER_ADDR_LEN
);
4215 ret
= t4_wr_mbox_meat(adap
, adap
->mbox
, &c
, sizeof(c
), &c
, sleep_ok
);
4217 ret
= G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p
->raw_idx_pkd
));
4218 if (ret
!= (int)idx
)
4226 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
4227 * @adap: the adapter
4229 * @addr: the MAC address
4231 * @idx: index of the entry in mps tcam
4232 * @lookup_type: MAC address for inner (1) or outer (0) header
4233 * @port_id: the port index
4234 * @sleep_ok: call is allowed to sleep
4236 * Removes the mac entry at the specified index using raw mac interface.
4238 * Returns a negative error number on failure.
4240 int t4_free_raw_mac_filt(struct adapter
*adap
, unsigned int viid
,
4241 const u8
*addr
, const u8
*mask
, unsigned int idx
,
4242 u8 lookup_type
, u8 port_id
, bool sleep_ok
)
4244 struct fw_vi_mac_cmd c
;
4245 struct fw_vi_mac_raw
*p
= &c
.u
.raw
;
4248 memset(&c
, 0, sizeof(c
));
4249 c
.op_to_viid
= cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD
) |
4250 F_FW_CMD_REQUEST
| F_FW_CMD_WRITE
|
4252 V_FW_VI_MAC_CMD_VIID(viid
));
4253 raw
= V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW
);
4254 c
.freemacs_to_len16
= cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0U) |
4258 p
->raw_idx_pkd
= cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx
) |
4259 FW_VI_MAC_ID_BASED_FREE
);
4261 /* Lookup Type. Outer header: 0, Inner header: 1 */
4262 p
->data0_pkd
= cpu_to_be32(V_DATALKPTYPE(lookup_type
) |
4263 V_DATAPORTNUM(port_id
));
4264 /* Lookup mask and port mask */
4265 p
->data0m_pkd
= cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE
) |
4266 V_DATAPORTNUM(M_DATAPORTNUM
));
4268 /* Copy the address and the mask */
4269 memcpy((u8
*)&p
->data1
[0] + 2, addr
, ETHER_ADDR_LEN
);
4270 memcpy((u8
*)&p
->data1m
[0] + 2, mask
, ETHER_ADDR_LEN
);
4272 return t4_wr_mbox_meat(adap
, adap
->mbox
, &c
, sizeof(c
), &c
, sleep_ok
);
4276 * t4_change_mac - modifies the exact-match filter for a MAC address
4277 * @adap: the adapter
4278 * @mbox: mailbox to use for the FW command
4280 * @idx: index of existing filter for old value of MAC address, or -1
4281 * @addr: the new MAC address value
4282 * @persist: whether a new MAC allocation should be persistent
4283 * @add_smt: if true also add the address to the HW SMT
4285 * Modifies an exact-match filter and sets it to the new MAC address if
4286 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
4287 * latter case the address is added persistently if @persist is %true.
4289 * Note that in general it is not possible to modify the value of a given
4290 * filter so the generic way to modify an address filter is to free the one
4291 * being used by the old address value and allocate a new filter for the
4292 * new address value.
4294 * Returns a negative error number or the index of the filter with the new
4295 * MAC value. Note that this index may differ from @idx.
4297 int t4_change_mac(struct adapter
*adap
, unsigned int mbox
, unsigned int viid
,
4298 int idx
, const u8
*addr
, bool persist
, bool add_smt
)
4301 struct fw_vi_mac_cmd c
;
4302 struct fw_vi_mac_exact
*p
= c
.u
.exact
;
4303 int max_mac_addr
= adap
->params
.arch
.mps_tcam_size
;
4305 if (idx
< 0) /* new allocation */
4306 idx
= persist
? FW_VI_MAC_ADD_PERSIST_MAC
: FW_VI_MAC_ADD_MAC
;
4307 mode
= add_smt
? FW_VI_MAC_SMT_AND_MPSTCAM
: FW_VI_MAC_MPS_TCAM_ENTRY
;
4309 memset(&c
, 0, sizeof(c
));
4310 c
.op_to_viid
= cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD
) |
4311 F_FW_CMD_REQUEST
| F_FW_CMD_WRITE
|
4312 V_FW_VI_MAC_CMD_VIID(viid
));
4313 c
.freemacs_to_len16
= cpu_to_be32(V_FW_CMD_LEN16(1));
4314 p
->valid_to_idx
= cpu_to_be16(F_FW_VI_MAC_CMD_VALID
|
4315 V_FW_VI_MAC_CMD_SMAC_RESULT(mode
) |
4316 V_FW_VI_MAC_CMD_IDX(idx
));
4317 memcpy(p
->macaddr
, addr
, sizeof(p
->macaddr
));
4320 ret
= t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), &c
);
4322 ret
= t4vf_wr_mbox(adap
, &c
, sizeof(c
), &c
);
4324 ret
= G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p
->valid_to_idx
));
4325 if (ret
>= max_mac_addr
)
4332 * t4_enable_vi_params - enable/disable a virtual interface
4333 * @adap: the adapter
4334 * @mbox: mailbox to use for the FW command
4336 * @rx_en: 1=enable Rx, 0=disable Rx
4337 * @tx_en: 1=enable Tx, 0=disable Tx
4338 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
4340 * Enables/disables a virtual interface. Note that setting DCB Enable
4341 * only makes sense when enabling a Virtual Interface ...
4343 int t4_enable_vi_params(struct adapter
*adap
, unsigned int mbox
,
4344 unsigned int viid
, bool rx_en
, bool tx_en
, bool dcb_en
)
4346 struct fw_vi_enable_cmd c
;
4348 memset(&c
, 0, sizeof(c
));
4349 c
.op_to_viid
= cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD
) |
4350 F_FW_CMD_REQUEST
| F_FW_CMD_EXEC
|
4351 V_FW_VI_ENABLE_CMD_VIID(viid
));
4352 c
.ien_to_len16
= cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en
) |
4353 V_FW_VI_ENABLE_CMD_EEN(tx_en
) |
4354 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en
) |
4357 return t4_wr_mbox_ns(adap
, mbox
, &c
, sizeof(c
), NULL
);
4359 return t4vf_wr_mbox_ns(adap
, &c
, sizeof(c
), NULL
);
4363 * t4_enable_vi - enable/disable a virtual interface
4364 * @adap: the adapter
4365 * @mbox: mailbox to use for the FW command
4367 * @rx_en: 1=enable Rx, 0=disable Rx
4368 * @tx_en: 1=enable Tx, 0=disable Tx
4370 * Enables/disables a virtual interface. Note that setting DCB Enable
4371 * only makes sense when enabling a Virtual Interface ...
4373 int t4_enable_vi(struct adapter
*adap
, unsigned int mbox
, unsigned int viid
,
4374 bool rx_en
, bool tx_en
)
4376 return t4_enable_vi_params(adap
, mbox
, viid
, rx_en
, tx_en
, 0);
4380 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
4381 * @adap: the adapter
4382 * @mbox: mailbox to use for the FW command
4383 * @start: %true to enable the queues, %false to disable them
4384 * @pf: the PF owning the queues
4385 * @vf: the VF owning the queues
4386 * @iqid: ingress queue id
4387 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4388 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4390 * Starts or stops an ingress queue and its associated FLs, if any.
4392 int t4_iq_start_stop(struct adapter
*adap
, unsigned int mbox
, bool start
,
4393 unsigned int pf
, unsigned int vf
, unsigned int iqid
,
4394 unsigned int fl0id
, unsigned int fl1id
)
4398 memset(&c
, 0, sizeof(c
));
4399 c
.op_to_vfn
= cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD
) | F_FW_CMD_REQUEST
|
4401 c
.alloc_to_len16
= cpu_to_be32(V_FW_IQ_CMD_IQSTART(start
) |
4402 V_FW_IQ_CMD_IQSTOP(!start
) |
4404 c
.iqid
= cpu_to_be16(iqid
);
4405 c
.fl0id
= cpu_to_be16(fl0id
);
4406 c
.fl1id
= cpu_to_be16(fl1id
);
4408 c
.op_to_vfn
|= cpu_to_be32(V_FW_IQ_CMD_PFN(pf
) |
4409 V_FW_IQ_CMD_VFN(vf
));
4410 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
4412 return t4vf_wr_mbox(adap
, &c
, sizeof(c
), NULL
);
4417 * t4_iq_free - free an ingress queue and its FLs
4418 * @adap: the adapter
4419 * @mbox: mailbox to use for the FW command
4420 * @pf: the PF owning the queues
4421 * @vf: the VF owning the queues
4422 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4423 * @iqid: ingress queue id
4424 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4425 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4427 * Frees an ingress queue and its associated FLs, if any.
4429 int t4_iq_free(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
4430 unsigned int vf
, unsigned int iqtype
, unsigned int iqid
,
4431 unsigned int fl0id
, unsigned int fl1id
)
4435 memset(&c
, 0, sizeof(c
));
4436 c
.op_to_vfn
= cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD
) | F_FW_CMD_REQUEST
|
4439 c
.op_to_vfn
|= cpu_to_be32(V_FW_IQ_CMD_PFN(pf
) |
4440 V_FW_IQ_CMD_VFN(vf
));
4441 c
.alloc_to_len16
= cpu_to_be32(F_FW_IQ_CMD_FREE
| FW_LEN16(c
));
4442 c
.type_to_iqandstindex
= cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype
));
4443 c
.iqid
= cpu_to_be16(iqid
);
4444 c
.fl0id
= cpu_to_be16(fl0id
);
4445 c
.fl1id
= cpu_to_be16(fl1id
);
4447 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
4449 return t4vf_wr_mbox(adap
, &c
, sizeof(c
), NULL
);
4453 * t4_eth_eq_free - free an Ethernet egress queue
4454 * @adap: the adapter
4455 * @mbox: mailbox to use for the FW command
4456 * @pf: the PF owning the queue
4457 * @vf: the VF owning the queue
4458 * @eqid: egress queue id
4460 * Frees an Ethernet egress queue.
4462 int t4_eth_eq_free(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
4463 unsigned int vf
, unsigned int eqid
)
4465 struct fw_eq_eth_cmd c
;
4467 memset(&c
, 0, sizeof(c
));
4468 c
.op_to_vfn
= cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD
) |
4469 F_FW_CMD_REQUEST
| F_FW_CMD_EXEC
);
4471 c
.op_to_vfn
|= cpu_to_be32(V_FW_IQ_CMD_PFN(pf
) |
4472 V_FW_IQ_CMD_VFN(vf
));
4473 c
.alloc_to_len16
= cpu_to_be32(F_FW_EQ_ETH_CMD_FREE
| FW_LEN16(c
));
4474 c
.eqid_pkd
= cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid
));
4476 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
4478 return t4vf_wr_mbox(adap
, &c
, sizeof(c
), NULL
);
/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 */
static const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed by the firmware Link Down Reason Code; entries marked
	 * "Reserved" restore table positions dropped in extraction —
	 * NOTE(review): confirm against t4fw_interface.h reason codes.
	 */
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	if (link_down_rc >= ARRAY_SIZE(reason))
		return "Bad Reason Code";

	return reason[link_down_rc];
}
4506 /* Return the highest speed set in the port capabilities, in Mb/s. */
4507 static unsigned int fwcap_to_speed(fw_port_cap32_t caps
)
4509 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
4511 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
4515 TEST_SPEED_RETURN(100G
, 100000);
4516 TEST_SPEED_RETURN(50G
, 50000);
4517 TEST_SPEED_RETURN(40G
, 40000);
4518 TEST_SPEED_RETURN(25G
, 25000);
4519 TEST_SPEED_RETURN(10G
, 10000);
4520 TEST_SPEED_RETURN(1G
, 1000);
4521 TEST_SPEED_RETURN(100M
, 100);
4523 #undef TEST_SPEED_RETURN
4529 * t4_handle_get_port_info - process a FW reply message
4530 * @pi: the port info
4531 * @rpl: start of the FW message
4533 * Processes a GET_PORT_INFO FW reply message.
4535 static void t4_handle_get_port_info(struct port_info
*pi
, const __be64
*rpl
)
4537 const struct fw_port_cmd
*cmd
= (const void *)rpl
;
4538 int action
= G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd
->action_to_len16
));
4539 fw_port_cap32_t pcaps
, acaps
, linkattr
;
4540 struct link_config
*lc
= &pi
->link_cfg
;
4541 struct adapter
*adapter
= pi
->adapter
;
4542 enum fw_port_module_type mod_type
;
4543 enum fw_port_type port_type
;
4544 unsigned int speed
, fc
, fec
;
4545 int link_ok
, linkdnrc
;
4547 /* Extract the various fields from the Port Information message.
4550 case FW_PORT_ACTION_GET_PORT_INFO
: {
4551 u32 lstatus
= be32_to_cpu(cmd
->u
.info
.lstatus_to_modtype
);
4553 link_ok
= (lstatus
& F_FW_PORT_CMD_LSTATUS
) != 0;
4554 linkdnrc
= G_FW_PORT_CMD_LINKDNRC(lstatus
);
4555 port_type
= G_FW_PORT_CMD_PTYPE(lstatus
);
4556 mod_type
= G_FW_PORT_CMD_MODTYPE(lstatus
);
4557 pcaps
= fwcaps16_to_caps32(be16_to_cpu(cmd
->u
.info
.pcap
));
4558 acaps
= fwcaps16_to_caps32(be16_to_cpu(cmd
->u
.info
.acap
));
4560 /* Unfortunately the format of the Link Status in the old
4561 * 16-bit Port Information message isn't the same as the
4562 * 16-bit Port Capabilities bitfield used everywhere else ...
4565 if (lstatus
& F_FW_PORT_CMD_RXPAUSE
)
4566 linkattr
|= FW_PORT_CAP32_FC_RX
;
4567 if (lstatus
& F_FW_PORT_CMD_TXPAUSE
)
4568 linkattr
|= FW_PORT_CAP32_FC_TX
;
4569 if (lstatus
& V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M
))
4570 linkattr
|= FW_PORT_CAP32_SPEED_100M
;
4571 if (lstatus
& V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G
))
4572 linkattr
|= FW_PORT_CAP32_SPEED_1G
;
4573 if (lstatus
& V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G
))
4574 linkattr
|= FW_PORT_CAP32_SPEED_10G
;
4575 if (lstatus
& V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G
))
4576 linkattr
|= FW_PORT_CAP32_SPEED_25G
;
4577 if (lstatus
& V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G
))
4578 linkattr
|= FW_PORT_CAP32_SPEED_40G
;
4579 if (lstatus
& V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G
))
4580 linkattr
|= FW_PORT_CAP32_SPEED_100G
;
4585 case FW_PORT_ACTION_GET_PORT_INFO32
: {
4587 be32_to_cpu(cmd
->u
.info32
.lstatus32_to_cbllen32
);
4589 link_ok
= (lstatus32
& F_FW_PORT_CMD_LSTATUS32
) != 0;
4590 linkdnrc
= G_FW_PORT_CMD_LINKDNRC32(lstatus32
);
4591 port_type
= G_FW_PORT_CMD_PORTTYPE32(lstatus32
);
4592 mod_type
= G_FW_PORT_CMD_MODTYPE32(lstatus32
);
4593 pcaps
= be32_to_cpu(cmd
->u
.info32
.pcaps32
);
4594 acaps
= be32_to_cpu(cmd
->u
.info32
.acaps32
);
4595 linkattr
= be32_to_cpu(cmd
->u
.info32
.linkattr32
);
4600 dev_warn(adapter
, "Handle Port Information: Bad Command/Action %#x\n",
4601 be32_to_cpu(cmd
->action_to_len16
));
4605 fec
= fwcap_to_cc_fec(acaps
);
4607 fc
= fwcap_to_cc_pause(linkattr
);
4608 speed
= fwcap_to_speed(linkattr
);
4610 if (mod_type
!= pi
->mod_type
) {
4612 pi
->port_type
= port_type
;
4613 pi
->mod_type
= mod_type
;
4614 t4_os_portmod_changed(adapter
, pi
->pidx
);
4616 if (link_ok
!= lc
->link_ok
|| speed
!= lc
->speed
||
4617 fc
!= lc
->fc
|| fec
!= lc
->fec
) { /* something changed */
4618 if (!link_ok
&& lc
->link_ok
) {
4619 lc
->link_down_rc
= linkdnrc
;
4620 dev_warn(adap
, "Port %d link down, reason: %s\n",
4621 pi
->tx_chan
, t4_link_down_rc_str(linkdnrc
));
4623 lc
->link_ok
= link_ok
;
4628 lc
->acaps
= acaps
& ADVERT_MASK
;
4630 if (lc
->acaps
& FW_PORT_CAP32_ANEG
) {
4631 lc
->autoneg
= AUTONEG_ENABLE
;
4633 /* When Autoneg is disabled, user needs to set
4635 * Similar to cxgb4_ethtool.c: set_link_ksettings
4638 lc
->requested_speed
= fwcap_to_speed(acaps
);
4639 lc
->autoneg
= AUTONEG_DISABLE
;
4645 * t4_ctrl_eq_free - free a control egress queue
4646 * @adap: the adapter
4647 * @mbox: mailbox to use for the FW command
4648 * @pf: the PF owning the queue
4649 * @vf: the VF owning the queue
4650 * @eqid: egress queue id
4652 * Frees a control egress queue.
4654 int t4_ctrl_eq_free(struct adapter
*adap
, unsigned int mbox
, unsigned int pf
,
4655 unsigned int vf
, unsigned int eqid
)
4657 struct fw_eq_ctrl_cmd c
;
4659 memset(&c
, 0, sizeof(c
));
4660 c
.op_to_vfn
= cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD
) |
4661 F_FW_CMD_REQUEST
| F_FW_CMD_EXEC
|
4662 V_FW_EQ_CTRL_CMD_PFN(pf
) |
4663 V_FW_EQ_CTRL_CMD_VFN(vf
));
4664 c
.alloc_to_len16
= cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE
| FW_LEN16(c
));
4665 c
.cmpliqid_eqid
= cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid
));
4666 return t4_wr_mbox(adap
, mbox
, &c
, sizeof(c
), NULL
);
4670 * t4_handle_fw_rpl - process a FW reply message
4671 * @adap: the adapter
4672 * @rpl: start of the FW message
4674 * Processes a FW message, such as link state change messages.
4676 int t4_handle_fw_rpl(struct adapter
*adap
, const __be64
*rpl
)
4678 u8 opcode
= *(const u8
*)rpl
;
4681 * This might be a port command ... this simplifies the following
4682 * conditionals ... We can get away with pre-dereferencing
4683 * action_to_len16 because it's in the first 16 bytes and all messages
4684 * will be at least that long.
4686 const struct fw_port_cmd
*p
= (const void *)rpl
;
4687 unsigned int action
=
4688 G_FW_PORT_CMD_ACTION(be32_to_cpu(p
->action_to_len16
));
4690 if (opcode
== FW_PORT_CMD
&&
4691 (action
== FW_PORT_ACTION_GET_PORT_INFO
||
4692 action
== FW_PORT_ACTION_GET_PORT_INFO32
)) {
4693 /* link/module state change message */
4694 int chan
= G_FW_PORT_CMD_PORTID(be32_to_cpu(p
->op_to_portid
));
4695 struct port_info
*pi
= NULL
;
4698 for_each_port(adap
, i
) {
4699 pi
= adap2pinfo(adap
, i
);
4700 if (pi
->tx_chan
== chan
)
4704 t4_handle_get_port_info(pi
, rpl
);
4706 dev_warn(adap
, "Unknown firmware reply %d\n", opcode
);
4712 void t4_reset_link_config(struct adapter
*adap
, int idx
)
4714 struct port_info
*pi
= adap2pinfo(adap
, idx
);
4715 struct link_config
*lc
= &pi
->link_cfg
;
4718 lc
->requested_speed
= 0;
4719 lc
->requested_fc
= 0;
4725 * init_link_config - initialize a link's SW state
4726 * @lc: structure holding the link state
4727 * @pcaps: link Port Capabilities
4728 * @acaps: link current Advertised Port Capabilities
4730 * Initializes the SW state maintained for each link, including the link's
4731 * capabilities and default speed/flow-control/autonegotiation settings.
4733 void init_link_config(struct link_config
*lc
, fw_port_cap32_t pcaps
,
4734 fw_port_cap32_t acaps
)
4737 lc
->requested_speed
= 0;
4739 lc
->requested_fc
= 0;
4743 * For Forward Error Control, we default to whatever the Firmware
4744 * tells us the Link is currently advertising.
4746 lc
->auto_fec
= fwcap_to_cc_fec(acaps
);
4747 lc
->requested_fec
= FEC_AUTO
;
4748 lc
->fec
= lc
->auto_fec
;
4750 if (lc
->pcaps
& FW_PORT_CAP32_ANEG
) {
4751 lc
->acaps
= lc
->pcaps
& ADVERT_MASK
;
4752 lc
->autoneg
= AUTONEG_ENABLE
;
4753 lc
->requested_fc
|= PAUSE_AUTONEG
;
4756 lc
->autoneg
= AUTONEG_DISABLE
;
4761 * t4_wait_dev_ready - wait until reads of registers work
4763 * Right after the device is RESET it can take a small amount of time
4764 * for it to respond to register reads. Until then, all reads will
4765 * return either 0xff...ff or 0xee...ee. Return an error if reads
4766 * don't work within a reasonable time frame.
4768 static int t4_wait_dev_ready(struct adapter
*adapter
)
4772 whoami
= t4_read_reg(adapter
, A_PL_WHOAMI
);
4774 if (whoami
!= 0xffffffff && whoami
!= X_CIM_PF_NOACCESS
)
4778 whoami
= t4_read_reg(adapter
, A_PL_WHOAMI
);
4779 if (whoami
!= 0xffffffff && whoami
!= X_CIM_PF_NOACCESS
)
4782 dev_err(adapter
, "Device didn't become ready for access, whoami = %#x\n",
4788 u32 vendor_and_model_id
;
4792 int t4_get_flash_params(struct adapter
*adapter
)
4795 * Table for non-standard supported Flash parts. Note, all Flash
4796 * parts must have 64KB sectors.
4798 static struct flash_desc supported_flash
[] = {
4799 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
4804 unsigned int part
, manufacturer
;
4805 unsigned int density
, size
= 0;
4808 * Issue a Read ID Command to the Flash part. We decode supported
4809 * Flash parts and their sizes from this. There's a newer Query
4810 * Command which can retrieve detailed geometry information but
4811 * many Flash parts don't support it.
4813 ret
= sf1_write(adapter
, 1, 1, 0, SF_RD_ID
);
4815 ret
= sf1_read(adapter
, 3, 0, 1, &flashid
);
4816 t4_write_reg(adapter
, A_SF_OP
, 0); /* unlock SF */
4821 * Check to see if it's one of our non-standard supported Flash parts.
4823 for (part
= 0; part
< ARRAY_SIZE(supported_flash
); part
++) {
4824 if (supported_flash
[part
].vendor_and_model_id
== flashid
) {
4825 adapter
->params
.sf_size
=
4826 supported_flash
[part
].size_mb
;
4827 adapter
->params
.sf_nsec
=
4828 adapter
->params
.sf_size
/ SF_SEC_SIZE
;
4834 * Decode Flash part size. The code below looks repetative with
4835 * common encodings, but that's not guaranteed in the JEDEC
4836 * specification for the Read JADEC ID command. The only thing that
4837 * we're guaranteed by the JADEC specification is where the
4838 * Manufacturer ID is in the returned result. After that each
4839 * Manufacturer ~could~ encode things completely differently.
4840 * Note, all Flash parts must have 64KB sectors.
4842 manufacturer
= flashid
& 0xff;
4843 switch (manufacturer
) {
4844 case 0x20: { /* Micron/Numonix */
4846 * This Density -> Size decoding table is taken from Micron
4849 density
= (flashid
>> 16) & 0xff;
4852 size
= 1 << 20; /* 1MB */
4855 size
= 1 << 21; /* 2MB */
4858 size
= 1 << 22; /* 4MB */
4861 size
= 1 << 23; /* 8MB */
4864 size
= 1 << 24; /* 16MB */
4867 size
= 1 << 25; /* 32MB */
4870 size
= 1 << 26; /* 64MB */
4873 size
= 1 << 27; /* 128MB */
4876 size
= 1 << 28; /* 256MB */
4882 case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
4884 * This Density -> Size decoding table is taken from ISSI
4887 density
= (flashid
>> 16) & 0xff;
4890 size
= 1 << 25; /* 32MB */
4893 size
= 1 << 26; /* 64MB */
4899 case 0xc2: { /* Macronix */
4901 * This Density -> Size decoding table is taken from Macronix
4904 density
= (flashid
>> 16) & 0xff;
4907 size
= 1 << 23; /* 8MB */
4910 size
= 1 << 24; /* 16MB */
4916 case 0xef: { /* Winbond */
4918 * This Density -> Size decoding table is taken from Winbond
4921 density
= (flashid
>> 16) & 0xff;
4924 size
= 1 << 23; /* 8MB */
4927 size
= 1 << 24; /* 16MB */
4934 /* If we didn't recognize the FLASH part, that's no real issue: the
4935 * Hardware/Software contract says that Hardware will _*ALWAYS*_
4936 * use a FLASH part which is at least 4MB in size and has 64KB
4937 * sectors. The unrecognized FLASH part is likely to be much larger
4938 * than 4MB, but that's all we really need.
4942 "Unknown Flash Part, ID = %#x, assuming 4MB\n",
4948 * Store decoded Flash size and fall through into vetting code.
4950 adapter
->params
.sf_size
= size
;
4951 adapter
->params
.sf_nsec
= size
/ SF_SEC_SIZE
;
4955 * We should reject adapters with FLASHes which are too small. So, emit
4958 if (adapter
->params
.sf_size
< FLASH_MIN_SIZE
)
4959 dev_warn(adapter
, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
4960 flashid
, adapter
->params
.sf_size
, FLASH_MIN_SIZE
);
4965 static void set_pcie_completion_timeout(struct adapter
*adapter
,
4971 pcie_cap
= t4_os_find_pci_capability(adapter
, PCI_CAP_ID_EXP
);
4973 t4_os_pci_read_cfg2(adapter
, pcie_cap
+ PCI_EXP_DEVCTL2
, &val
);
4976 t4_os_pci_write_cfg2(adapter
, pcie_cap
+ PCI_EXP_DEVCTL2
, val
);
4981 * t4_get_chip_type - Determine chip type from device ID
4982 * @adap: the adapter
4983 * @ver: adapter version
4985 int t4_get_chip_type(struct adapter
*adap
, int ver
)
4987 enum chip_type chip
= 0;
4988 u32 pl_rev
= G_REV(t4_read_reg(adap
, A_PL_REV
));
4990 /* Retrieve adapter's device ID */
4993 chip
|= CHELSIO_CHIP_CODE(CHELSIO_T5
, pl_rev
);
4996 chip
|= CHELSIO_CHIP_CODE(CHELSIO_T6
, pl_rev
);
4999 dev_err(adap
, "Device %d is not supported\n",
5000 adap
->params
.pci
.device_id
);
5008 * t4_prep_adapter - prepare SW and HW for operation
5009 * @adapter: the adapter
5011 * Initialize adapter SW state for the various HW modules, set initial
5012 * values for some adapter tunables, take PHYs out of reset, and
5013 * initialize the MDIO interface.
5015 int t4_prep_adapter(struct adapter
*adapter
)
5020 ret
= t4_wait_dev_ready(adapter
);
5024 pl_rev
= G_REV(t4_read_reg(adapter
, A_PL_REV
));
5025 adapter
->params
.pci
.device_id
= adapter
->pdev
->id
.device_id
;
5026 adapter
->params
.pci
.vendor_id
= adapter
->pdev
->id
.vendor_id
;
5029 * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
5030 * ADAPTER (VERSION << 4 | REVISION)
5032 ver
= CHELSIO_PCI_ID_VER(adapter
->params
.pci
.device_id
);
5033 adapter
->params
.chip
= 0;
5036 adapter
->params
.chip
|= CHELSIO_CHIP_CODE(CHELSIO_T5
, pl_rev
);
5037 adapter
->params
.arch
.sge_fl_db
= F_DBPRIO
| F_DBTYPE
;
5038 adapter
->params
.arch
.mps_tcam_size
=
5039 NUM_MPS_T5_CLS_SRAM_L_INSTANCES
;
5040 adapter
->params
.arch
.mps_rplc_size
= 128;
5041 adapter
->params
.arch
.nchan
= NCHAN
;
5042 adapter
->params
.arch
.vfcount
= 128;
5045 adapter
->params
.chip
|= CHELSIO_CHIP_CODE(CHELSIO_T6
, pl_rev
);
5046 adapter
->params
.arch
.sge_fl_db
= 0;
5047 adapter
->params
.arch
.mps_tcam_size
=
5048 NUM_MPS_T5_CLS_SRAM_L_INSTANCES
;
5049 adapter
->params
.arch
.mps_rplc_size
= 256;
5050 adapter
->params
.arch
.nchan
= 2;
5051 adapter
->params
.arch
.vfcount
= 256;
5054 dev_err(adapter
, "%s: Device %d is not supported\n",
5055 __func__
, adapter
->params
.pci
.device_id
);
5059 adapter
->params
.pci
.vpd_cap_addr
=
5060 t4_os_find_pci_capability(adapter
, PCI_CAP_ID_VPD
);
5062 ret
= t4_get_flash_params(adapter
);
5064 dev_err(adapter
, "Unable to retrieve Flash Parameters, ret = %d\n",
5069 adapter
->params
.cim_la_size
= CIMLA_SIZE
;
5071 init_cong_ctrl(adapter
->params
.a_wnd
, adapter
->params
.b_wnd
);
5074 * Default port and clock for debugging in case we can't reach FW.
5076 adapter
->params
.nports
= 1;
5077 adapter
->params
.portvec
= 1;
5078 adapter
->params
.vpd
.cclk
= 50000;
5080 /* Set pci completion timeout value to 4 seconds. */
5081 set_pcie_completion_timeout(adapter
, 0xd);
5086 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
5087 * @adapter: the adapter
5088 * @qid: the Queue ID
5089 * @qtype: the Ingress or Egress type for @qid
5090 * @pbar2_qoffset: BAR2 Queue Offset
5091 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
5093 * Returns the BAR2 SGE Queue Registers information associated with the
5094 * indicated Absolute Queue ID. These are passed back in return value
5095 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
5096 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
5098 * This may return an error which indicates that BAR2 SGE Queue
5099 * registers aren't available. If an error is not returned, then the
5100 * following values are returned:
5102 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
5103 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
5105 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
5106 * require the "Inferred Queue ID" ability may be used. E.g. the
5107 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
5108 * then these "Inferred Queue ID" register may not be used.
5110 int t4_bar2_sge_qregs(struct adapter
*adapter
, unsigned int qid
,
5111 enum t4_bar2_qtype qtype
, u64
*pbar2_qoffset
,
5112 unsigned int *pbar2_qid
)
5114 unsigned int page_shift
, page_size
, qpp_shift
, qpp_mask
;
5115 u64 bar2_page_offset
, bar2_qoffset
;
5116 unsigned int bar2_qid
, bar2_qid_offset
, bar2_qinferred
;
5119 * T4 doesn't support BAR2 SGE Queue registers.
5121 if (is_t4(adapter
->params
.chip
))
5125 * Get our SGE Page Size parameters.
5127 page_shift
= adapter
->params
.sge
.hps
+ 10;
5128 page_size
= 1 << page_shift
;
5131 * Get the right Queues per Page parameters for our Queue.
5133 qpp_shift
= (qtype
== T4_BAR2_QTYPE_EGRESS
?
5134 adapter
->params
.sge
.eq_qpp
:
5135 adapter
->params
.sge
.iq_qpp
);
5136 qpp_mask
= (1 << qpp_shift
) - 1;
5139 * Calculate the basics of the BAR2 SGE Queue register area:
5140 * o The BAR2 page the Queue registers will be in.
5141 * o The BAR2 Queue ID.
5142 * o The BAR2 Queue ID Offset into the BAR2 page.
5144 bar2_page_offset
= ((qid
>> qpp_shift
) << page_shift
);
5145 bar2_qid
= qid
& qpp_mask
;
5146 bar2_qid_offset
= bar2_qid
* SGE_UDB_SIZE
;
5149 * If the BAR2 Queue ID Offset is less than the Page Size, then the
5150 * hardware will infer the Absolute Queue ID simply from the writes to
5151 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
5152 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
5153 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
5154 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
5155 * from the BAR2 Page and BAR2 Queue ID.
5157 * One important consequence of this is that some BAR2 SGE registers
5158 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
5159 * there. But other registers synthesize the SGE Queue ID purely
5160 * from the writes to the registers -- the Write Combined Doorbell
5161 * Buffer is a good example. These BAR2 SGE Registers are only
5162 * available for those BAR2 SGE Register areas where the SGE Absolute
5163 * Queue ID can be inferred from simple writes.
5165 bar2_qoffset
= bar2_page_offset
;
5166 bar2_qinferred
= (bar2_qid_offset
< page_size
);
5167 if (bar2_qinferred
) {
5168 bar2_qoffset
+= bar2_qid_offset
;
5172 *pbar2_qoffset
= bar2_qoffset
;
5173 *pbar2_qid
= bar2_qid
;
5178 * t4_init_sge_params - initialize adap->params.sge
5179 * @adapter: the adapter
5181 * Initialize various fields of the adapter's SGE Parameters structure.
5183 int t4_init_sge_params(struct adapter
*adapter
)
5185 struct sge_params
*sge_params
= &adapter
->params
.sge
;
5187 unsigned int s_hps
, s_qpp
;
5190 * Extract the SGE Page Size for our PF.
5192 hps
= t4_read_reg(adapter
, A_SGE_HOST_PAGE_SIZE
);
5193 s_hps
= (S_HOSTPAGESIZEPF0
+ (S_HOSTPAGESIZEPF1
- S_HOSTPAGESIZEPF0
) *
5195 sge_params
->hps
= ((hps
>> s_hps
) & M_HOSTPAGESIZEPF0
);
5198 * Extract the SGE Egress and Ingess Queues Per Page for our PF.
5200 s_qpp
= (S_QUEUESPERPAGEPF0
+
5201 (S_QUEUESPERPAGEPF1
- S_QUEUESPERPAGEPF0
) * adapter
->pf
);
5202 qpp
= t4_read_reg(adapter
, A_SGE_EGRESS_QUEUES_PER_PAGE_PF
);
5203 sge_params
->eq_qpp
= ((qpp
>> s_qpp
) & M_QUEUESPERPAGEPF0
);
5204 qpp
= t4_read_reg(adapter
, A_SGE_INGRESS_QUEUES_PER_PAGE_PF
);
5205 sge_params
->iq_qpp
= ((qpp
>> s_qpp
) & M_QUEUESPERPAGEPF0
);
5211 * t4_init_tp_params - initialize adap->params.tp
5212 * @adap: the adapter
5214 * Initialize various fields of the adapter's TP Parameters structure.
5216 int t4_init_tp_params(struct adapter
*adap
)
5221 v
= t4_read_reg(adap
, A_TP_TIMER_RESOLUTION
);
5222 adap
->params
.tp
.tre
= G_TIMERRESOLUTION(v
);
5223 adap
->params
.tp
.dack_re
= G_DELAYEDACKRESOLUTION(v
);
5225 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5226 for (chan
= 0; chan
< NCHAN
; chan
++)
5227 adap
->params
.tp
.tx_modq
[chan
] = chan
;
5230 * Cache the adapter's Compressed Filter Mode/Mask and global Ingress
5233 param
= (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV
) |
5234 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER
) |
5235 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK
));
5237 /* Read current value */
5238 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0,
5241 dev_info(adap
, "Current filter mode/mask 0x%x:0x%x\n",
5242 G_FW_PARAMS_PARAM_FILTER_MODE(v
),
5243 G_FW_PARAMS_PARAM_FILTER_MASK(v
));
5244 adap
->params
.tp
.vlan_pri_map
=
5245 G_FW_PARAMS_PARAM_FILTER_MODE(v
);
5246 adap
->params
.tp
.filter_mask
=
5247 G_FW_PARAMS_PARAM_FILTER_MASK(v
);
5250 "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
5252 /* In case of older-fw (which doesn't expose the api
5253 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
5254 * the fw api) combination, fall-back to older method of reading
5255 * the filter mode from indirect-register
5257 t4_read_indirect(adap
, A_TP_PIO_ADDR
, A_TP_PIO_DATA
,
5258 &adap
->params
.tp
.vlan_pri_map
, 1,
5261 /* With the older-fw and newer-driver combination we might run
5262 * into an issue when user wants to use hash filter region but
5263 * the filter_mask is zero, in this case filter_mask validation
5264 * is tough. To avoid that we set the filter_mask same as filter
5265 * mode, which will behave exactly as the older way of ignoring
5266 * the filter mask validation.
5268 adap
->params
.tp
.filter_mask
= adap
->params
.tp
.vlan_pri_map
;
5271 t4_read_indirect(adap
, A_TP_PIO_ADDR
, A_TP_PIO_DATA
,
5272 &adap
->params
.tp
.ingress_config
, 1,
5273 A_TP_INGRESS_CONFIG
);
5275 /* For T6, cache the adapter's compressed error vector
5276 * and passing outer header info for encapsulated packets.
5278 if (CHELSIO_CHIP_VERSION(adap
->params
.chip
) > CHELSIO_T5
) {
5279 v
= t4_read_reg(adap
, A_TP_OUT_CONFIG
);
5280 adap
->params
.tp
.rx_pkt_encap
= (v
& F_CRXPKTENC
) ? 1 : 0;
5284 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
5285 * shift positions of several elements of the Compressed Filter Tuple
5286 * for this adapter which we need frequently ...
5288 adap
->params
.tp
.vlan_shift
= t4_filter_field_shift(adap
, F_VLAN
);
5289 adap
->params
.tp
.vnic_shift
= t4_filter_field_shift(adap
, F_VNIC_ID
);
5290 adap
->params
.tp
.port_shift
= t4_filter_field_shift(adap
, F_PORT
);
5291 adap
->params
.tp
.protocol_shift
= t4_filter_field_shift(adap
,
5293 adap
->params
.tp
.ethertype_shift
= t4_filter_field_shift(adap
,
5295 adap
->params
.tp
.macmatch_shift
= t4_filter_field_shift(adap
,
5297 adap
->params
.tp
.tos_shift
= t4_filter_field_shift(adap
, F_TOS
);
5299 v
= t4_read_reg(adap
, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A
);
5300 adap
->params
.tp
.hash_filter_mask
= v
;
5301 v
= t4_read_reg(adap
, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A
);
5302 adap
->params
.tp
.hash_filter_mask
|= ((u64
)v
<< 32);
5308 * t4_filter_field_shift - calculate filter field shift
5309 * @adap: the adapter
5310 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5312 * Return the shift position of a filter field within the Compressed
5313 * Filter Tuple. The filter field is specified via its selection bit
5314 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
5316 int t4_filter_field_shift(const struct adapter
*adap
, unsigned int filter_sel
)
5318 unsigned int filter_mode
= adap
->params
.tp
.vlan_pri_map
;
5322 if ((filter_mode
& filter_sel
) == 0)
5325 for (sel
= 1, field_shift
= 0; sel
< filter_sel
; sel
<<= 1) {
5326 switch (filter_mode
& sel
) {
5328 field_shift
+= W_FT_FCOE
;
5331 field_shift
+= W_FT_PORT
;
5334 field_shift
+= W_FT_VNIC_ID
;
5337 field_shift
+= W_FT_VLAN
;
5340 field_shift
+= W_FT_TOS
;
5343 field_shift
+= W_FT_PROTOCOL
;
5346 field_shift
+= W_FT_ETHERTYPE
;
5349 field_shift
+= W_FT_MACMATCH
;
5352 field_shift
+= W_FT_MPSHITTYPE
;
5354 case F_FRAGMENTATION
:
5355 field_shift
+= W_FT_FRAGMENTATION
;
5362 int t4_init_rss_mode(struct adapter
*adap
, int mbox
)
5365 struct fw_rss_vi_config_cmd rvc
;
5367 memset(&rvc
, 0, sizeof(rvc
));
5369 for_each_port(adap
, i
) {
5370 struct port_info
*p
= adap2pinfo(adap
, i
);
5372 rvc
.op_to_viid
= htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD
) |
5373 F_FW_CMD_REQUEST
| F_FW_CMD_READ
|
5374 V_FW_RSS_VI_CONFIG_CMD_VIID(p
->viid
));
5375 rvc
.retval_len16
= htonl(FW_LEN16(rvc
));
5376 ret
= t4_wr_mbox(adap
, mbox
, &rvc
, sizeof(rvc
), &rvc
);
5379 p
->rss_mode
= ntohl(rvc
.u
.basicvirtual
.defaultq_to_udpen
);
5384 int t4_port_init(struct adapter
*adap
, int mbox
, int pf
, int vf
)
5386 unsigned int fw_caps
= adap
->params
.fw_caps_support
;
5387 fw_port_cap32_t pcaps
, acaps
;
5388 enum fw_port_type port_type
;
5389 struct fw_port_cmd cmd
;
5390 u8 vivld
= 0, vin
= 0;
5396 memset(&cmd
, 0, sizeof(cmd
));
5398 for_each_port(adap
, i
) {
5399 struct port_info
*pi
= adap2pinfo(adap
, i
);
5400 unsigned int rss_size
= 0;
5402 while ((adap
->params
.portvec
& (1 << j
)) == 0)
5405 /* If we haven't yet determined whether we're talking to
5406 * Firmware which knows the new 32-bit Port Capabilities, it's
5407 * time to find out now. This will also tell new Firmware to
5408 * send us Port Status Updates using the new 32-bit Port
5409 * Capabilities version of the Port Information message.
5411 if (fw_caps
== FW_CAPS_UNKNOWN
) {
5412 u32 param
, val
, caps
;
5414 caps
= FW_PARAMS_PARAM_PFVF_PORT_CAPS32
;
5415 param
= (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF
) |
5416 V_FW_PARAMS_PARAM_X(caps
));
5418 ret
= t4_set_params(adap
, mbox
, pf
, vf
, 1, ¶m
,
5420 fw_caps
= ret
== 0 ? FW_CAPS32
: FW_CAPS16
;
5421 adap
->params
.fw_caps_support
= fw_caps
;
5424 memset(&cmd
, 0, sizeof(cmd
));
5425 cmd
.op_to_portid
= cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD
) |
5428 V_FW_PORT_CMD_PORTID(j
));
5429 action
= fw_caps
== FW_CAPS16
? FW_PORT_ACTION_GET_PORT_INFO
:
5430 FW_PORT_ACTION_GET_PORT_INFO32
;
5431 cmd
.action_to_len16
= cpu_to_be32(V_FW_PORT_CMD_ACTION(action
) |
5433 ret
= t4_wr_mbox(pi
->adapter
, mbox
, &cmd
, sizeof(cmd
), &cmd
);
5437 /* Extract the various fields from the Port Information message.
5439 if (fw_caps
== FW_CAPS16
) {
5441 be32_to_cpu(cmd
.u
.info
.lstatus_to_modtype
);
5443 port_type
= G_FW_PORT_CMD_PTYPE(lstatus
);
5444 mdio_addr
= (lstatus
& F_FW_PORT_CMD_MDIOCAP
) ?
5445 (int)G_FW_PORT_CMD_MDIOADDR(lstatus
) : -1;
5446 pcaps
= be16_to_cpu(cmd
.u
.info
.pcap
);
5447 acaps
= be16_to_cpu(cmd
.u
.info
.acap
);
5448 pcaps
= fwcaps16_to_caps32(pcaps
);
5449 acaps
= fwcaps16_to_caps32(acaps
);
5452 be32_to_cpu(cmd
.u
.info32
.lstatus32_to_cbllen32
);
5454 port_type
= G_FW_PORT_CMD_PORTTYPE32(lstatus32
);
5455 mdio_addr
= (lstatus32
& F_FW_PORT_CMD_MDIOCAP32
) ?
5456 (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32
) :
5458 pcaps
= be32_to_cpu(cmd
.u
.info32
.pcaps32
);
5459 acaps
= be32_to_cpu(cmd
.u
.info32
.acaps32
);
5462 ret
= t4_alloc_vi(adap
, mbox
, j
, pf
, vf
, 1, addr
, &rss_size
,
5469 pi
->rss_size
= rss_size
;
5470 t4_os_set_hw_addr(adap
, i
, addr
);
5472 /* If fw supports returning the VIN as part of FW_VI_CMD,
5473 * save the returned values.
5475 if (adap
->params
.viid_smt_extn_support
) {
5479 /* Retrieve the values from VIID */
5480 pi
->vivld
= G_FW_VIID_VIVLD(pi
->viid
);
5481 pi
->vin
= G_FW_VIID_VIN(pi
->viid
);
5484 pi
->port_type
= port_type
;
5485 pi
->mdio_addr
= mdio_addr
;
5486 pi
->mod_type
= FW_PORT_MOD_TYPE_NA
;
5488 init_link_config(&pi
->link_cfg
, pcaps
, acaps
);
5495 * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
5496 * @adap: the adapter
5497 * @win: PCI-E Memory Window to use
5498 * @addr: address within adapter memory
5499 * @len: amount of memory to transfer
5500 * @hbuf: host memory buffer
5501 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
5503 * Reads/writes an [almost] arbitrary memory region in the firmware: the
5504 * firmware memory address and host buffer must be aligned on 32-bit
5505 * boundaries; the length may be arbitrary.
5508 * 1. The memory is transferred as a raw byte sequence from/to the
5509 * firmware's memory. If this memory contains data structures which
5510 * contain multi-byte integers, it's the caller's responsibility to
5511 * perform appropriate byte order conversions.
5513 * 2. It is the Caller's responsibility to ensure that no other code
5514 * uses the specified PCI-E Memory Window while this routine is
5515 * using it. This is typically done via the use of OS-specific
5518 int t4_memory_rw_addr(struct adapter
*adap
, int win
, u32 addr
,
5519 u32 len
, void *hbuf
, int dir
)
5521 u32 pos
, offset
, resid
;
5522 u32 win_pf
, mem_reg
, mem_aperture
, mem_base
;
5525 /* Argument sanity checks ...*/
5526 if (addr
& 0x3 || (uintptr_t)hbuf
& 0x3)
5530 /* It's convenient to be able to handle lengths which aren't a
5531 * multiple of 32-bits because we often end up transferring files to
5532 * the firmware. So we'll handle that by normalizing the length here
5533 * and then handling any residual transfer at the end.
5538 /* Each PCI-E Memory Window is programmed with a window size -- or
5539 * "aperture" -- which controls the granularity of its mapping onto
5540 * adapter memory. We need to grab that aperture in order to know
5541 * how to use the specified window. The window is also programmed
5542 * with the base address of the Memory Window in BAR0's address
5543 * space. For T4 this is an absolute PCI-E Bus Address. For T5
5544 * the address is relative to BAR0.
5546 mem_reg
= t4_read_reg(adap
,
5547 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN
,
5549 mem_aperture
= 1 << (G_WINDOW(mem_reg
) + X_WINDOW_SHIFT
);
5550 mem_base
= G_PCIEOFST(mem_reg
) << X_PCIEOFST_SHIFT
;
5552 win_pf
= is_t4(adap
->params
.chip
) ? 0 : V_PFNUM(adap
->pf
);
5554 /* Calculate our initial PCI-E Memory Window Position and Offset into
5557 pos
= addr
& ~(mem_aperture
- 1);
5558 offset
= addr
- pos
;
5560 /* Set up initial PCI-E Memory Window to cover the start of our
5561 * transfer. (Read it back to ensure that changes propagate before we
5562 * attempt to use the new value.)
5565 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET
, win
),
5568 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET
, win
));
5570 /* Transfer data to/from the adapter as long as there's an integral
5571 * number of 32-bit transfers to complete.
5573 * A note on Endianness issues:
5575 * The "register" reads and writes below from/to the PCI-E Memory
5576 * Window invoke the standard adapter Big-Endian to PCI-E Link
5577 * Little-Endian "swizzel." As a result, if we have the following
5578 * data in adapter memory:
5580 * Memory: ... | b0 | b1 | b2 | b3 | ...
5581 * Address: i+0 i+1 i+2 i+3
5583 * Then a read of the adapter memory via the PCI-E Memory Window
5588 * [ b3 | b2 | b1 | b0 ]
5590 * If this value is stored into local memory on a Little-Endian system
5591 * it will show up correctly in local memory as:
5593 * ( ..., b0, b1, b2, b3, ... )
5595 * But on a Big-Endian system, the store will show up in memory
5596 * incorrectly swizzled as:
5598 * ( ..., b3, b2, b1, b0, ... )
5600 * So we need to account for this in the reads and writes to the
5601 * PCI-E Memory Window below by undoing the register read/write
5605 if (dir
== T4_MEMORY_READ
)
5606 *buf
++ = le32_to_cpu((__le32
)t4_read_reg(adap
,
5610 t4_write_reg(adap
, mem_base
+ offset
,
5611 (u32
)cpu_to_le32(*buf
++));
5612 offset
+= sizeof(__be32
);
5613 len
-= sizeof(__be32
);
5615 /* If we've reached the end of our current window aperture,
5616 * move the PCI-E Memory Window on to the next. Note that
5617 * doing this here after "len" may be 0 allows us to set up
5618 * the PCI-E Memory Window for a possible final residual
5619 * transfer below ...
5621 if (offset
== mem_aperture
) {
5622 pos
+= mem_aperture
;
5625 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET
,
5626 win
), pos
| win_pf
);
5628 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET
,
5633 /* If the original transfer had a length which wasn't a multiple of
5634 * 32-bits, now's where we need to finish off the transfer of the
5635 * residual amount. The PCI-E Memory Window has already been moved
5636 * above (if necessary) to cover this final transfer.
5646 if (dir
== T4_MEMORY_READ
) {
5647 last
.word
= le32_to_cpu((__le32
)t4_read_reg(adap
,
5650 for (bp
= (unsigned char *)buf
, i
= resid
; i
< 4; i
++)
5651 bp
[i
] = last
.byte
[i
];
5654 for (i
= resid
; i
< 4; i
++)
5656 t4_write_reg(adap
, mem_base
+ offset
,
5657 (u32
)cpu_to_le32(last
.word
));
5665 * t4_memory_rw_mtype -read/write EDC 0, EDC 1 or MC via PCIE memory window
5666 * @adap: the adapter
5667 * @win: PCI-E Memory Window to use
5668 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
5669 * @maddr: address within indicated memory type
5670 * @len: amount of memory to transfer
5671 * @hbuf: host memory buffer
5672 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
5674 * Reads/writes adapter memory using t4_memory_rw_addr(). This routine
5675 * provides an (memory type, address within memory type) interface.
5677 int t4_memory_rw_mtype(struct adapter
*adap
, int win
, int mtype
, u32 maddr
,
5678 u32 len
, void *hbuf
, int dir
)
5681 u32 edc_size
, mc_size
;
5683 /* Offset into the region of memory which is being accessed
5686 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
5687 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
5689 edc_size
= G_EDRAM0_SIZE(t4_read_reg(adap
, A_MA_EDRAM0_BAR
));
5690 if (mtype
!= MEM_MC1
) {
5691 mtype_offset
= (mtype
* (edc_size
* 1024 * 1024));
5693 mc_size
= G_EXT_MEM0_SIZE(t4_read_reg(adap
,
5694 A_MA_EXT_MEMORY0_BAR
));
5695 mtype_offset
= (MEM_MC0
* edc_size
+ mc_size
) * 1024 * 1024;
5698 return t4_memory_rw_addr(adap
, win
,
5699 mtype_offset
+ maddr
, len
,