ceph/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Chelsio Communications.
3 * All rights reserved.
4 */
5
6 #include <netinet/in.h>
7
8 #include <rte_interrupts.h>
9 #include <rte_log.h>
10 #include <rte_debug.h>
11 #include <rte_pci.h>
12 #include <rte_atomic.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_memory.h>
15 #include <rte_tailq.h>
16 #include <rte_eal.h>
17 #include <rte_alarm.h>
18 #include <rte_ether.h>
19 #include <rte_ethdev_driver.h>
20 #include <rte_malloc.h>
21 #include <rte_random.h>
22 #include <rte_dev.h>
23 #include <rte_byteorder.h>
24
25 #include "common.h"
26 #include "t4_regs.h"
27 #include "t4_regs_values.h"
28 #include "t4fw_interface.h"
29
30 /**
31 * t4_read_mtu_tbl - returns the values in the HW path MTU table
32 * @adap: the adapter
33 * @mtus: where to store the MTU values
34 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
35 *
36 * Reads the HW path MTU table.
37 */
38 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
39 {
40 u32 v;
41 int i;
42
43 for (i = 0; i < NMTUS; ++i) {
44 t4_write_reg(adap, A_TP_MTU_TABLE,
45 V_MTUINDEX(0xff) | V_MTUVALUE(i));
46 v = t4_read_reg(adap, A_TP_MTU_TABLE);
47 mtus[i] = G_MTUVALUE(v);
48 if (mtu_log)
49 mtu_log[i] = G_MTUWIDTH(v);
50 }
51 }
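
/*
 * Illustrative usage sketch (not part of the original driver): a caller can
 * snapshot the HW path MTU table into local arrays, for example before
 * reprogramming it with t4_load_mtus() below.
 *
 *	u16 mtus[NMTUS];
 *	u8 mtu_log[NMTUS];
 *
 *	t4_read_mtu_tbl(adap, mtus, mtu_log);
 *	(mtus[i] then holds the i-th path MTU and mtu_log[i] its base-2 log.)
 */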
52
53 /**
54 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
55 * @adap: the adapter
56 * @addr: the indirect TP register address
57 * @mask: specifies the field within the register to modify
58 * @val: new value for the field
59 *
60 * Sets a field of an indirect TP register to the given value.
61 */
62 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
63 unsigned int mask, unsigned int val)
64 {
65 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
66 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
67 t4_write_reg(adap, A_TP_PIO_DATA, val);
68 }
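
/*
 * Illustrative sketch (not part of the original driver): updating one field
 * of an indirectly addressed TP register. The register offset and the
 * V_/M_ field macros below are hypothetical placeholders that follow this
 * file's naming convention, not real definitions.
 *
 *	t4_tp_wr_bits_indirect(adap, EXAMPLE_TP_INDIRECT_ADDR,
 *			       V_EXAMPLE_FIELD(M_EXAMPLE_FIELD),
 *			       V_EXAMPLE_FIELD(new_val));
 */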
69
70 /* The minimum additive increment value for the congestion control table */
71 #define CC_MIN_INCR 2U
72
73 /**
74 * t4_load_mtus - write the MTU and congestion control HW tables
75 * @adap: the adapter
76 * @mtus: the values for the MTU table
77 * @alpha: the values for the congestion control alpha parameter
78 * @beta: the values for the congestion control beta parameter
79 *
80 * Write the HW MTU table with the supplied MTUs and the high-speed
81 * congestion control table with the supplied alpha, beta, and MTUs.
82 * We write the two tables together because the additive increments
83 * depend on the MTUs.
84 */
85 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
86 const unsigned short *alpha, const unsigned short *beta)
87 {
88 static const unsigned int avg_pkts[NCCTRL_WIN] = {
89 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
90 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
91 28672, 40960, 57344, 81920, 114688, 163840, 229376
92 };
93
94 unsigned int i, w;
95
96 for (i = 0; i < NMTUS; ++i) {
97 unsigned int mtu = mtus[i];
98 unsigned int log2 = cxgbe_fls(mtu);
99
100 if (!(mtu & ((1 << log2) >> 2))) /* round */
101 log2--;
102 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
103 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
104
105 for (w = 0; w < NCCTRL_WIN; ++w) {
106 unsigned int inc;
107
108 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
109 CC_MIN_INCR);
110
111 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
112 (w << 16) | (beta[w] << 13) | inc);
113 }
114 }
115 }
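
/*
 * Illustrative sketch (not part of the original driver): reprogramming the
 * MTU and congestion control tables. The caller is assumed to provide NMTUS
 * path MTUs in ascending order plus NCCTRL_WIN alpha/beta pairs; the values
 * hinted at below are placeholders, not the driver defaults.
 *
 *	unsigned short mtus[NMTUS];	(e.g. 88, 256, ..., 9600)
 *	unsigned short alpha[NCCTRL_WIN], beta[NCCTRL_WIN];
 *
 *	(fill mtus[], alpha[] and beta[] here)
 *	t4_load_mtus(adap, mtus, alpha, beta);
 */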
116
117 /**
118 * t4_wait_op_done_val - wait until an operation is completed
119 * @adapter: the adapter performing the operation
120 * @reg: the register to check for completion
121 * @mask: a single-bit field within @reg that indicates completion
122 * @polarity: the value of the field when the operation is completed
123 * @attempts: number of check iterations
124 * @delay: delay in usecs between iterations
125 * @valp: where to store the value of the register at completion time
126 *
127 * Wait until an operation is completed by checking a bit in a register
128 * up to @attempts times. If @valp is not NULL the value of the register
129 * at the time it indicated completion is stored there. Returns 0 if the
130 * operation completes and -EAGAIN otherwise.
131 */
132 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
133 int polarity, int attempts, int delay, u32 *valp)
134 {
135 while (1) {
136 u32 val = t4_read_reg(adapter, reg);
137
138 if (!!(val & mask) == polarity) {
139 if (valp)
140 *valp = val;
141 return 0;
142 }
143 if (--attempts == 0)
144 return -EAGAIN;
145 if (delay)
146 udelay(delay);
147 }
148 }
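
/*
 * Illustrative sketch (not part of the original driver): spinning on a
 * hypothetical BUSY bit until hardware clears it. A_EXAMPLE_CMD_REG and
 * F_EXAMPLE_BUSY are placeholders; polarity 0 means "wait for the bit to
 * drop", with 10 attempts spaced 5us apart.
 *
 *	u32 val;
 *
 *	if (t4_wait_op_done_val(adapter, A_EXAMPLE_CMD_REG, F_EXAMPLE_BUSY,
 *				0, 10, 5, &val))
 *		return -EAGAIN;
 */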
149
150 /**
151 * t4_set_reg_field - set a register field to a value
152 * @adapter: the adapter to program
153 * @addr: the register address
154 * @mask: specifies the portion of the register to modify
155 * @val: the new value for the register field
156 *
157 * Sets a register field specified by the supplied mask to the
158 * given value.
159 */
160 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
161 u32 val)
162 {
163 u32 v = t4_read_reg(adapter, addr) & ~mask;
164
165 t4_write_reg(adapter, addr, v | val);
166 (void)t4_read_reg(adapter, addr); /* flush */
167 }
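
/*
 * Illustrative sketch (not part of the original driver): a read-modify-write
 * that replaces a single field of a directly mapped register. The register
 * and field macros are hypothetical placeholders in this file's V_/M_ style.
 *
 *	t4_set_reg_field(adapter, A_EXAMPLE_CONFIG,
 *			 V_EXAMPLE_FIELD(M_EXAMPLE_FIELD),
 *			 V_EXAMPLE_FIELD(new_val));
 */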
168
169 /**
170 * t4_read_indirect - read indirectly addressed registers
171 * @adap: the adapter
172 * @addr_reg: register holding the indirect address
173 * @data_reg: register holding the value of the indirect register
174 * @vals: where the read register values are stored
175 * @nregs: how many indirect registers to read
176 * @start_idx: index of first indirect register to read
177 *
178 * Reads registers that are accessed indirectly through an address/data
179 * register pair.
180 */
181 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
182 unsigned int data_reg, u32 *vals, unsigned int nregs,
183 unsigned int start_idx)
184 {
185 while (nregs--) {
186 t4_write_reg(adap, addr_reg, start_idx);
187 *vals++ = t4_read_reg(adap, data_reg);
188 start_idx++;
189 }
190 }
191
192 /**
193 * t4_write_indirect - write indirectly addressed registers
194 * @adap: the adapter
195 * @addr_reg: register holding the indirect addresses
196 * @data_reg: register holding the value for the indirect registers
197 * @vals: values to write
198 * @nregs: how many indirect registers to write
199 * @start_idx: address of first indirect register to write
200 *
201 * Writes a sequential block of registers that are accessed indirectly
202 * through an address/data register pair.
203 */
204 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
205 unsigned int data_reg, const u32 *vals,
206 unsigned int nregs, unsigned int start_idx)
207 {
208 while (nregs--) {
209 t4_write_reg(adap, addr_reg, start_idx++);
210 t4_write_reg(adap, data_reg, *vals++);
211 }
212 }
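
/*
 * Illustrative sketch (not part of the original driver): the TP PIO
 * address/data pair used by t4_tp_wr_bits_indirect() above can also be read
 * back as a block through t4_read_indirect(). EXAMPLE_TP_BASE is a
 * placeholder for the first indirect register index.
 *
 *	u32 vals[8];
 *
 *	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
 *			 vals, ARRAY_SIZE(vals), EXAMPLE_TP_BASE);
 */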
213
214 /**
215 * t4_report_fw_error - report firmware error
216 * @adap: the adapter
217 *
218 * The adapter firmware can indicate error conditions to the host.
219 * If the firmware has indicated an error, print out the reason for
220 * the firmware error.
221 */
222 static void t4_report_fw_error(struct adapter *adap)
223 {
224 static const char * const reason[] = {
225 "Crash", /* PCIE_FW_EVAL_CRASH */
226 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
227 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
228 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
229 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
230 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
231 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
232 "Reserved", /* reserved */
233 };
234 u32 pcie_fw;
235
236 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
237 if (pcie_fw & F_PCIE_FW_ERR)
238 pr_err("%s: Firmware reports adapter error: %s\n",
239 __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
240 }
241
242 /*
243 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
244 */
245 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
246 u32 mbox_addr)
247 {
248 for ( ; nflit; nflit--, mbox_addr += 8)
249 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
250 }
251
252 /*
253 * Handle a FW assertion reported in a mailbox.
254 */
255 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
256 {
257 struct fw_debug_cmd asrt;
258
259 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
260 pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
261 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
262 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
263 }
264
265 #define X_CIM_PF_NOACCESS 0xeeeeeeee
266
267 /*
268  * If the Host OS Driver needs locking around accesses to the mailbox, this
269 * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
270 */
271 /* makes single-statement usage a bit cleaner ... */
272 #ifdef T4_OS_NEEDS_MBOX_LOCKING
273 #define T4_OS_MBOX_LOCKING(x) x
274 #else
275 #define T4_OS_MBOX_LOCKING(x) do {} while (0)
276 #endif
277
278 /**
279 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
280 * @adap: the adapter
281 * @mbox: index of the mailbox to use
282 * @cmd: the command to write
283 * @size: command length in bytes
284 * @rpl: where to optionally store the reply
285 * @sleep_ok: if true we may sleep while awaiting command completion
286 * @timeout: time to wait for command to finish before timing out
287 * (negative implies @sleep_ok=false)
288 *
289 * Sends the given command to FW through the selected mailbox and waits
290 * for the FW to execute the command. If @rpl is not %NULL it is used to
291 * store the FW's reply to the command. The command and its optional
292 * reply are of the same length. Some FW commands like RESET and
293 * INITIALIZE can take a considerable amount of time to execute.
294 * @sleep_ok determines whether we may sleep while awaiting the response.
295 * If sleeping is allowed we use progressive backoff otherwise we spin.
296 * Note that passing in a negative @timeout is an alternate mechanism
297 * for specifying @sleep_ok=false. This is useful when a higher level
298 * interface allows for specification of @timeout but not @sleep_ok ...
299 *
300 * Returns 0 on success or a negative errno on failure. A
301 * failure can happen either because we are not able to execute the
302 * command or FW executes it but signals an error. In the latter case
303 * the return value is the error code indicated by FW (negated).
304 */
305 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
306 const void __attribute__((__may_alias__)) *cmd,
307 int size, void *rpl, bool sleep_ok, int timeout)
308 {
309 /*
310 * We delay in small increments at first in an effort to maintain
311 * responsiveness for simple, fast executing commands but then back
312 * off to larger delays to a maximum retry delay.
313 */
314 static const int delay[] = {
315 1, 1, 3, 5, 10, 10, 20, 50, 100
316 };
317
318 u32 v;
319 u64 res;
320 int i, ms;
321 unsigned int delay_idx;
322 __be64 *temp = (__be64 *)malloc(size * sizeof(char));
323 __be64 *p = temp;
324 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
325 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
326 u32 ctl;
327 struct mbox_entry entry;
328 u32 pcie_fw = 0;
329
330 if (!temp)
331 return -ENOMEM;
332
333 if ((size & 15) || size > MBOX_LEN) {
334 free(temp);
335 return -EINVAL;
336 }
337
338 memset(p, 0, size);
339 memcpy(p, (const __be64 *)cmd, size);
340
341 /*
342 * If we have a negative timeout, that implies that we can't sleep.
343 */
344 if (timeout < 0) {
345 sleep_ok = false;
346 timeout = -timeout;
347 }
348
349 #ifdef T4_OS_NEEDS_MBOX_LOCKING
350 /*
351 * Queue ourselves onto the mailbox access list. When our entry is at
352 * the front of the list, we have rights to access the mailbox. So we
353 * wait [for a while] till we're at the front [or bail out with an
354 * EBUSY] ...
355 */
356 t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
357
358 delay_idx = 0;
359 ms = delay[0];
360
361 for (i = 0; ; i += ms) {
362 /*
363 * If we've waited too long, return a busy indication. This
364 * really ought to be based on our initial position in the
365 * mailbox access list but this is a start. We very rarely
366 * contend on access to the mailbox ... Also check for a
367 * firmware error which we'll report as a device error.
368 */
369 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
370 if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
371 t4_os_atomic_list_del(&entry, &adap->mbox_list,
372 &adap->mbox_lock);
373 t4_report_fw_error(adap);
374 free(temp);
375 return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
376 }
377
378 /*
379 * If we're at the head, break out and start the mailbox
380 * protocol.
381 */
382 if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
383 break;
384
385 /*
386 * Delay for a bit before checking again ...
387 */
388 if (sleep_ok) {
389 ms = delay[delay_idx]; /* last element may repeat */
390 if (delay_idx < ARRAY_SIZE(delay) - 1)
391 delay_idx++;
392 msleep(ms);
393 } else {
394 rte_delay_ms(ms);
395 }
396 }
397 #endif /* T4_OS_NEEDS_MBOX_LOCKING */
398
399 /*
400 * Attempt to gain access to the mailbox.
401 */
402 for (i = 0; i < 4; i++) {
403 ctl = t4_read_reg(adap, ctl_reg);
404 v = G_MBOWNER(ctl);
405 if (v != X_MBOWNER_NONE)
406 break;
407 }
408
409 /*
410 * If we were unable to gain access, dequeue ourselves from the
411 * mailbox atomic access list and report the error to our caller.
412 */
413 if (v != X_MBOWNER_PL) {
414 T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
415 &adap->mbox_list,
416 &adap->mbox_lock));
417 t4_report_fw_error(adap);
418 free(temp);
419 return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
420 }
421
422 /*
423 * If we gain ownership of the mailbox and there's a "valid" message
424 * in it, this is likely an asynchronous error message from the
425 * firmware. So we'll report that and then proceed on with attempting
426 * to issue our own command ... which may well fail if the error
427 * presaged the firmware crashing ...
428 */
429 if (ctl & F_MBMSGVALID) {
430 dev_err(adap, "found VALID command in mbox %u: "
431 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
432 (unsigned long long)t4_read_reg64(adap, data_reg),
433 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
434 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
435 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
436 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
437 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
438 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
439 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
440 }
441
442 /*
443 * Copy in the new mailbox command and send it on its way ...
444 */
445 for (i = 0; i < size; i += 8, p++)
446 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
447
448 CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
449 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
450 (unsigned long long)t4_read_reg64(adap, data_reg),
451 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
452 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
453 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
454 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
455 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
456 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
457 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
458
459 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
460 t4_read_reg(adap, ctl_reg); /* flush write */
461
462 delay_idx = 0;
463 ms = delay[0];
464
465 /*
466 * Loop waiting for the reply; bail out if we time out or the firmware
467 * reports an error.
468 */
469 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
470 for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
471 if (sleep_ok) {
472 ms = delay[delay_idx]; /* last element may repeat */
473 if (delay_idx < ARRAY_SIZE(delay) - 1)
474 delay_idx++;
475 msleep(ms);
476 } else {
477 msleep(ms);
478 }
479
480 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
481 v = t4_read_reg(adap, ctl_reg);
482 if (v == X_CIM_PF_NOACCESS)
483 continue;
484 if (G_MBOWNER(v) == X_MBOWNER_PL) {
485 if (!(v & F_MBMSGVALID)) {
486 t4_write_reg(adap, ctl_reg,
487 V_MBOWNER(X_MBOWNER_NONE));
488 continue;
489 }
490
491 CXGBE_DEBUG_MBOX(adap,
492 "%s: mbox %u: %016llx %016llx %016llx %016llx "
493 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
494 (unsigned long long)t4_read_reg64(adap, data_reg),
495 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
496 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
497 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
498 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
499 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
500 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
501 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
502
503 CXGBE_DEBUG_MBOX(adap,
504 "command %#x completed in %d ms (%ssleeping)\n",
505 *(const u8 *)cmd,
506 i + ms, sleep_ok ? "" : "non-");
507
508 res = t4_read_reg64(adap, data_reg);
509 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
510 fw_asrt(adap, data_reg);
511 res = V_FW_CMD_RETVAL(EIO);
512 } else if (rpl) {
513 get_mbox_rpl(adap, rpl, size / 8, data_reg);
514 }
515 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
516 T4_OS_MBOX_LOCKING(
517 t4_os_atomic_list_del(&entry, &adap->mbox_list,
518 &adap->mbox_lock));
519 free(temp);
520 return -G_FW_CMD_RETVAL((int)res);
521 }
522 }
523
524 /*
525 * We timed out waiting for a reply to our mailbox command. Report
526 * the error and also check to see if the firmware reported any
527 * errors ...
528 */
529 dev_err(adap, "command %#x in mailbox %d timed out\n",
530 *(const u8 *)cmd, mbox);
531 T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
532 &adap->mbox_list,
533 &adap->mbox_lock));
534 t4_report_fw_error(adap);
535 free(temp);
536 return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
537 }
538
539 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
540 void *rpl, bool sleep_ok)
541 {
542 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
543 FW_CMD_MAX_TIMEOUT);
544 }
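
/*
 * Illustrative sketch (not part of the original driver): issuing a firmware
 * command through the mailbox. The FW_LDST command below mirrors the pattern
 * used by t4_fw_tp_pio_rw() later in this file; passing a negative timeout
 * to t4_wr_mbox_meat_timeout() instead would force the non-sleeping path.
 *
 *	struct fw_ldst_cmd c;
 *	int ret;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
 *					F_FW_CMD_REQUEST | F_FW_CMD_READ |
 *					V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_TP_PIO));
 *	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
 *	c.u.addrval.addr = cpu_to_be32(A_TP_RSS_SECRET_KEY0);
 *	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, true);
 */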
545
546 /**
547  * t4_get_regs_len - return the size of the chip's register set
548 * @adapter: the adapter
549 *
550 * Returns the size of the chip's BAR0 register space.
551 */
552 unsigned int t4_get_regs_len(struct adapter *adapter)
553 {
554 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
555
556 switch (chip_version) {
557 case CHELSIO_T5:
558 case CHELSIO_T6:
559 return T5_REGMAP_SIZE;
560 }
561
562 dev_err(adapter,
563 "Unsupported chip version %d\n", chip_version);
564 return 0;
565 }
566
567 /**
568 * t4_get_regs - read chip registers into provided buffer
569 * @adap: the adapter
570 * @buf: register buffer
571 * @buf_size: size (in bytes) of register buffer
572 *
573 * If the provided register buffer isn't large enough for the chip's
574 * full register range, the register dump will be truncated to the
575 * register buffer's size.
576 */
577 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
578 {
579 static const unsigned int t5_reg_ranges[] = {
580 0x1008, 0x10c0,
581 0x10cc, 0x10f8,
582 0x1100, 0x1100,
583 0x110c, 0x1148,
584 0x1180, 0x1184,
585 0x1190, 0x1194,
586 0x11a0, 0x11a4,
587 0x11b0, 0x11b4,
588 0x11fc, 0x123c,
589 0x1280, 0x173c,
590 0x1800, 0x18fc,
591 0x3000, 0x3028,
592 0x3060, 0x30b0,
593 0x30b8, 0x30d8,
594 0x30e0, 0x30fc,
595 0x3140, 0x357c,
596 0x35a8, 0x35cc,
597 0x35ec, 0x35ec,
598 0x3600, 0x5624,
599 0x56cc, 0x56ec,
600 0x56f4, 0x5720,
601 0x5728, 0x575c,
602 0x580c, 0x5814,
603 0x5890, 0x589c,
604 0x58a4, 0x58ac,
605 0x58b8, 0x58bc,
606 0x5940, 0x59c8,
607 0x59d0, 0x59dc,
608 0x59fc, 0x5a18,
609 0x5a60, 0x5a70,
610 0x5a80, 0x5a9c,
611 0x5b94, 0x5bfc,
612 0x6000, 0x6020,
613 0x6028, 0x6040,
614 0x6058, 0x609c,
615 0x60a8, 0x614c,
616 0x7700, 0x7798,
617 0x77c0, 0x78fc,
618 0x7b00, 0x7b58,
619 0x7b60, 0x7b84,
620 0x7b8c, 0x7c54,
621 0x7d00, 0x7d38,
622 0x7d40, 0x7d80,
623 0x7d8c, 0x7ddc,
624 0x7de4, 0x7e04,
625 0x7e10, 0x7e1c,
626 0x7e24, 0x7e38,
627 0x7e40, 0x7e44,
628 0x7e4c, 0x7e78,
629 0x7e80, 0x7edc,
630 0x7ee8, 0x7efc,
631 0x8dc0, 0x8de0,
632 0x8df8, 0x8e04,
633 0x8e10, 0x8e84,
634 0x8ea0, 0x8f84,
635 0x8fc0, 0x9058,
636 0x9060, 0x9060,
637 0x9068, 0x90f8,
638 0x9400, 0x9408,
639 0x9410, 0x9470,
640 0x9600, 0x9600,
641 0x9608, 0x9638,
642 0x9640, 0x96f4,
643 0x9800, 0x9808,
644 0x9820, 0x983c,
645 0x9850, 0x9864,
646 0x9c00, 0x9c6c,
647 0x9c80, 0x9cec,
648 0x9d00, 0x9d6c,
649 0x9d80, 0x9dec,
650 0x9e00, 0x9e6c,
651 0x9e80, 0x9eec,
652 0x9f00, 0x9f6c,
653 0x9f80, 0xa020,
654 0xd004, 0xd004,
655 0xd010, 0xd03c,
656 0xdfc0, 0xdfe0,
657 0xe000, 0x1106c,
658 0x11074, 0x11088,
659 0x1109c, 0x1117c,
660 0x11190, 0x11204,
661 0x19040, 0x1906c,
662 0x19078, 0x19080,
663 0x1908c, 0x190e8,
664 0x190f0, 0x190f8,
665 0x19100, 0x19110,
666 0x19120, 0x19124,
667 0x19150, 0x19194,
668 0x1919c, 0x191b0,
669 0x191d0, 0x191e8,
670 0x19238, 0x19290,
671 0x193f8, 0x19428,
672 0x19430, 0x19444,
673 0x1944c, 0x1946c,
674 0x19474, 0x19474,
675 0x19490, 0x194cc,
676 0x194f0, 0x194f8,
677 0x19c00, 0x19c08,
678 0x19c10, 0x19c60,
679 0x19c94, 0x19ce4,
680 0x19cf0, 0x19d40,
681 0x19d50, 0x19d94,
682 0x19da0, 0x19de8,
683 0x19df0, 0x19e10,
684 0x19e50, 0x19e90,
685 0x19ea0, 0x19f24,
686 0x19f34, 0x19f34,
687 0x19f40, 0x19f50,
688 0x19f90, 0x19fb4,
689 0x19fc4, 0x19fe4,
690 0x1a000, 0x1a004,
691 0x1a010, 0x1a06c,
692 0x1a0b0, 0x1a0e4,
693 0x1a0ec, 0x1a0f8,
694 0x1a100, 0x1a108,
695 0x1a114, 0x1a120,
696 0x1a128, 0x1a130,
697 0x1a138, 0x1a138,
698 0x1a190, 0x1a1c4,
699 0x1a1fc, 0x1a1fc,
700 0x1e008, 0x1e00c,
701 0x1e040, 0x1e044,
702 0x1e04c, 0x1e04c,
703 0x1e284, 0x1e290,
704 0x1e2c0, 0x1e2c0,
705 0x1e2e0, 0x1e2e0,
706 0x1e300, 0x1e384,
707 0x1e3c0, 0x1e3c8,
708 0x1e408, 0x1e40c,
709 0x1e440, 0x1e444,
710 0x1e44c, 0x1e44c,
711 0x1e684, 0x1e690,
712 0x1e6c0, 0x1e6c0,
713 0x1e6e0, 0x1e6e0,
714 0x1e700, 0x1e784,
715 0x1e7c0, 0x1e7c8,
716 0x1e808, 0x1e80c,
717 0x1e840, 0x1e844,
718 0x1e84c, 0x1e84c,
719 0x1ea84, 0x1ea90,
720 0x1eac0, 0x1eac0,
721 0x1eae0, 0x1eae0,
722 0x1eb00, 0x1eb84,
723 0x1ebc0, 0x1ebc8,
724 0x1ec08, 0x1ec0c,
725 0x1ec40, 0x1ec44,
726 0x1ec4c, 0x1ec4c,
727 0x1ee84, 0x1ee90,
728 0x1eec0, 0x1eec0,
729 0x1eee0, 0x1eee0,
730 0x1ef00, 0x1ef84,
731 0x1efc0, 0x1efc8,
732 0x1f008, 0x1f00c,
733 0x1f040, 0x1f044,
734 0x1f04c, 0x1f04c,
735 0x1f284, 0x1f290,
736 0x1f2c0, 0x1f2c0,
737 0x1f2e0, 0x1f2e0,
738 0x1f300, 0x1f384,
739 0x1f3c0, 0x1f3c8,
740 0x1f408, 0x1f40c,
741 0x1f440, 0x1f444,
742 0x1f44c, 0x1f44c,
743 0x1f684, 0x1f690,
744 0x1f6c0, 0x1f6c0,
745 0x1f6e0, 0x1f6e0,
746 0x1f700, 0x1f784,
747 0x1f7c0, 0x1f7c8,
748 0x1f808, 0x1f80c,
749 0x1f840, 0x1f844,
750 0x1f84c, 0x1f84c,
751 0x1fa84, 0x1fa90,
752 0x1fac0, 0x1fac0,
753 0x1fae0, 0x1fae0,
754 0x1fb00, 0x1fb84,
755 0x1fbc0, 0x1fbc8,
756 0x1fc08, 0x1fc0c,
757 0x1fc40, 0x1fc44,
758 0x1fc4c, 0x1fc4c,
759 0x1fe84, 0x1fe90,
760 0x1fec0, 0x1fec0,
761 0x1fee0, 0x1fee0,
762 0x1ff00, 0x1ff84,
763 0x1ffc0, 0x1ffc8,
764 0x30000, 0x30030,
765 0x30038, 0x30038,
766 0x30040, 0x30040,
767 0x30100, 0x30144,
768 0x30190, 0x301a0,
769 0x301a8, 0x301b8,
770 0x301c4, 0x301c8,
771 0x301d0, 0x301d0,
772 0x30200, 0x30318,
773 0x30400, 0x304b4,
774 0x304c0, 0x3052c,
775 0x30540, 0x3061c,
776 0x30800, 0x30828,
777 0x30834, 0x30834,
778 0x308c0, 0x30908,
779 0x30910, 0x309ac,
780 0x30a00, 0x30a14,
781 0x30a1c, 0x30a2c,
782 0x30a44, 0x30a50,
783 0x30a74, 0x30a74,
784 0x30a7c, 0x30afc,
785 0x30b08, 0x30c24,
786 0x30d00, 0x30d00,
787 0x30d08, 0x30d14,
788 0x30d1c, 0x30d20,
789 0x30d3c, 0x30d3c,
790 0x30d48, 0x30d50,
791 0x31200, 0x3120c,
792 0x31220, 0x31220,
793 0x31240, 0x31240,
794 0x31600, 0x3160c,
795 0x31a00, 0x31a1c,
796 0x31e00, 0x31e20,
797 0x31e38, 0x31e3c,
798 0x31e80, 0x31e80,
799 0x31e88, 0x31ea8,
800 0x31eb0, 0x31eb4,
801 0x31ec8, 0x31ed4,
802 0x31fb8, 0x32004,
803 0x32200, 0x32200,
804 0x32208, 0x32240,
805 0x32248, 0x32280,
806 0x32288, 0x322c0,
807 0x322c8, 0x322fc,
808 0x32600, 0x32630,
809 0x32a00, 0x32abc,
810 0x32b00, 0x32b10,
811 0x32b20, 0x32b30,
812 0x32b40, 0x32b50,
813 0x32b60, 0x32b70,
814 0x33000, 0x33028,
815 0x33030, 0x33048,
816 0x33060, 0x33068,
817 0x33070, 0x3309c,
818 0x330f0, 0x33128,
819 0x33130, 0x33148,
820 0x33160, 0x33168,
821 0x33170, 0x3319c,
822 0x331f0, 0x33238,
823 0x33240, 0x33240,
824 0x33248, 0x33250,
825 0x3325c, 0x33264,
826 0x33270, 0x332b8,
827 0x332c0, 0x332e4,
828 0x332f8, 0x33338,
829 0x33340, 0x33340,
830 0x33348, 0x33350,
831 0x3335c, 0x33364,
832 0x33370, 0x333b8,
833 0x333c0, 0x333e4,
834 0x333f8, 0x33428,
835 0x33430, 0x33448,
836 0x33460, 0x33468,
837 0x33470, 0x3349c,
838 0x334f0, 0x33528,
839 0x33530, 0x33548,
840 0x33560, 0x33568,
841 0x33570, 0x3359c,
842 0x335f0, 0x33638,
843 0x33640, 0x33640,
844 0x33648, 0x33650,
845 0x3365c, 0x33664,
846 0x33670, 0x336b8,
847 0x336c0, 0x336e4,
848 0x336f8, 0x33738,
849 0x33740, 0x33740,
850 0x33748, 0x33750,
851 0x3375c, 0x33764,
852 0x33770, 0x337b8,
853 0x337c0, 0x337e4,
854 0x337f8, 0x337fc,
855 0x33814, 0x33814,
856 0x3382c, 0x3382c,
857 0x33880, 0x3388c,
858 0x338e8, 0x338ec,
859 0x33900, 0x33928,
860 0x33930, 0x33948,
861 0x33960, 0x33968,
862 0x33970, 0x3399c,
863 0x339f0, 0x33a38,
864 0x33a40, 0x33a40,
865 0x33a48, 0x33a50,
866 0x33a5c, 0x33a64,
867 0x33a70, 0x33ab8,
868 0x33ac0, 0x33ae4,
869 0x33af8, 0x33b10,
870 0x33b28, 0x33b28,
871 0x33b3c, 0x33b50,
872 0x33bf0, 0x33c10,
873 0x33c28, 0x33c28,
874 0x33c3c, 0x33c50,
875 0x33cf0, 0x33cfc,
876 0x34000, 0x34030,
877 0x34038, 0x34038,
878 0x34040, 0x34040,
879 0x34100, 0x34144,
880 0x34190, 0x341a0,
881 0x341a8, 0x341b8,
882 0x341c4, 0x341c8,
883 0x341d0, 0x341d0,
884 0x34200, 0x34318,
885 0x34400, 0x344b4,
886 0x344c0, 0x3452c,
887 0x34540, 0x3461c,
888 0x34800, 0x34828,
889 0x34834, 0x34834,
890 0x348c0, 0x34908,
891 0x34910, 0x349ac,
892 0x34a00, 0x34a14,
893 0x34a1c, 0x34a2c,
894 0x34a44, 0x34a50,
895 0x34a74, 0x34a74,
896 0x34a7c, 0x34afc,
897 0x34b08, 0x34c24,
898 0x34d00, 0x34d00,
899 0x34d08, 0x34d14,
900 0x34d1c, 0x34d20,
901 0x34d3c, 0x34d3c,
902 0x34d48, 0x34d50,
903 0x35200, 0x3520c,
904 0x35220, 0x35220,
905 0x35240, 0x35240,
906 0x35600, 0x3560c,
907 0x35a00, 0x35a1c,
908 0x35e00, 0x35e20,
909 0x35e38, 0x35e3c,
910 0x35e80, 0x35e80,
911 0x35e88, 0x35ea8,
912 0x35eb0, 0x35eb4,
913 0x35ec8, 0x35ed4,
914 0x35fb8, 0x36004,
915 0x36200, 0x36200,
916 0x36208, 0x36240,
917 0x36248, 0x36280,
918 0x36288, 0x362c0,
919 0x362c8, 0x362fc,
920 0x36600, 0x36630,
921 0x36a00, 0x36abc,
922 0x36b00, 0x36b10,
923 0x36b20, 0x36b30,
924 0x36b40, 0x36b50,
925 0x36b60, 0x36b70,
926 0x37000, 0x37028,
927 0x37030, 0x37048,
928 0x37060, 0x37068,
929 0x37070, 0x3709c,
930 0x370f0, 0x37128,
931 0x37130, 0x37148,
932 0x37160, 0x37168,
933 0x37170, 0x3719c,
934 0x371f0, 0x37238,
935 0x37240, 0x37240,
936 0x37248, 0x37250,
937 0x3725c, 0x37264,
938 0x37270, 0x372b8,
939 0x372c0, 0x372e4,
940 0x372f8, 0x37338,
941 0x37340, 0x37340,
942 0x37348, 0x37350,
943 0x3735c, 0x37364,
944 0x37370, 0x373b8,
945 0x373c0, 0x373e4,
946 0x373f8, 0x37428,
947 0x37430, 0x37448,
948 0x37460, 0x37468,
949 0x37470, 0x3749c,
950 0x374f0, 0x37528,
951 0x37530, 0x37548,
952 0x37560, 0x37568,
953 0x37570, 0x3759c,
954 0x375f0, 0x37638,
955 0x37640, 0x37640,
956 0x37648, 0x37650,
957 0x3765c, 0x37664,
958 0x37670, 0x376b8,
959 0x376c0, 0x376e4,
960 0x376f8, 0x37738,
961 0x37740, 0x37740,
962 0x37748, 0x37750,
963 0x3775c, 0x37764,
964 0x37770, 0x377b8,
965 0x377c0, 0x377e4,
966 0x377f8, 0x377fc,
967 0x37814, 0x37814,
968 0x3782c, 0x3782c,
969 0x37880, 0x3788c,
970 0x378e8, 0x378ec,
971 0x37900, 0x37928,
972 0x37930, 0x37948,
973 0x37960, 0x37968,
974 0x37970, 0x3799c,
975 0x379f0, 0x37a38,
976 0x37a40, 0x37a40,
977 0x37a48, 0x37a50,
978 0x37a5c, 0x37a64,
979 0x37a70, 0x37ab8,
980 0x37ac0, 0x37ae4,
981 0x37af8, 0x37b10,
982 0x37b28, 0x37b28,
983 0x37b3c, 0x37b50,
984 0x37bf0, 0x37c10,
985 0x37c28, 0x37c28,
986 0x37c3c, 0x37c50,
987 0x37cf0, 0x37cfc,
988 0x38000, 0x38030,
989 0x38038, 0x38038,
990 0x38040, 0x38040,
991 0x38100, 0x38144,
992 0x38190, 0x381a0,
993 0x381a8, 0x381b8,
994 0x381c4, 0x381c8,
995 0x381d0, 0x381d0,
996 0x38200, 0x38318,
997 0x38400, 0x384b4,
998 0x384c0, 0x3852c,
999 0x38540, 0x3861c,
1000 0x38800, 0x38828,
1001 0x38834, 0x38834,
1002 0x388c0, 0x38908,
1003 0x38910, 0x389ac,
1004 0x38a00, 0x38a14,
1005 0x38a1c, 0x38a2c,
1006 0x38a44, 0x38a50,
1007 0x38a74, 0x38a74,
1008 0x38a7c, 0x38afc,
1009 0x38b08, 0x38c24,
1010 0x38d00, 0x38d00,
1011 0x38d08, 0x38d14,
1012 0x38d1c, 0x38d20,
1013 0x38d3c, 0x38d3c,
1014 0x38d48, 0x38d50,
1015 0x39200, 0x3920c,
1016 0x39220, 0x39220,
1017 0x39240, 0x39240,
1018 0x39600, 0x3960c,
1019 0x39a00, 0x39a1c,
1020 0x39e00, 0x39e20,
1021 0x39e38, 0x39e3c,
1022 0x39e80, 0x39e80,
1023 0x39e88, 0x39ea8,
1024 0x39eb0, 0x39eb4,
1025 0x39ec8, 0x39ed4,
1026 0x39fb8, 0x3a004,
1027 0x3a200, 0x3a200,
1028 0x3a208, 0x3a240,
1029 0x3a248, 0x3a280,
1030 0x3a288, 0x3a2c0,
1031 0x3a2c8, 0x3a2fc,
1032 0x3a600, 0x3a630,
1033 0x3aa00, 0x3aabc,
1034 0x3ab00, 0x3ab10,
1035 0x3ab20, 0x3ab30,
1036 0x3ab40, 0x3ab50,
1037 0x3ab60, 0x3ab70,
1038 0x3b000, 0x3b028,
1039 0x3b030, 0x3b048,
1040 0x3b060, 0x3b068,
1041 0x3b070, 0x3b09c,
1042 0x3b0f0, 0x3b128,
1043 0x3b130, 0x3b148,
1044 0x3b160, 0x3b168,
1045 0x3b170, 0x3b19c,
1046 0x3b1f0, 0x3b238,
1047 0x3b240, 0x3b240,
1048 0x3b248, 0x3b250,
1049 0x3b25c, 0x3b264,
1050 0x3b270, 0x3b2b8,
1051 0x3b2c0, 0x3b2e4,
1052 0x3b2f8, 0x3b338,
1053 0x3b340, 0x3b340,
1054 0x3b348, 0x3b350,
1055 0x3b35c, 0x3b364,
1056 0x3b370, 0x3b3b8,
1057 0x3b3c0, 0x3b3e4,
1058 0x3b3f8, 0x3b428,
1059 0x3b430, 0x3b448,
1060 0x3b460, 0x3b468,
1061 0x3b470, 0x3b49c,
1062 0x3b4f0, 0x3b528,
1063 0x3b530, 0x3b548,
1064 0x3b560, 0x3b568,
1065 0x3b570, 0x3b59c,
1066 0x3b5f0, 0x3b638,
1067 0x3b640, 0x3b640,
1068 0x3b648, 0x3b650,
1069 0x3b65c, 0x3b664,
1070 0x3b670, 0x3b6b8,
1071 0x3b6c0, 0x3b6e4,
1072 0x3b6f8, 0x3b738,
1073 0x3b740, 0x3b740,
1074 0x3b748, 0x3b750,
1075 0x3b75c, 0x3b764,
1076 0x3b770, 0x3b7b8,
1077 0x3b7c0, 0x3b7e4,
1078 0x3b7f8, 0x3b7fc,
1079 0x3b814, 0x3b814,
1080 0x3b82c, 0x3b82c,
1081 0x3b880, 0x3b88c,
1082 0x3b8e8, 0x3b8ec,
1083 0x3b900, 0x3b928,
1084 0x3b930, 0x3b948,
1085 0x3b960, 0x3b968,
1086 0x3b970, 0x3b99c,
1087 0x3b9f0, 0x3ba38,
1088 0x3ba40, 0x3ba40,
1089 0x3ba48, 0x3ba50,
1090 0x3ba5c, 0x3ba64,
1091 0x3ba70, 0x3bab8,
1092 0x3bac0, 0x3bae4,
1093 0x3baf8, 0x3bb10,
1094 0x3bb28, 0x3bb28,
1095 0x3bb3c, 0x3bb50,
1096 0x3bbf0, 0x3bc10,
1097 0x3bc28, 0x3bc28,
1098 0x3bc3c, 0x3bc50,
1099 0x3bcf0, 0x3bcfc,
1100 0x3c000, 0x3c030,
1101 0x3c038, 0x3c038,
1102 0x3c040, 0x3c040,
1103 0x3c100, 0x3c144,
1104 0x3c190, 0x3c1a0,
1105 0x3c1a8, 0x3c1b8,
1106 0x3c1c4, 0x3c1c8,
1107 0x3c1d0, 0x3c1d0,
1108 0x3c200, 0x3c318,
1109 0x3c400, 0x3c4b4,
1110 0x3c4c0, 0x3c52c,
1111 0x3c540, 0x3c61c,
1112 0x3c800, 0x3c828,
1113 0x3c834, 0x3c834,
1114 0x3c8c0, 0x3c908,
1115 0x3c910, 0x3c9ac,
1116 0x3ca00, 0x3ca14,
1117 0x3ca1c, 0x3ca2c,
1118 0x3ca44, 0x3ca50,
1119 0x3ca74, 0x3ca74,
1120 0x3ca7c, 0x3cafc,
1121 0x3cb08, 0x3cc24,
1122 0x3cd00, 0x3cd00,
1123 0x3cd08, 0x3cd14,
1124 0x3cd1c, 0x3cd20,
1125 0x3cd3c, 0x3cd3c,
1126 0x3cd48, 0x3cd50,
1127 0x3d200, 0x3d20c,
1128 0x3d220, 0x3d220,
1129 0x3d240, 0x3d240,
1130 0x3d600, 0x3d60c,
1131 0x3da00, 0x3da1c,
1132 0x3de00, 0x3de20,
1133 0x3de38, 0x3de3c,
1134 0x3de80, 0x3de80,
1135 0x3de88, 0x3dea8,
1136 0x3deb0, 0x3deb4,
1137 0x3dec8, 0x3ded4,
1138 0x3dfb8, 0x3e004,
1139 0x3e200, 0x3e200,
1140 0x3e208, 0x3e240,
1141 0x3e248, 0x3e280,
1142 0x3e288, 0x3e2c0,
1143 0x3e2c8, 0x3e2fc,
1144 0x3e600, 0x3e630,
1145 0x3ea00, 0x3eabc,
1146 0x3eb00, 0x3eb10,
1147 0x3eb20, 0x3eb30,
1148 0x3eb40, 0x3eb50,
1149 0x3eb60, 0x3eb70,
1150 0x3f000, 0x3f028,
1151 0x3f030, 0x3f048,
1152 0x3f060, 0x3f068,
1153 0x3f070, 0x3f09c,
1154 0x3f0f0, 0x3f128,
1155 0x3f130, 0x3f148,
1156 0x3f160, 0x3f168,
1157 0x3f170, 0x3f19c,
1158 0x3f1f0, 0x3f238,
1159 0x3f240, 0x3f240,
1160 0x3f248, 0x3f250,
1161 0x3f25c, 0x3f264,
1162 0x3f270, 0x3f2b8,
1163 0x3f2c0, 0x3f2e4,
1164 0x3f2f8, 0x3f338,
1165 0x3f340, 0x3f340,
1166 0x3f348, 0x3f350,
1167 0x3f35c, 0x3f364,
1168 0x3f370, 0x3f3b8,
1169 0x3f3c0, 0x3f3e4,
1170 0x3f3f8, 0x3f428,
1171 0x3f430, 0x3f448,
1172 0x3f460, 0x3f468,
1173 0x3f470, 0x3f49c,
1174 0x3f4f0, 0x3f528,
1175 0x3f530, 0x3f548,
1176 0x3f560, 0x3f568,
1177 0x3f570, 0x3f59c,
1178 0x3f5f0, 0x3f638,
1179 0x3f640, 0x3f640,
1180 0x3f648, 0x3f650,
1181 0x3f65c, 0x3f664,
1182 0x3f670, 0x3f6b8,
1183 0x3f6c0, 0x3f6e4,
1184 0x3f6f8, 0x3f738,
1185 0x3f740, 0x3f740,
1186 0x3f748, 0x3f750,
1187 0x3f75c, 0x3f764,
1188 0x3f770, 0x3f7b8,
1189 0x3f7c0, 0x3f7e4,
1190 0x3f7f8, 0x3f7fc,
1191 0x3f814, 0x3f814,
1192 0x3f82c, 0x3f82c,
1193 0x3f880, 0x3f88c,
1194 0x3f8e8, 0x3f8ec,
1195 0x3f900, 0x3f928,
1196 0x3f930, 0x3f948,
1197 0x3f960, 0x3f968,
1198 0x3f970, 0x3f99c,
1199 0x3f9f0, 0x3fa38,
1200 0x3fa40, 0x3fa40,
1201 0x3fa48, 0x3fa50,
1202 0x3fa5c, 0x3fa64,
1203 0x3fa70, 0x3fab8,
1204 0x3fac0, 0x3fae4,
1205 0x3faf8, 0x3fb10,
1206 0x3fb28, 0x3fb28,
1207 0x3fb3c, 0x3fb50,
1208 0x3fbf0, 0x3fc10,
1209 0x3fc28, 0x3fc28,
1210 0x3fc3c, 0x3fc50,
1211 0x3fcf0, 0x3fcfc,
1212 0x40000, 0x4000c,
1213 0x40040, 0x40050,
1214 0x40060, 0x40068,
1215 0x4007c, 0x4008c,
1216 0x40094, 0x400b0,
1217 0x400c0, 0x40144,
1218 0x40180, 0x4018c,
1219 0x40200, 0x40254,
1220 0x40260, 0x40264,
1221 0x40270, 0x40288,
1222 0x40290, 0x40298,
1223 0x402ac, 0x402c8,
1224 0x402d0, 0x402e0,
1225 0x402f0, 0x402f0,
1226 0x40300, 0x4033c,
1227 0x403f8, 0x403fc,
1228 0x41304, 0x413c4,
1229 0x41400, 0x4140c,
1230 0x41414, 0x4141c,
1231 0x41480, 0x414d0,
1232 0x44000, 0x44054,
1233 0x4405c, 0x44078,
1234 0x440c0, 0x44174,
1235 0x44180, 0x441ac,
1236 0x441b4, 0x441b8,
1237 0x441c0, 0x44254,
1238 0x4425c, 0x44278,
1239 0x442c0, 0x44374,
1240 0x44380, 0x443ac,
1241 0x443b4, 0x443b8,
1242 0x443c0, 0x44454,
1243 0x4445c, 0x44478,
1244 0x444c0, 0x44574,
1245 0x44580, 0x445ac,
1246 0x445b4, 0x445b8,
1247 0x445c0, 0x44654,
1248 0x4465c, 0x44678,
1249 0x446c0, 0x44774,
1250 0x44780, 0x447ac,
1251 0x447b4, 0x447b8,
1252 0x447c0, 0x44854,
1253 0x4485c, 0x44878,
1254 0x448c0, 0x44974,
1255 0x44980, 0x449ac,
1256 0x449b4, 0x449b8,
1257 0x449c0, 0x449fc,
1258 0x45000, 0x45004,
1259 0x45010, 0x45030,
1260 0x45040, 0x45060,
1261 0x45068, 0x45068,
1262 0x45080, 0x45084,
1263 0x450a0, 0x450b0,
1264 0x45200, 0x45204,
1265 0x45210, 0x45230,
1266 0x45240, 0x45260,
1267 0x45268, 0x45268,
1268 0x45280, 0x45284,
1269 0x452a0, 0x452b0,
1270 0x460c0, 0x460e4,
1271 0x47000, 0x4703c,
1272 0x47044, 0x4708c,
1273 0x47200, 0x47250,
1274 0x47400, 0x47408,
1275 0x47414, 0x47420,
1276 0x47600, 0x47618,
1277 0x47800, 0x47814,
1278 0x48000, 0x4800c,
1279 0x48040, 0x48050,
1280 0x48060, 0x48068,
1281 0x4807c, 0x4808c,
1282 0x48094, 0x480b0,
1283 0x480c0, 0x48144,
1284 0x48180, 0x4818c,
1285 0x48200, 0x48254,
1286 0x48260, 0x48264,
1287 0x48270, 0x48288,
1288 0x48290, 0x48298,
1289 0x482ac, 0x482c8,
1290 0x482d0, 0x482e0,
1291 0x482f0, 0x482f0,
1292 0x48300, 0x4833c,
1293 0x483f8, 0x483fc,
1294 0x49304, 0x493c4,
1295 0x49400, 0x4940c,
1296 0x49414, 0x4941c,
1297 0x49480, 0x494d0,
1298 0x4c000, 0x4c054,
1299 0x4c05c, 0x4c078,
1300 0x4c0c0, 0x4c174,
1301 0x4c180, 0x4c1ac,
1302 0x4c1b4, 0x4c1b8,
1303 0x4c1c0, 0x4c254,
1304 0x4c25c, 0x4c278,
1305 0x4c2c0, 0x4c374,
1306 0x4c380, 0x4c3ac,
1307 0x4c3b4, 0x4c3b8,
1308 0x4c3c0, 0x4c454,
1309 0x4c45c, 0x4c478,
1310 0x4c4c0, 0x4c574,
1311 0x4c580, 0x4c5ac,
1312 0x4c5b4, 0x4c5b8,
1313 0x4c5c0, 0x4c654,
1314 0x4c65c, 0x4c678,
1315 0x4c6c0, 0x4c774,
1316 0x4c780, 0x4c7ac,
1317 0x4c7b4, 0x4c7b8,
1318 0x4c7c0, 0x4c854,
1319 0x4c85c, 0x4c878,
1320 0x4c8c0, 0x4c974,
1321 0x4c980, 0x4c9ac,
1322 0x4c9b4, 0x4c9b8,
1323 0x4c9c0, 0x4c9fc,
1324 0x4d000, 0x4d004,
1325 0x4d010, 0x4d030,
1326 0x4d040, 0x4d060,
1327 0x4d068, 0x4d068,
1328 0x4d080, 0x4d084,
1329 0x4d0a0, 0x4d0b0,
1330 0x4d200, 0x4d204,
1331 0x4d210, 0x4d230,
1332 0x4d240, 0x4d260,
1333 0x4d268, 0x4d268,
1334 0x4d280, 0x4d284,
1335 0x4d2a0, 0x4d2b0,
1336 0x4e0c0, 0x4e0e4,
1337 0x4f000, 0x4f03c,
1338 0x4f044, 0x4f08c,
1339 0x4f200, 0x4f250,
1340 0x4f400, 0x4f408,
1341 0x4f414, 0x4f420,
1342 0x4f600, 0x4f618,
1343 0x4f800, 0x4f814,
1344 0x50000, 0x50084,
1345 0x50090, 0x500cc,
1346 0x50400, 0x50400,
1347 0x50800, 0x50884,
1348 0x50890, 0x508cc,
1349 0x50c00, 0x50c00,
1350 0x51000, 0x5101c,
1351 0x51300, 0x51308,
1352 };
1353
1354 static const unsigned int t6_reg_ranges[] = {
1355 0x1008, 0x101c,
1356 0x1024, 0x10a8,
1357 0x10b4, 0x10f8,
1358 0x1100, 0x1114,
1359 0x111c, 0x112c,
1360 0x1138, 0x113c,
1361 0x1144, 0x114c,
1362 0x1180, 0x1184,
1363 0x1190, 0x1194,
1364 0x11a0, 0x11a4,
1365 0x11b0, 0x11b4,
1366 0x11fc, 0x1274,
1367 0x1280, 0x133c,
1368 0x1800, 0x18fc,
1369 0x3000, 0x302c,
1370 0x3060, 0x30b0,
1371 0x30b8, 0x30d8,
1372 0x30e0, 0x30fc,
1373 0x3140, 0x357c,
1374 0x35a8, 0x35cc,
1375 0x35ec, 0x35ec,
1376 0x3600, 0x5624,
1377 0x56cc, 0x56ec,
1378 0x56f4, 0x5720,
1379 0x5728, 0x575c,
1380 0x580c, 0x5814,
1381 0x5890, 0x589c,
1382 0x58a4, 0x58ac,
1383 0x58b8, 0x58bc,
1384 0x5940, 0x595c,
1385 0x5980, 0x598c,
1386 0x59b0, 0x59c8,
1387 0x59d0, 0x59dc,
1388 0x59fc, 0x5a18,
1389 0x5a60, 0x5a6c,
1390 0x5a80, 0x5a8c,
1391 0x5a94, 0x5a9c,
1392 0x5b94, 0x5bfc,
1393 0x5c10, 0x5e48,
1394 0x5e50, 0x5e94,
1395 0x5ea0, 0x5eb0,
1396 0x5ec0, 0x5ec0,
1397 0x5ec8, 0x5ed0,
1398 0x5ee0, 0x5ee0,
1399 0x5ef0, 0x5ef0,
1400 0x5f00, 0x5f00,
1401 0x6000, 0x6020,
1402 0x6028, 0x6040,
1403 0x6058, 0x609c,
1404 0x60a8, 0x619c,
1405 0x7700, 0x7798,
1406 0x77c0, 0x7880,
1407 0x78cc, 0x78fc,
1408 0x7b00, 0x7b58,
1409 0x7b60, 0x7b84,
1410 0x7b8c, 0x7c54,
1411 0x7d00, 0x7d38,
1412 0x7d40, 0x7d84,
1413 0x7d8c, 0x7ddc,
1414 0x7de4, 0x7e04,
1415 0x7e10, 0x7e1c,
1416 0x7e24, 0x7e38,
1417 0x7e40, 0x7e44,
1418 0x7e4c, 0x7e78,
1419 0x7e80, 0x7edc,
1420 0x7ee8, 0x7efc,
1421 0x8dc0, 0x8de4,
1422 0x8df8, 0x8e04,
1423 0x8e10, 0x8e84,
1424 0x8ea0, 0x8f88,
1425 0x8fb8, 0x9058,
1426 0x9060, 0x9060,
1427 0x9068, 0x90f8,
1428 0x9100, 0x9124,
1429 0x9400, 0x9470,
1430 0x9600, 0x9600,
1431 0x9608, 0x9638,
1432 0x9640, 0x9704,
1433 0x9710, 0x971c,
1434 0x9800, 0x9808,
1435 0x9820, 0x983c,
1436 0x9850, 0x9864,
1437 0x9c00, 0x9c6c,
1438 0x9c80, 0x9cec,
1439 0x9d00, 0x9d6c,
1440 0x9d80, 0x9dec,
1441 0x9e00, 0x9e6c,
1442 0x9e80, 0x9eec,
1443 0x9f00, 0x9f6c,
1444 0x9f80, 0xa020,
1445 0xd004, 0xd03c,
1446 0xd100, 0xd118,
1447 0xd200, 0xd214,
1448 0xd220, 0xd234,
1449 0xd240, 0xd254,
1450 0xd260, 0xd274,
1451 0xd280, 0xd294,
1452 0xd2a0, 0xd2b4,
1453 0xd2c0, 0xd2d4,
1454 0xd2e0, 0xd2f4,
1455 0xd300, 0xd31c,
1456 0xdfc0, 0xdfe0,
1457 0xe000, 0xf008,
1458 0xf010, 0xf018,
1459 0xf020, 0xf028,
1460 0x11000, 0x11014,
1461 0x11048, 0x1106c,
1462 0x11074, 0x11088,
1463 0x11098, 0x11120,
1464 0x1112c, 0x1117c,
1465 0x11190, 0x112e0,
1466 0x11300, 0x1130c,
1467 0x12000, 0x1206c,
1468 0x19040, 0x1906c,
1469 0x19078, 0x19080,
1470 0x1908c, 0x190e8,
1471 0x190f0, 0x190f8,
1472 0x19100, 0x19110,
1473 0x19120, 0x19124,
1474 0x19150, 0x19194,
1475 0x1919c, 0x191b0,
1476 0x191d0, 0x191e8,
1477 0x19238, 0x19290,
1478 0x192a4, 0x192b0,
1479 0x192bc, 0x192bc,
1480 0x19348, 0x1934c,
1481 0x193f8, 0x19418,
1482 0x19420, 0x19428,
1483 0x19430, 0x19444,
1484 0x1944c, 0x1946c,
1485 0x19474, 0x19474,
1486 0x19490, 0x194cc,
1487 0x194f0, 0x194f8,
1488 0x19c00, 0x19c48,
1489 0x19c50, 0x19c80,
1490 0x19c94, 0x19c98,
1491 0x19ca0, 0x19cbc,
1492 0x19ce4, 0x19ce4,
1493 0x19cf0, 0x19cf8,
1494 0x19d00, 0x19d28,
1495 0x19d50, 0x19d78,
1496 0x19d94, 0x19d98,
1497 0x19da0, 0x19dc8,
1498 0x19df0, 0x19e10,
1499 0x19e50, 0x19e6c,
1500 0x19ea0, 0x19ebc,
1501 0x19ec4, 0x19ef4,
1502 0x19f04, 0x19f2c,
1503 0x19f34, 0x19f34,
1504 0x19f40, 0x19f50,
1505 0x19f90, 0x19fac,
1506 0x19fc4, 0x19fc8,
1507 0x19fd0, 0x19fe4,
1508 0x1a000, 0x1a004,
1509 0x1a010, 0x1a06c,
1510 0x1a0b0, 0x1a0e4,
1511 0x1a0ec, 0x1a0f8,
1512 0x1a100, 0x1a108,
1513 0x1a114, 0x1a120,
1514 0x1a128, 0x1a130,
1515 0x1a138, 0x1a138,
1516 0x1a190, 0x1a1c4,
1517 0x1a1fc, 0x1a1fc,
1518 0x1e008, 0x1e00c,
1519 0x1e040, 0x1e044,
1520 0x1e04c, 0x1e04c,
1521 0x1e284, 0x1e290,
1522 0x1e2c0, 0x1e2c0,
1523 0x1e2e0, 0x1e2e0,
1524 0x1e300, 0x1e384,
1525 0x1e3c0, 0x1e3c8,
1526 0x1e408, 0x1e40c,
1527 0x1e440, 0x1e444,
1528 0x1e44c, 0x1e44c,
1529 0x1e684, 0x1e690,
1530 0x1e6c0, 0x1e6c0,
1531 0x1e6e0, 0x1e6e0,
1532 0x1e700, 0x1e784,
1533 0x1e7c0, 0x1e7c8,
1534 0x1e808, 0x1e80c,
1535 0x1e840, 0x1e844,
1536 0x1e84c, 0x1e84c,
1537 0x1ea84, 0x1ea90,
1538 0x1eac0, 0x1eac0,
1539 0x1eae0, 0x1eae0,
1540 0x1eb00, 0x1eb84,
1541 0x1ebc0, 0x1ebc8,
1542 0x1ec08, 0x1ec0c,
1543 0x1ec40, 0x1ec44,
1544 0x1ec4c, 0x1ec4c,
1545 0x1ee84, 0x1ee90,
1546 0x1eec0, 0x1eec0,
1547 0x1eee0, 0x1eee0,
1548 0x1ef00, 0x1ef84,
1549 0x1efc0, 0x1efc8,
1550 0x1f008, 0x1f00c,
1551 0x1f040, 0x1f044,
1552 0x1f04c, 0x1f04c,
1553 0x1f284, 0x1f290,
1554 0x1f2c0, 0x1f2c0,
1555 0x1f2e0, 0x1f2e0,
1556 0x1f300, 0x1f384,
1557 0x1f3c0, 0x1f3c8,
1558 0x1f408, 0x1f40c,
1559 0x1f440, 0x1f444,
1560 0x1f44c, 0x1f44c,
1561 0x1f684, 0x1f690,
1562 0x1f6c0, 0x1f6c0,
1563 0x1f6e0, 0x1f6e0,
1564 0x1f700, 0x1f784,
1565 0x1f7c0, 0x1f7c8,
1566 0x1f808, 0x1f80c,
1567 0x1f840, 0x1f844,
1568 0x1f84c, 0x1f84c,
1569 0x1fa84, 0x1fa90,
1570 0x1fac0, 0x1fac0,
1571 0x1fae0, 0x1fae0,
1572 0x1fb00, 0x1fb84,
1573 0x1fbc0, 0x1fbc8,
1574 0x1fc08, 0x1fc0c,
1575 0x1fc40, 0x1fc44,
1576 0x1fc4c, 0x1fc4c,
1577 0x1fe84, 0x1fe90,
1578 0x1fec0, 0x1fec0,
1579 0x1fee0, 0x1fee0,
1580 0x1ff00, 0x1ff84,
1581 0x1ffc0, 0x1ffc8,
1582 0x30000, 0x30030,
1583 0x30100, 0x30168,
1584 0x30190, 0x301a0,
1585 0x301a8, 0x301b8,
1586 0x301c4, 0x301c8,
1587 0x301d0, 0x301d0,
1588 0x30200, 0x30320,
1589 0x30400, 0x304b4,
1590 0x304c0, 0x3052c,
1591 0x30540, 0x3061c,
1592 0x30800, 0x308a0,
1593 0x308c0, 0x30908,
1594 0x30910, 0x309b8,
1595 0x30a00, 0x30a04,
1596 0x30a0c, 0x30a14,
1597 0x30a1c, 0x30a2c,
1598 0x30a44, 0x30a50,
1599 0x30a74, 0x30a74,
1600 0x30a7c, 0x30afc,
1601 0x30b08, 0x30c24,
1602 0x30d00, 0x30d14,
1603 0x30d1c, 0x30d3c,
1604 0x30d44, 0x30d4c,
1605 0x30d54, 0x30d74,
1606 0x30d7c, 0x30d7c,
1607 0x30de0, 0x30de0,
1608 0x30e00, 0x30ed4,
1609 0x30f00, 0x30fa4,
1610 0x30fc0, 0x30fc4,
1611 0x31000, 0x31004,
1612 0x31080, 0x310fc,
1613 0x31208, 0x31220,
1614 0x3123c, 0x31254,
1615 0x31300, 0x31300,
1616 0x31308, 0x3131c,
1617 0x31338, 0x3133c,
1618 0x31380, 0x31380,
1619 0x31388, 0x313a8,
1620 0x313b4, 0x313b4,
1621 0x31400, 0x31420,
1622 0x31438, 0x3143c,
1623 0x31480, 0x31480,
1624 0x314a8, 0x314a8,
1625 0x314b0, 0x314b4,
1626 0x314c8, 0x314d4,
1627 0x31a40, 0x31a4c,
1628 0x31af0, 0x31b20,
1629 0x31b38, 0x31b3c,
1630 0x31b80, 0x31b80,
1631 0x31ba8, 0x31ba8,
1632 0x31bb0, 0x31bb4,
1633 0x31bc8, 0x31bd4,
1634 0x32140, 0x3218c,
1635 0x321f0, 0x321f4,
1636 0x32200, 0x32200,
1637 0x32218, 0x32218,
1638 0x32400, 0x32400,
1639 0x32408, 0x3241c,
1640 0x32618, 0x32620,
1641 0x32664, 0x32664,
1642 0x326a8, 0x326a8,
1643 0x326ec, 0x326ec,
1644 0x32a00, 0x32abc,
1645 0x32b00, 0x32b38,
1646 0x32b20, 0x32b38,
1647 0x32b40, 0x32b58,
1648 0x32b60, 0x32b78,
1649 0x32c00, 0x32c00,
1650 0x32c08, 0x32c3c,
1651 0x33000, 0x3302c,
1652 0x33034, 0x33050,
1653 0x33058, 0x33058,
1654 0x33060, 0x3308c,
1655 0x3309c, 0x330ac,
1656 0x330c0, 0x330c0,
1657 0x330c8, 0x330d0,
1658 0x330d8, 0x330e0,
1659 0x330ec, 0x3312c,
1660 0x33134, 0x33150,
1661 0x33158, 0x33158,
1662 0x33160, 0x3318c,
1663 0x3319c, 0x331ac,
1664 0x331c0, 0x331c0,
1665 0x331c8, 0x331d0,
1666 0x331d8, 0x331e0,
1667 0x331ec, 0x33290,
1668 0x33298, 0x332c4,
1669 0x332e4, 0x33390,
1670 0x33398, 0x333c4,
1671 0x333e4, 0x3342c,
1672 0x33434, 0x33450,
1673 0x33458, 0x33458,
1674 0x33460, 0x3348c,
1675 0x3349c, 0x334ac,
1676 0x334c0, 0x334c0,
1677 0x334c8, 0x334d0,
1678 0x334d8, 0x334e0,
1679 0x334ec, 0x3352c,
1680 0x33534, 0x33550,
1681 0x33558, 0x33558,
1682 0x33560, 0x3358c,
1683 0x3359c, 0x335ac,
1684 0x335c0, 0x335c0,
1685 0x335c8, 0x335d0,
1686 0x335d8, 0x335e0,
1687 0x335ec, 0x33690,
1688 0x33698, 0x336c4,
1689 0x336e4, 0x33790,
1690 0x33798, 0x337c4,
1691 0x337e4, 0x337fc,
1692 0x33814, 0x33814,
1693 0x33854, 0x33868,
1694 0x33880, 0x3388c,
1695 0x338c0, 0x338d0,
1696 0x338e8, 0x338ec,
1697 0x33900, 0x3392c,
1698 0x33934, 0x33950,
1699 0x33958, 0x33958,
1700 0x33960, 0x3398c,
1701 0x3399c, 0x339ac,
1702 0x339c0, 0x339c0,
1703 0x339c8, 0x339d0,
1704 0x339d8, 0x339e0,
1705 0x339ec, 0x33a90,
1706 0x33a98, 0x33ac4,
1707 0x33ae4, 0x33b10,
1708 0x33b24, 0x33b28,
1709 0x33b38, 0x33b50,
1710 0x33bf0, 0x33c10,
1711 0x33c24, 0x33c28,
1712 0x33c38, 0x33c50,
1713 0x33cf0, 0x33cfc,
1714 0x34000, 0x34030,
1715 0x34100, 0x34168,
1716 0x34190, 0x341a0,
1717 0x341a8, 0x341b8,
1718 0x341c4, 0x341c8,
1719 0x341d0, 0x341d0,
1720 0x34200, 0x34320,
1721 0x34400, 0x344b4,
1722 0x344c0, 0x3452c,
1723 0x34540, 0x3461c,
1724 0x34800, 0x348a0,
1725 0x348c0, 0x34908,
1726 0x34910, 0x349b8,
1727 0x34a00, 0x34a04,
1728 0x34a0c, 0x34a14,
1729 0x34a1c, 0x34a2c,
1730 0x34a44, 0x34a50,
1731 0x34a74, 0x34a74,
1732 0x34a7c, 0x34afc,
1733 0x34b08, 0x34c24,
1734 0x34d00, 0x34d14,
1735 0x34d1c, 0x34d3c,
1736 0x34d44, 0x34d4c,
1737 0x34d54, 0x34d74,
1738 0x34d7c, 0x34d7c,
1739 0x34de0, 0x34de0,
1740 0x34e00, 0x34ed4,
1741 0x34f00, 0x34fa4,
1742 0x34fc0, 0x34fc4,
1743 0x35000, 0x35004,
1744 0x35080, 0x350fc,
1745 0x35208, 0x35220,
1746 0x3523c, 0x35254,
1747 0x35300, 0x35300,
1748 0x35308, 0x3531c,
1749 0x35338, 0x3533c,
1750 0x35380, 0x35380,
1751 0x35388, 0x353a8,
1752 0x353b4, 0x353b4,
1753 0x35400, 0x35420,
1754 0x35438, 0x3543c,
1755 0x35480, 0x35480,
1756 0x354a8, 0x354a8,
1757 0x354b0, 0x354b4,
1758 0x354c8, 0x354d4,
1759 0x35a40, 0x35a4c,
1760 0x35af0, 0x35b20,
1761 0x35b38, 0x35b3c,
1762 0x35b80, 0x35b80,
1763 0x35ba8, 0x35ba8,
1764 0x35bb0, 0x35bb4,
1765 0x35bc8, 0x35bd4,
1766 0x36140, 0x3618c,
1767 0x361f0, 0x361f4,
1768 0x36200, 0x36200,
1769 0x36218, 0x36218,
1770 0x36400, 0x36400,
1771 0x36408, 0x3641c,
1772 0x36618, 0x36620,
1773 0x36664, 0x36664,
1774 0x366a8, 0x366a8,
1775 0x366ec, 0x366ec,
1776 0x36a00, 0x36abc,
1777 0x36b00, 0x36b38,
1778 0x36b20, 0x36b38,
1779 0x36b40, 0x36b58,
1780 0x36b60, 0x36b78,
1781 0x36c00, 0x36c00,
1782 0x36c08, 0x36c3c,
1783 0x37000, 0x3702c,
1784 0x37034, 0x37050,
1785 0x37058, 0x37058,
1786 0x37060, 0x3708c,
1787 0x3709c, 0x370ac,
1788 0x370c0, 0x370c0,
1789 0x370c8, 0x370d0,
1790 0x370d8, 0x370e0,
1791 0x370ec, 0x3712c,
1792 0x37134, 0x37150,
1793 0x37158, 0x37158,
1794 0x37160, 0x3718c,
1795 0x3719c, 0x371ac,
1796 0x371c0, 0x371c0,
1797 0x371c8, 0x371d0,
1798 0x371d8, 0x371e0,
1799 0x371ec, 0x37290,
1800 0x37298, 0x372c4,
1801 0x372e4, 0x37390,
1802 0x37398, 0x373c4,
1803 0x373e4, 0x3742c,
1804 0x37434, 0x37450,
1805 0x37458, 0x37458,
1806 0x37460, 0x3748c,
1807 0x3749c, 0x374ac,
1808 0x374c0, 0x374c0,
1809 0x374c8, 0x374d0,
1810 0x374d8, 0x374e0,
1811 0x374ec, 0x3752c,
1812 0x37534, 0x37550,
1813 0x37558, 0x37558,
1814 0x37560, 0x3758c,
1815 0x3759c, 0x375ac,
1816 0x375c0, 0x375c0,
1817 0x375c8, 0x375d0,
1818 0x375d8, 0x375e0,
1819 0x375ec, 0x37690,
1820 0x37698, 0x376c4,
1821 0x376e4, 0x37790,
1822 0x37798, 0x377c4,
1823 0x377e4, 0x377fc,
1824 0x37814, 0x37814,
1825 0x37854, 0x37868,
1826 0x37880, 0x3788c,
1827 0x378c0, 0x378d0,
1828 0x378e8, 0x378ec,
1829 0x37900, 0x3792c,
1830 0x37934, 0x37950,
1831 0x37958, 0x37958,
1832 0x37960, 0x3798c,
1833 0x3799c, 0x379ac,
1834 0x379c0, 0x379c0,
1835 0x379c8, 0x379d0,
1836 0x379d8, 0x379e0,
1837 0x379ec, 0x37a90,
1838 0x37a98, 0x37ac4,
1839 0x37ae4, 0x37b10,
1840 0x37b24, 0x37b28,
1841 0x37b38, 0x37b50,
1842 0x37bf0, 0x37c10,
1843 0x37c24, 0x37c28,
1844 0x37c38, 0x37c50,
1845 0x37cf0, 0x37cfc,
1846 0x40040, 0x40040,
1847 0x40080, 0x40084,
1848 0x40100, 0x40100,
1849 0x40140, 0x401bc,
1850 0x40200, 0x40214,
1851 0x40228, 0x40228,
1852 0x40240, 0x40258,
1853 0x40280, 0x40280,
1854 0x40304, 0x40304,
1855 0x40330, 0x4033c,
1856 0x41304, 0x413c8,
1857 0x413d0, 0x413dc,
1858 0x413f0, 0x413f0,
1859 0x41400, 0x4140c,
1860 0x41414, 0x4141c,
1861 0x41480, 0x414d0,
1862 0x44000, 0x4407c,
1863 0x440c0, 0x441ac,
1864 0x441b4, 0x4427c,
1865 0x442c0, 0x443ac,
1866 0x443b4, 0x4447c,
1867 0x444c0, 0x445ac,
1868 0x445b4, 0x4467c,
1869 0x446c0, 0x447ac,
1870 0x447b4, 0x4487c,
1871 0x448c0, 0x449ac,
1872 0x449b4, 0x44a7c,
1873 0x44ac0, 0x44bac,
1874 0x44bb4, 0x44c7c,
1875 0x44cc0, 0x44dac,
1876 0x44db4, 0x44e7c,
1877 0x44ec0, 0x44fac,
1878 0x44fb4, 0x4507c,
1879 0x450c0, 0x451ac,
1880 0x451b4, 0x451fc,
1881 0x45800, 0x45804,
1882 0x45810, 0x45830,
1883 0x45840, 0x45860,
1884 0x45868, 0x45868,
1885 0x45880, 0x45884,
1886 0x458a0, 0x458b0,
1887 0x45a00, 0x45a04,
1888 0x45a10, 0x45a30,
1889 0x45a40, 0x45a60,
1890 0x45a68, 0x45a68,
1891 0x45a80, 0x45a84,
1892 0x45aa0, 0x45ab0,
1893 0x460c0, 0x460e4,
1894 0x47000, 0x4703c,
1895 0x47044, 0x4708c,
1896 0x47200, 0x47250,
1897 0x47400, 0x47408,
1898 0x47414, 0x47420,
1899 0x47600, 0x47618,
1900 0x47800, 0x47814,
1901 0x47820, 0x4782c,
1902 0x50000, 0x50084,
1903 0x50090, 0x500cc,
1904 0x50300, 0x50384,
1905 0x50400, 0x50400,
1906 0x50800, 0x50884,
1907 0x50890, 0x508cc,
1908 0x50b00, 0x50b84,
1909 0x50c00, 0x50c00,
1910 0x51000, 0x51020,
1911 0x51028, 0x510b0,
1912 0x51300, 0x51324,
1913 };
1914
1915 u32 *buf_end = (u32 *)((char *)buf + buf_size);
1916 const unsigned int *reg_ranges;
1917 int reg_ranges_size, range;
1918 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
1919
1920 /* Select the right set of register ranges to dump depending on the
1921 * adapter chip type.
1922 */
1923 switch (chip_version) {
1924 case CHELSIO_T5:
1925 reg_ranges = t5_reg_ranges;
1926 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
1927 break;
1928
1929 case CHELSIO_T6:
1930 reg_ranges = t6_reg_ranges;
1931 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
1932 break;
1933
1934 default:
1935 dev_err(adap,
1936 "Unsupported chip version %d\n", chip_version);
1937 return;
1938 }
1939
1940 /* Clear the register buffer and insert the appropriate register
1941 * values selected by the above register ranges.
1942 */
1943 memset(buf, 0, buf_size);
1944 for (range = 0; range < reg_ranges_size; range += 2) {
1945 unsigned int reg = reg_ranges[range];
1946 unsigned int last_reg = reg_ranges[range + 1];
1947 u32 *bufp = (u32 *)((char *)buf + reg);
1948
1949 /* Iterate across the register range filling in the register
1950 * buffer but don't write past the end of the register buffer.
1951 */
1952 while (reg <= last_reg && bufp < buf_end) {
1953 *bufp++ = t4_read_reg(adap, reg);
1954 reg += sizeof(u32);
1955 }
1956 }
1957 }
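
/*
 * Illustrative sketch (not part of the original driver): dumping the BAR0
 * register space into a DPDK-allocated buffer sized via t4_get_regs_len().
 *
 *	unsigned int len = t4_get_regs_len(adap);
 *	u32 *regs = rte_zmalloc(NULL, len, 0);
 *
 *	if (len && regs) {
 *		t4_get_regs(adap, regs, len);
 *		(consume the register dump here)
 *		rte_free(regs);
 *	}
 */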
1958
1959 /* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
1960 #define EEPROM_DELAY 10 /* 10us per poll spin */
1961 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
1962
1963 #define EEPROM_STAT_ADDR 0x7bfc
1964
1965 /**
1966 * Small utility function to wait till any outstanding VPD Access is complete.
1967 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
1968 * VPD Access in flight. This allows us to handle the problem of having a
1969 * previous VPD Access time out and prevent an attempt to inject a new VPD
1970 * Request before any in-flight VPD request has completed.
1971 */
1972 static int t4_seeprom_wait(struct adapter *adapter)
1973 {
1974 unsigned int base = adapter->params.pci.vpd_cap_addr;
1975 int max_poll;
1976
1977 /* If no VPD Access is in flight, we can just return success right
1978 * away.
1979 */
1980 if (!adapter->vpd_busy)
1981 return 0;
1982
1983 /* Poll the VPD Capability Address/Flag register waiting for it
1984 * to indicate that the operation is complete.
1985 */
1986 max_poll = EEPROM_MAX_POLL;
1987 do {
1988 u16 val;
1989
1990 udelay(EEPROM_DELAY);
1991 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
1992
1993 /* If the operation is complete, mark the VPD as no longer
1994 * busy and return success.
1995 */
1996 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
1997 adapter->vpd_busy = 0;
1998 return 0;
1999 }
2000 } while (--max_poll);
2001
2002 /* Failure! Note that we leave the VPD Busy status set in order to
2003 * avoid pushing a new VPD Access request into the VPD Capability till
2004 * the current operation eventually succeeds. It's a bug to issue a
2005 * new request when an existing request is in flight and will result
2006 * in corrupt hardware state.
2007 */
2008 return -ETIMEDOUT;
2009 }
2010
2011 /**
2012 * t4_seeprom_read - read a serial EEPROM location
2013 * @adapter: adapter to read
2014 * @addr: EEPROM virtual address
2015 * @data: where to store the read data
2016 *
2017 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2018 * VPD capability. Note that this function must be called with a virtual
2019 * address.
2020 */
2021 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2022 {
2023 unsigned int base = adapter->params.pci.vpd_cap_addr;
2024 int ret;
2025
2026	/* VPD Accesses must always be 4-byte aligned!
2027 */
2028 if (addr >= EEPROMVSIZE || (addr & 3))
2029 return -EINVAL;
2030
2031 /* Wait for any previous operation which may still be in flight to
2032 * complete.
2033 */
2034 ret = t4_seeprom_wait(adapter);
2035 if (ret) {
2036 dev_err(adapter, "VPD still busy from previous operation\n");
2037 return ret;
2038 }
2039
2040 /* Issue our new VPD Read request, mark the VPD as being busy and wait
2041 * for our request to complete. If it doesn't complete, note the
2042 * error and return it to our caller. Note that we do not reset the
2043 * VPD Busy status!
2044 */
2045 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2046 adapter->vpd_busy = 1;
2047 adapter->vpd_flag = PCI_VPD_ADDR_F;
2048 ret = t4_seeprom_wait(adapter);
2049 if (ret) {
2050 dev_err(adapter, "VPD read of address %#x failed\n", addr);
2051 return ret;
2052 }
2053
2054 /* Grab the returned data, swizzle it into our endianness and
2055 * return success.
2056 */
2057 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2058 *data = le32_to_cpu(*data);
2059 return 0;
2060 }
2061
2062 /**
2063 * t4_seeprom_write - write a serial EEPROM location
2064 * @adapter: adapter to write
2065 * @addr: virtual EEPROM address
2066 * @data: value to write
2067 *
2068 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2069 * VPD capability. Note that this function must be called with a virtual
2070 * address.
2071 */
2072 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2073 {
2074 unsigned int base = adapter->params.pci.vpd_cap_addr;
2075 int ret;
2076 u32 stats_reg = 0;
2077 int max_poll;
2078
2079	/* VPD Accesses must always be 4-byte aligned!
2080 */
2081 if (addr >= EEPROMVSIZE || (addr & 3))
2082 return -EINVAL;
2083
2084 /* Wait for any previous operation which may still be in flight to
2085 * complete.
2086 */
2087 ret = t4_seeprom_wait(adapter);
2088 if (ret) {
2089 dev_err(adapter, "VPD still busy from previous operation\n");
2090 return ret;
2091 }
2092
2093	/* Issue our new VPD Write request, mark the VPD as being busy and wait
2094 * for our request to complete. If it doesn't complete, note the
2095 * error and return it to our caller. Note that we do not reset the
2096 * VPD Busy status!
2097 */
2098 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2099 cpu_to_le32(data));
2100 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2101 (u16)addr | PCI_VPD_ADDR_F);
2102 adapter->vpd_busy = 1;
2103 adapter->vpd_flag = 0;
2104 ret = t4_seeprom_wait(adapter);
2105 if (ret) {
2106 dev_err(adapter, "VPD write of address %#x failed\n", addr);
2107 return ret;
2108 }
2109
2110 /* Reset PCI_VPD_DATA register after a transaction and wait for our
2111 * request to complete. If it doesn't complete, return error.
2112 */
2113 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2114 max_poll = EEPROM_MAX_POLL;
2115 do {
2116 udelay(EEPROM_DELAY);
2117 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2118 } while ((stats_reg & 0x1) && --max_poll);
2119 if (!max_poll)
2120 return -ETIMEDOUT;
2121
2122 /* Return success! */
2123 return 0;
2124 }
2125
2126 /**
2127 * t4_seeprom_wp - enable/disable EEPROM write protection
2128 * @adapter: the adapter
2129 * @enable: whether to enable or disable write protection
2130 *
2131 * Enables or disables write protection on the serial EEPROM.
2132 */
2133 int t4_seeprom_wp(struct adapter *adapter, int enable)
2134 {
2135 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2136 }
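
/*
 * Illustrative sketch (not part of the original driver): updating one 32-bit
 * word of the serial EEPROM. EXAMPLE_VPD_ADDR is a placeholder; it must be
 * 4-byte aligned and below EEPROMVSIZE, and write protection is lifted only
 * for the duration of the write.
 *
 *	u32 word;
 *	int ret;
 *
 *	ret = t4_seeprom_read(adapter, EXAMPLE_VPD_ADDR, &word);
 *	if (ret)
 *		return ret;
 *	t4_seeprom_wp(adapter, 0);
 *	ret = t4_seeprom_write(adapter, EXAMPLE_VPD_ADDR, word | 0x1);
 *	t4_seeprom_wp(adapter, 1);
 */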
2137
2138 /**
2139 * t4_fw_tp_pio_rw - Access TP PIO through LDST
2140 * @adap: the adapter
2141 * @vals: where the indirect register values are stored/written
2142 * @nregs: how many indirect registers to read/write
2143  * @start_index: index of first indirect register to read/write
2144 * @rw: Read (1) or Write (0)
2145 *
2146 * Access TP PIO registers through LDST
2147 */
2148 void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
2149 unsigned int start_index, unsigned int rw)
2150 {
2151 int cmd = FW_LDST_ADDRSPC_TP_PIO;
2152 struct fw_ldst_cmd c;
2153 unsigned int i;
2154 int ret;
2155
2156 for (i = 0 ; i < nregs; i++) {
2157 memset(&c, 0, sizeof(c));
2158 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
2159 F_FW_CMD_REQUEST |
2160 (rw ? F_FW_CMD_READ :
2161 F_FW_CMD_WRITE) |
2162 V_FW_LDST_CMD_ADDRSPACE(cmd));
2163 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
2164
2165 c.u.addrval.addr = cpu_to_be32(start_index + i);
2166 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
2167 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2168 if (ret == 0) {
2169 if (rw)
2170 vals[i] = be32_to_cpu(c.u.addrval.val);
2171 }
2172 }
2173 }
2174
2175 /**
2176 * t4_read_rss_key - read the global RSS key
2177 * @adap: the adapter
2178 * @key: 10-entry array holding the 320-bit RSS key
2179 *
2180 * Reads the global 320-bit RSS key.
2181 */
2182 void t4_read_rss_key(struct adapter *adap, u32 *key)
2183 {
2184 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
2185 }
2186
2187 /**
2188 * t4_write_rss_key - program one of the RSS keys
2189 * @adap: the adapter
2190 * @key: 10-entry array holding the 320-bit RSS key
2191 * @idx: which RSS key to write
2192 *
2193 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2194 * 0..15 the corresponding entry in the RSS key table is written,
2195 * otherwise the global RSS key is written.
2196 */
2197 void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
2198 {
2199 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
2200 u8 rss_key_addr_cnt = 16;
2201
2202 /* T6 and later: KeyMode 3 (per-VF and per-VF scramble) allows
2203 * access to key addresses 16-63 by using KeyWrAddrX as
2204 * index[5:4] (the upper 2 bits) into the key table.
2205 */
2206 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
2207 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
2208 rss_key_addr_cnt = 32;
2209
2210 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
2211
2212 if (idx >= 0 && idx < rss_key_addr_cnt) {
2213 if (rss_key_addr_cnt > 16)
2214 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2215 V_KEYWRADDRX(idx >> 4) |
2216 V_T6_VFWRADDR(idx) | F_KEYWREN);
2217 else
2218 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2219 V_KEYWRADDR(idx) | F_KEYWREN);
2220 }
2221 }
2222
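/*
 * Illustrative sketch (not part of the upstream driver): a hypothetical
 * helper showing how t4_read_rss_key() and t4_write_rss_key() above fit
 * together to program and verify the global 320-bit RSS key. The helper
 * name and the verification step are assumptions for the example.
 */
static inline int example_refresh_global_rss_key(struct adapter *adap,
						 u32 new_key[10])
{
	u32 readback[10];
	unsigned int i;

	/* Any @idx outside 0..15 (such as -1) selects the global key
	 * rather than a per-VF key-table slot.
	 */
	t4_write_rss_key(adap, new_key, -1);

	/* Read the key back and confirm all ten 32-bit words took effect. */
	t4_read_rss_key(adap, readback);
	for (i = 0; i < 10; i++)
		if (readback[i] != new_key[i])
			return -EIO;
	return 0;
}
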
2223 /**
2224 * t4_config_rss_range - configure a portion of the RSS mapping table
2225 * @adapter: the adapter
2226 * @mbox: mbox to use for the FW command
2227 * @viid: virtual interface whose RSS subtable is to be written
2228 * @start: start entry in the table to write
2229 * @n: how many table entries to write
2230 * @rspq: values for the "response queue" (Ingress Queue) lookup table
2231 * @nrspq: number of values in @rspq
2232 *
2233 * Programs the selected part of the VI's RSS mapping table with the
2234 * provided values. If @nrspq < @n the supplied values are used repeatedly
2235 * until the full table range is populated.
2236 *
2237 * The caller must ensure the values in @rspq are in the range allowed for
2238 * @viid.
2239 */
2240 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2241 int start, int n, const u16 *rspq, unsigned int nrspq)
2242 {
2243 int ret;
2244 const u16 *rsp = rspq;
2245 const u16 *rsp_end = rspq + nrspq;
2246 struct fw_rss_ind_tbl_cmd cmd;
2247
2248 memset(&cmd, 0, sizeof(cmd));
2249 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2250 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2251 V_FW_RSS_IND_TBL_CMD_VIID(viid));
2252 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2253
2254 /*
2255 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2256 * Queue Identifiers. These Ingress Queue IDs are packed three to
2257 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2258 * reserved.
2259 */
2260 while (n > 0) {
2261 int nq = min(n, 32);
2262 int nq_packed = 0;
2263 __be32 *qp = &cmd.iq0_to_iq2;
2264
2265 /*
2266 * Set up the firmware RSS command header to send the next
2267 * "nq" Ingress Queue IDs to the firmware.
2268 */
2269 cmd.niqid = cpu_to_be16(nq);
2270 cmd.startidx = cpu_to_be16(start);
2271
2272 /*
2273 * "nq" more done for the start of the next loop.
2274 */
2275 start += nq;
2276 n -= nq;
2277
2278 /*
2279 * While there are still Ingress Queue IDs to stuff into the
2280 * current firmware RSS command, retrieve them from the
2281 * Ingress Queue ID array and insert them into the command.
2282 */
2283 while (nq > 0) {
2284 /*
2285 * Grab up to the next 3 Ingress Queue IDs (wrapping
2286 * around the Ingress Queue ID array if necessary) and
2287 * insert them into the firmware RSS command at the
2288 * current 3-tuple position within the command.
2289 */
2290 u16 qbuf[3];
2291 u16 *qbp = qbuf;
2292 int nqbuf = min(3, nq);
2293
2294 nq -= nqbuf;
2295 qbuf[0] = 0;
2296 qbuf[1] = 0;
2297 qbuf[2] = 0;
2298 while (nqbuf && nq_packed < 32) {
2299 nqbuf--;
2300 nq_packed++;
2301 *qbp++ = *rsp++;
2302 if (rsp >= rsp_end)
2303 rsp = rspq;
2304 }
2305 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2306 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2307 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2308 }
2309
2310 /*
2311 * Send this portion of the RSS table update to the firmware;
2312 * bail out on any errors.
2313 */
2314 if (is_pf4(adapter))
2315 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd),
2316 NULL);
2317 else
2318 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
2319 if (ret)
2320 return ret;
2321 }
2322
2323 return 0;
2324 }
2325
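/*
 * Illustrative sketch (not part of the upstream driver): a hypothetical
 * helper showing a typical call into t4_config_rss_range() above. A small
 * array of Ingress Queue IDs is spread across a full @rss_size-entry
 * indirection table; per the function's contract the IDs are reused
 * round-robin when there are fewer IDs than table entries. The helper
 * name, @viid and @rss_size are assumptions for the example.
 */
static inline int example_setup_vi_rss_table(struct adapter *adap,
					     unsigned int viid,
					     unsigned int rss_size,
					     const u16 *iq_ids,
					     unsigned int n_iq_ids)
{
	/* Write entries [0, rss_size) of the VI's indirection table. */
	return t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size,
				   iq_ids, n_iq_ids);
}
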
2326 /**
2327 * t4_config_vi_rss - configure per VI RSS settings
2328 * @adapter: the adapter
2329 * @mbox: mbox to use for the FW command
2330 * @viid: the VI id
2331 * @flags: RSS flags
2332 * @defq: id of the default RSS queue for the VI.
2333 *
2334 * Configures VI-specific RSS properties.
2335 */
2336 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2337 unsigned int flags, unsigned int defq)
2338 {
2339 struct fw_rss_vi_config_cmd c;
2340
2341 memset(&c, 0, sizeof(c));
2342 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2343 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2344 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2345 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2346 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
2347 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2348 if (is_pf4(adapter))
2349 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2350 else
2351 return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL);
2352 }
2353
2354 /**
2355 * t4_read_config_vi_rss - read the configured per VI RSS settings
2356 * @adapter: the adapter
2357 * @mbox: mbox to use for the FW command
2358 * @viid: the VI id
2359 * @flags: where to place the configured flags
2360 * @defq: where to place the id of the default RSS queue for the VI.
2361 *
2362 * Read configured VI-specific RSS properties.
2363 */
2364 int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2365 u64 *flags, unsigned int *defq)
2366 {
2367 struct fw_rss_vi_config_cmd c;
2368 unsigned int result;
2369 int ret;
2370
2371 memset(&c, 0, sizeof(c));
2372 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2373 F_FW_CMD_REQUEST | F_FW_CMD_READ |
2374 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2375 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2376 ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c);
2377 if (!ret) {
2378 result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen);
2379 if (defq)
2380 *defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result);
2381 if (flags)
2382 *flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ;
2383 }
2384
2385 return ret;
2386 }
2387
2388 /**
2389 * init_cong_ctrl - initialize congestion control parameters
2390 * @a: the alpha values for congestion control
2391 * @b: the beta values for congestion control
2392 *
2393 * Initialize the congestion control parameters.
2394 */
2395 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2396 {
2397 int i;
2398
2399 for (i = 0; i < 9; i++) {
2400 a[i] = 1;
2401 b[i] = 0;
2402 }
2403
2404 a[9] = 2;
2405 a[10] = 3;
2406 a[11] = 4;
2407 a[12] = 5;
2408 a[13] = 6;
2409 a[14] = 7;
2410 a[15] = 8;
2411 a[16] = 9;
2412 a[17] = 10;
2413 a[18] = 14;
2414 a[19] = 17;
2415 a[20] = 21;
2416 a[21] = 25;
2417 a[22] = 30;
2418 a[23] = 35;
2419 a[24] = 45;
2420 a[25] = 60;
2421 a[26] = 80;
2422 a[27] = 100;
2423 a[28] = 200;
2424 a[29] = 300;
2425 a[30] = 400;
2426 a[31] = 500;
2427
2428 b[9] = 1;
2429 b[10] = 1;
2430 b[11] = 2;
2431 b[12] = 2;
2432 b[13] = 3;
2433 b[14] = 3;
2434 b[15] = 3;
2435 b[16] = 3;
2436 b[17] = 4;
2437 b[18] = 4;
2438 b[19] = 4;
2439 b[20] = 4;
2440 b[21] = 4;
2441 b[22] = 5;
2442 b[23] = 5;
2443 b[24] = 5;
2444 b[25] = 5;
2445 b[26] = 5;
2446 b[27] = 5;
2447 b[28] = 6;
2448 b[29] = 6;
2449 b[30] = 7;
2450 b[31] = 7;
2451 }
2452
2453 #define INIT_CMD(var, cmd, rd_wr) do { \
2454 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
2455 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
2456 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
2457 } while (0)
2458
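/*
 * For reference, the firmware command helpers below use INIT_CMD() to fill
 * in the common header fields. For example, INIT_CMD(c, BYE, WRITE) in
 * t4_fw_bye() expands (roughly) to:
 *
 *	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_BYE_CMD) |
 *				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
 *	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 */
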
2459 int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
2460 {
2461 u32 cclk_param, cclk_val;
2462 int ret;
2463
2464 /*
2465 * Ask firmware for the Core Clock since it knows how to translate the
2466 * Reference Clock ('V2') VPD field into a Core Clock value ...
2467 */
2468 cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2469 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
2470 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2471 1, &cclk_param, &cclk_val);
2472 if (ret) {
2473 dev_err(adapter, "%s: error in fetching from coreclock - %d\n",
2474 __func__, ret);
2475 return ret;
2476 }
2477
2478 p->cclk = cclk_val;
2479 dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
2480 return 0;
2481 }
2482
2483 /**
2484 * t4_get_pfres - retrieve PF resource limits
2485 * @adapter: the adapter
2486 *
2487 * Retrieves configured resource limits and capabilities for a physical
2488 * function. The results are stored in @adapter->pfres.
2489 */
2490 int t4_get_pfres(struct adapter *adapter)
2491 {
2492 struct pf_resources *pfres = &adapter->params.pfres;
2493 struct fw_pfvf_cmd cmd, rpl;
2494 u32 word;
2495 int v;
2496
2497 /*
2498 * Execute PFVF Read command to get PF resource limits; bail out early
2499 * with error on command failure.
2500 */
2501 memset(&cmd, 0, sizeof(cmd));
2502 cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
2503 F_FW_CMD_REQUEST |
2504 F_FW_CMD_READ |
2505 V_FW_PFVF_CMD_PFN(adapter->pf) |
2506 V_FW_PFVF_CMD_VFN(0));
2507 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2508 v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
2509 if (v != FW_SUCCESS)
2510 return v;
2511
2512 /*
2513 * Extract PF resource limits and return success.
2514 */
2515 word = be32_to_cpu(rpl.niqflint_niq);
2516 pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
2517
2518 word = be32_to_cpu(rpl.type_to_neq);
2519 pfres->neq = G_FW_PFVF_CMD_NEQ(word);
2520 return 0;
2521 }
2522
2523 /* serial flash and firmware constants and flash config file constants */
2524 enum {
2525 SF_ATTEMPTS = 10, /* max retries for SF operations */
2526
2527 /* flash command opcodes */
2528 SF_PROG_PAGE = 2, /* program page */
2529 SF_WR_DISABLE = 4, /* disable writes */
2530 SF_RD_STATUS = 5, /* read status register */
2531 SF_WR_ENABLE = 6, /* enable writes */
2532 SF_RD_DATA_FAST = 0xb, /* read flash */
2533 SF_RD_ID = 0x9f, /* read ID */
2534 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2535 };
2536
2537 /**
2538 * sf1_read - read data from the serial flash
2539 * @adapter: the adapter
2540 * @byte_cnt: number of bytes to read
2541 * @cont: whether another operation will be chained
2542 * @lock: whether to lock SF for PL access only
2543 * @valp: where to store the read data
2544 *
2545 * Reads up to 4 bytes of data from the serial flash. The location of
2546 * the read needs to be specified prior to calling this by issuing the
2547 * appropriate commands to the serial flash.
2548 */
2549 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2550 int lock, u32 *valp)
2551 {
2552 int ret;
2553
2554 if (!byte_cnt || byte_cnt > 4)
2555 return -EINVAL;
2556 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
2557 return -EBUSY;
2558 t4_write_reg(adapter, A_SF_OP,
2559 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
2560 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2561 if (!ret)
2562 *valp = t4_read_reg(adapter, A_SF_DATA);
2563 return ret;
2564 }
2565
2566 /**
2567 * sf1_write - write data to the serial flash
2568 * @adapter: the adapter
2569 * @byte_cnt: number of bytes to write
2570 * @cont: whether another operation will be chained
2571 * @lock: whether to lock SF for PL access only
2572 * @val: value to write
2573 *
2574 * Writes up to 4 bytes of data to the serial flash. The location of
2575 * the write needs to be specified prior to calling this by issuing the
2576 * appropriate commands to the serial flash.
2577 */
2578 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2579 int lock, u32 val)
2580 {
2581 if (!byte_cnt || byte_cnt > 4)
2582 return -EINVAL;
2583 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
2584 return -EBUSY;
2585 t4_write_reg(adapter, A_SF_DATA, val);
2586 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
2587 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
2588 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2589 }
2590
2591 /**
2592 * t4_read_flash - read words from serial flash
2593 * @adapter: the adapter
2594 * @addr: the start address for the read
2595 * @nwords: how many 32-bit words to read
2596 * @data: where to store the read data
2597 * @byte_oriented: whether to store data as bytes or as words
2598 *
2599 * Read the specified number of 32-bit words from the serial flash.
2600 * If @byte_oriented is set the read data is stored as a byte array
2601 * (i.e., big-endian), otherwise as 32-bit words in the platform's
2602 * natural endianness.
2603 */
2604 int t4_read_flash(struct adapter *adapter, unsigned int addr,
2605 unsigned int nwords, u32 *data, int byte_oriented)
2606 {
2607 int ret;
2608
2609 if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
2610 (addr & 3))
2611 return -EINVAL;
2612
2613 addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;
2614
2615 ret = sf1_write(adapter, 4, 1, 0, addr);
2616 if (ret != 0)
2617 return ret;
2618
2619 ret = sf1_read(adapter, 1, 1, 0, data);
2620 if (ret != 0)
2621 return ret;
2622
2623 for ( ; nwords; nwords--, data++) {
2624 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
2625 if (nwords == 1)
2626 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
2627 if (ret)
2628 return ret;
2629 if (byte_oriented)
2630 *data = cpu_to_be32(*data);
2631 }
2632 return 0;
2633 }
2634
2635 /**
2636 * t4_get_exprom_version - return the Expansion ROM version (if any)
2637 * @adapter: the adapter
2638 * @vers: where to place the version
2639 *
2640 * Reads the Expansion ROM header from FLASH and returns the version
2641 * number (if present) through the @vers return value pointer. We return
2642 * this in the Firmware Version Format since it's convenient. Return
2643 * 0 on success, -ENOENT if no Expansion ROM is present.
2644 */
2645 static int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
2646 {
2647 struct exprom_header {
2648 unsigned char hdr_arr[16]; /* must start with 0x55aa */
2649 unsigned char hdr_ver[4]; /* Expansion ROM version */
2650 } *hdr;
2651 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
2652 sizeof(u32))];
2653 int ret;
2654
2655 ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
2656 ARRAY_SIZE(exprom_header_buf),
2657 exprom_header_buf, 0);
2658 if (ret)
2659 return ret;
2660
2661 hdr = (struct exprom_header *)exprom_header_buf;
2662 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
2663 return -ENOENT;
2664
2665 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
2666 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
2667 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
2668 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
2669 return 0;
2670 }
2671
2672 /**
2673 * t4_get_fw_version - read the firmware version
2674 * @adapter: the adapter
2675 * @vers: where to place the version
2676 *
2677 * Reads the FW version from flash.
2678 */
2679 static int t4_get_fw_version(struct adapter *adapter, u32 *vers)
2680 {
2681 return t4_read_flash(adapter, FLASH_FW_START +
2682 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
2683 }
2684
2685 /**
2686 * t4_get_bs_version - read the firmware bootstrap version
2687 * @adapter: the adapter
2688 * @vers: where to place the version
2689 *
2690 * Reads the FW Bootstrap version from flash.
2691 */
2692 static int t4_get_bs_version(struct adapter *adapter, u32 *vers)
2693 {
2694 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
2695 offsetof(struct fw_hdr, fw_ver), 1,
2696 vers, 0);
2697 }
2698
2699 /**
2700 * t4_get_tp_version - read the TP microcode version
2701 * @adapter: the adapter
2702 * @vers: where to place the version
2703 *
2704 * Reads the TP microcode version from flash.
2705 */
2706 static int t4_get_tp_version(struct adapter *adapter, u32 *vers)
2707 {
2708 return t4_read_flash(adapter, FLASH_FW_START +
2709 offsetof(struct fw_hdr, tp_microcode_ver),
2710 1, vers, 0);
2711 }
2712
2713 /**
2714 * t4_get_version_info - extract various chip/firmware version information
2715 * @adapter: the adapter
2716 *
2717 * Reads various chip/firmware version numbers and stores them into the
2718 * adapter's Adapter Parameters structure. If any of the reads fails,
2719 * the first error encountered is returned, but all of the version
2720 * numbers will still be read.
2721 */
2722 int t4_get_version_info(struct adapter *adapter)
2723 {
2724 int ret = 0;
2725
2726 #define FIRST_RET(__getvinfo) \
2727 do { \
2728 int __ret = __getvinfo; \
2729 if (__ret && !ret) \
2730 ret = __ret; \
2731 } while (0)
2732
2733 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
2734 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
2735 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
2736 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
2737
2738 #undef FIRST_RET
2739
2740 return ret;
2741 }
2742
2743 /**
2744 * t4_dump_version_info - dump all of the adapter configuration IDs
2745 * @adapter: the adapter
2746 *
2747 * Dumps all of the various bits of adapter configuration version/revision
2748 * IDs information. This is typically called at some point after
2749 * t4_get_version_info() has been called.
2750 */
2751 void t4_dump_version_info(struct adapter *adapter)
2752 {
2753 /**
2754 * Device information.
2755 */
2756 dev_info(adapter, "Chelsio rev %d\n",
2757 CHELSIO_CHIP_RELEASE(adapter->params.chip));
2758
2759 /**
2760 * Firmware Version.
2761 */
2762 if (!adapter->params.fw_vers)
2763 dev_warn(adapter, "No firmware loaded\n");
2764 else
2765 dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
2766 G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
2767 G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
2768 G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
2769 G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
2770
2771 /**
2772 * Bootstrap Firmware Version.
2773 */
2774 if (!adapter->params.bs_vers)
2775 dev_warn(adapter, "No bootstrap loaded\n");
2776 else
2777 dev_info(adapter, "Bootstrap version: %u.%u.%u.%u\n",
2778 G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
2779 G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
2780 G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
2781 G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));
2782
2783 /**
2784 * TP Microcode Version.
2785 */
2786 if (!adapter->params.tp_vers)
2787 dev_warn(adapter, "No TP Microcode loaded\n");
2788 else
2789 dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
2790 G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
2791 G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
2792 G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
2793 G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
2794
2795 /**
2796 * Expansion ROM version.
2797 */
2798 if (!adapter->params.er_vers)
2799 dev_info(adapter, "No Expansion ROM loaded\n");
2800 else
2801 dev_info(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
2802 G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
2803 G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
2804 G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
2805 G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
2806 }
2807
2808 #define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \
2809 FW_PORT_CAP32_ANEG)
2810 /**
2811 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
2812 * @caps16: a 16-bit Port Capabilities value
2813 *
2814 * Returns the equivalent 32-bit Port Capabilities value.
2815 */
2816 fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
2817 {
2818 fw_port_cap32_t caps32 = 0;
2819
2820 #define CAP16_TO_CAP32(__cap) \
2821 do { \
2822 if (caps16 & FW_PORT_CAP_##__cap) \
2823 caps32 |= FW_PORT_CAP32_##__cap; \
2824 } while (0)
2825
2826 CAP16_TO_CAP32(SPEED_100M);
2827 CAP16_TO_CAP32(SPEED_1G);
2828 CAP16_TO_CAP32(SPEED_25G);
2829 CAP16_TO_CAP32(SPEED_10G);
2830 CAP16_TO_CAP32(SPEED_40G);
2831 CAP16_TO_CAP32(SPEED_100G);
2832 CAP16_TO_CAP32(FC_RX);
2833 CAP16_TO_CAP32(FC_TX);
2834 CAP16_TO_CAP32(ANEG);
2835 CAP16_TO_CAP32(MDIX);
2836 CAP16_TO_CAP32(MDIAUTO);
2837 CAP16_TO_CAP32(FEC_RS);
2838 CAP16_TO_CAP32(FEC_BASER_RS);
2839 CAP16_TO_CAP32(802_3_PAUSE);
2840 CAP16_TO_CAP32(802_3_ASM_DIR);
2841
2842 #undef CAP16_TO_CAP32
2843
2844 return caps32;
2845 }
2846
2847 /**
2848 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
2849 * @caps32: a 32-bit Port Capabilities value
2850 *
2851 * Returns the equivalent 16-bit Port Capabilities value. Note that
2852 * not all 32-bit Port Capabilities can be represented in the 16-bit
2853 * Port Capabilities and some fields/values may not make it.
2854 */
2855 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
2856 {
2857 fw_port_cap16_t caps16 = 0;
2858
2859 #define CAP32_TO_CAP16(__cap) \
2860 do { \
2861 if (caps32 & FW_PORT_CAP32_##__cap) \
2862 caps16 |= FW_PORT_CAP_##__cap; \
2863 } while (0)
2864
2865 CAP32_TO_CAP16(SPEED_100M);
2866 CAP32_TO_CAP16(SPEED_1G);
2867 CAP32_TO_CAP16(SPEED_10G);
2868 CAP32_TO_CAP16(SPEED_25G);
2869 CAP32_TO_CAP16(SPEED_40G);
2870 CAP32_TO_CAP16(SPEED_100G);
2871 CAP32_TO_CAP16(FC_RX);
2872 CAP32_TO_CAP16(FC_TX);
2873 CAP32_TO_CAP16(802_3_PAUSE);
2874 CAP32_TO_CAP16(802_3_ASM_DIR);
2875 CAP32_TO_CAP16(ANEG);
2876 CAP32_TO_CAP16(MDIX);
2877 CAP32_TO_CAP16(MDIAUTO);
2878 CAP32_TO_CAP16(FEC_RS);
2879 CAP32_TO_CAP16(FEC_BASER_RS);
2880
2881 #undef CAP32_TO_CAP16
2882
2883 return caps16;
2884 }
2885
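/*
 * Illustrative sketch (not part of the upstream driver): a hypothetical
 * helper using the two converters above to check whether a 32-bit Port
 * Capabilities value survives a round trip through the legacy 16-bit
 * format, i.e. whether it can be handed to FW_CAPS16-only firmware without
 * losing anything.
 */
static inline int example_caps_fit_in_caps16(fw_port_cap32_t caps32)
{
	return fwcaps16_to_caps32(fwcaps32_to_caps16(caps32)) == caps32;
}
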
2886 /* Translate Firmware Pause specification to Common Code */
2887 static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
2888 {
2889 enum cc_pause cc_pause = 0;
2890
2891 if (fw_pause & FW_PORT_CAP32_FC_RX)
2892 cc_pause |= PAUSE_RX;
2893 if (fw_pause & FW_PORT_CAP32_FC_TX)
2894 cc_pause |= PAUSE_TX;
2895
2896 return cc_pause;
2897 }
2898
2899 /* Translate Common Code Pause Frame specification into Firmware */
2900 static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
2901 {
2902 fw_port_cap32_t fw_pause = 0;
2903
2904 if (cc_pause & PAUSE_RX)
2905 fw_pause |= FW_PORT_CAP32_FC_RX;
2906 if (cc_pause & PAUSE_TX)
2907 fw_pause |= FW_PORT_CAP32_FC_TX;
2908
2909 return fw_pause;
2910 }
2911
2912 /* Translate Firmware Forward Error Correction specification to Common Code */
2913 static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
2914 {
2915 enum cc_fec cc_fec = 0;
2916
2917 if (fw_fec & FW_PORT_CAP32_FEC_RS)
2918 cc_fec |= FEC_RS;
2919 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
2920 cc_fec |= FEC_BASER_RS;
2921
2922 return cc_fec;
2923 }
2924
2925 /* Translate Common Code Forward Error Correction specification to Firmware */
2926 static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
2927 {
2928 fw_port_cap32_t fw_fec = 0;
2929
2930 if (cc_fec & FEC_RS)
2931 fw_fec |= FW_PORT_CAP32_FEC_RS;
2932 if (cc_fec & FEC_BASER_RS)
2933 fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
2934
2935 return fw_fec;
2936 }
2937
2938 /**
2939 * t4_link_l1cfg - apply link configuration to MAC/PHY
2940 * @adap: the adapter
2941 * @mbox: the Firmware Mailbox to use
2942 * @port: the Port ID
2943 * @lc: the Port's Link Configuration
2944 *
2945 * Set up a port's MAC and PHY according to a desired link configuration.
2946 * - If the PHY can auto-negotiate first decide what to advertise, then
2947 * enable/disable auto-negotiation as desired, and reset.
2948 * - If the PHY does not auto-negotiate just reset it.
2949 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
2950 * otherwise do it later based on the outcome of auto-negotiation.
2951 */
2952 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
2953 struct link_config *lc)
2954 {
2955 unsigned int fw_mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
2956 unsigned int fw_caps = adap->params.fw_caps_support;
2957 fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
2958 struct fw_port_cmd cmd;
2959
2960 lc->link_ok = 0;
2961
2962 fw_fc = cc_to_fwcap_pause(lc->requested_fc);
2963
2964 /* Convert Common Code Forward Error Control settings into the
2965 * Firmware's API. If the current Requested FEC has "Automatic"
2966 * (IEEE 802.3) specified, then we use whatever the Firmware
2967 * sent us as part of its IEEE 802.3-based interpretation of
2968 * the Transceiver Module EPROM FEC parameters. Otherwise we
2969 * use whatever is in the current Requested FEC settings.
2970 */
2971 if (lc->requested_fec & FEC_AUTO)
2972 cc_fec = lc->auto_fec;
2973 else
2974 cc_fec = lc->requested_fec;
2975 fw_fec = cc_to_fwcap_fec(cc_fec);
2976
2977 /* Figure out what our Requested Port Capabilities are going to be.
2978 */
2979 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
2980 rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
2981 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
2982 lc->fec = cc_fec;
2983 } else if (lc->autoneg == AUTONEG_DISABLE) {
2984 rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi;
2985 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
2986 lc->fec = cc_fec;
2987 } else {
2988 rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
2989 }
2990
2991 /* And send that on to the Firmware ...
2992 */
2993 memset(&cmd, 0, sizeof(cmd));
2994 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
2995 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
2996 V_FW_PORT_CMD_PORTID(port));
2997 cmd.action_to_len16 =
2998 cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
2999 FW_PORT_ACTION_L1_CFG :
3000 FW_PORT_ACTION_L1_CFG32) |
3001 FW_LEN16(cmd));
3002
3003 if (fw_caps == FW_CAPS16)
3004 cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
3005 else
3006 cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
3007
3008 return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
3009 }
3010
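/*
 * Illustrative sketch (not part of the upstream driver): a hypothetical
 * helper showing how a caller might fill in a link_config before handing
 * it to t4_link_l1cfg() above to force a fixed speed with symmetric pause
 * and firmware-chosen FEC. The helper name and the exact set of
 * link_config fields a real caller would touch are assumptions.
 */
static inline int example_force_link_speed(struct adapter *adap,
					   unsigned int port,
					   struct link_config *lc,
					   fw_port_cap32_t speed_cap)
{
	lc->autoneg = AUTONEG_DISABLE;		/* no auto-negotiation */
	lc->requested_speed = speed_cap;	/* e.g. FW_PORT_CAP32_SPEED_10G */
	lc->requested_fc = PAUSE_RX | PAUSE_TX;	/* pause in both directions */
	lc->requested_fec = FEC_AUTO;		/* let firmware pick FEC */

	return t4_link_l1cfg(adap, adap->mbox, port, lc);
}
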
3011 /**
3012 * t4_flash_cfg_addr - return the address of the flash configuration file
3013 * @adapter: the adapter
3014 *
3015 * Return the address within the flash where the Firmware Configuration
3016 * File is stored, or an error if the device FLASH is too small to contain
3017 * a Firmware Configuration File.
3018 */
3019 int t4_flash_cfg_addr(struct adapter *adapter)
3020 {
3021 /*
3022 * If the device FLASH isn't large enough to hold a Firmware
3023 * Configuration File, return an error.
3024 */
3025 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3026 return -ENOSPC;
3027
3028 return FLASH_CFG_START;
3029 }
3030
3031 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
3032
3033 /**
3034 * t4_intr_enable - enable interrupts
3035 * @adapter: the adapter whose interrupts should be enabled
3036 *
3037 * Enable PF-specific interrupts for the calling function and the top-level
3038 * interrupt concentrator for global interrupts. Interrupts are already
3039 * enabled at each module; here we just enable the roots of the interrupt
3040 * hierarchies.
3041 *
3042 * Note: this function should be called only when the driver manages
3043 * non PF-specific interrupts from the various HW modules. Only one PCI
3044 * function at a time should be doing this.
3045 */
3046 void t4_intr_enable(struct adapter *adapter)
3047 {
3048 u32 val = 0;
3049 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
3050 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
3051 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
3052
3053 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
3054 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
3055 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
3056 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
3057 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
3058 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
3059 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
3060 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
3061 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
3062 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
3063 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
3064 }
3065
3066 /**
3067 * t4_intr_disable - disable interrupts
3068 * @adapter: the adapter whose interrupts should be disabled
3069 *
3070 * Disable interrupts. We only disable the top-level interrupt
3071 * concentrators. The caller must be a PCI function managing global
3072 * interrupts.
3073 */
3074 void t4_intr_disable(struct adapter *adapter)
3075 {
3076 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
3077 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
3078 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
3079
3080 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
3081 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
3082 }
3083
3084 /**
3085 * t4_get_port_type_description - return Port Type string description
3086 * @port_type: firmware Port Type enumeration
3087 */
3088 const char *t4_get_port_type_description(enum fw_port_type port_type)
3089 {
3090 static const char * const port_type_description[] = {
3091 "Fiber_XFI",
3092 "Fiber_XAUI",
3093 "BT_SGMII",
3094 "BT_XFI",
3095 "BT_XAUI",
3096 "KX4",
3097 "CX4",
3098 "KX",
3099 "KR",
3100 "SFP",
3101 "BP_AP",
3102 "BP4_AP",
3103 "QSFP_10G",
3104 "QSA",
3105 "QSFP",
3106 "BP40_BA",
3107 "KR4_100G",
3108 "CR4_QSFP",
3109 "CR_QSFP",
3110 "CR2_QSFP",
3111 "SFP28",
3112 "KR_SFP28",
3113 };
3114
3115 if (port_type < ARRAY_SIZE(port_type_description))
3116 return port_type_description[port_type];
3117 return "UNKNOWN";
3118 }
3119
3120 /**
3121 * t4_get_mps_bg_map - return the buffer groups associated with a port
3122 * @adap: the adapter
3123 * @pidx: the port index
3124 *
3125 * Returns a bitmap indicating which MPS buffer groups are associated
3126 * with the given port. Bit i is set if buffer group i is used by the
3127 * port.
3128 */
3129 unsigned int t4_get_mps_bg_map(struct adapter *adap, unsigned int pidx)
3130 {
3131 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3132 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adap,
3133 A_MPS_CMN_CTL));
3134
3135 if (pidx >= nports) {
3136 dev_warn(adap, "MPS Port Index %d >= Nports %d\n",
3137 pidx, nports);
3138 return 0;
3139 }
3140
3141 switch (chip_version) {
3142 case CHELSIO_T4:
3143 case CHELSIO_T5:
3144 switch (nports) {
3145 case 1: return 0xf;
3146 case 2: return 3 << (2 * pidx);
3147 case 4: return 1 << pidx;
3148 }
3149 break;
3150
3151 case CHELSIO_T6:
3152 switch (nports) {
3153 case 2: return 1 << (2 * pidx);
3154 }
3155 break;
3156 }
3157
3158 dev_err(adap, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
3159 chip_version, nports);
3160 return 0;
3161 }
3162
3163 /**
3164 * t4_get_tp_ch_map - return TP ingress channels associated with a port
3165 * @adapter: the adapter
3166 * @pidx: the port index
3167 *
3168 * Returns a bitmap indicating which TP Ingress Channels are associated with
3169 * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
3170 */
3171 unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx)
3172 {
3173 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
3174 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter,
3175 A_MPS_CMN_CTL));
3176
3177 if (pidx >= nports) {
3178 dev_warn(adap, "TP Port Index %d >= Nports %d\n",
3179 pidx, nports);
3180 return 0;
3181 }
3182
3183 switch (chip_version) {
3184 case CHELSIO_T4:
3185 case CHELSIO_T5:
3186 /* Note that these happen to be the same values as the MPS
3187 * Buffer Group Map for these chips, but we replicate the code
3188 * here because they're really separate concepts.
3189 */
3190 switch (nports) {
3191 case 1: return 0xf;
3192 case 2: return 3 << (2 * pidx);
3193 case 4: return 1 << pidx;
3194 }
3195 break;
3196
3197 case CHELSIO_T6:
3198 switch (nports) {
3199 case 2: return 1 << pidx;
3200 }
3201 break;
3202 }
3203
3204 dev_err(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
3205 chip_version, nports);
3206 return 0;
3207 }
3208
3209 /**
3210 * t4_get_port_stats - collect port statistics
3211 * @adap: the adapter
3212 * @idx: the port index
3213 * @p: the stats structure to fill
3214 *
3215 * Collect statistics related to the given port from HW.
3216 */
3217 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3218 {
3219 u32 bgmap = t4_get_mps_bg_map(adap, idx);
3220 u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
3221
3222 #define GET_STAT(name) \
3223 t4_read_reg64(adap, \
3224 (is_t4(adap->params.chip) ? \
3225 PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\
3226 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3227 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3228
3229 p->tx_octets = GET_STAT(TX_PORT_BYTES);
3230 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
3231 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
3232 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
3233 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
3234 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
3235 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
3236 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
3237 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
3238 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
3239 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
3240 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3241 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
3242 p->tx_drop = GET_STAT(TX_PORT_DROP);
3243 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
3244 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
3245 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
3246 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
3247 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
3248 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
3249 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
3250 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
3251 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
3252
3253 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
3254 if (stat_ctl & F_COUNTPAUSESTATTX) {
3255 p->tx_frames -= p->tx_pause;
3256 p->tx_octets -= p->tx_pause * 64;
3257 }
3258 if (stat_ctl & F_COUNTPAUSEMCTX)
3259 p->tx_mcast_frames -= p->tx_pause;
3260 }
3261
3262 p->rx_octets = GET_STAT(RX_PORT_BYTES);
3263 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
3264 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
3265 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
3266 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
3267 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
3268 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3269 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
3270 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
3271 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
3272 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
3273 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
3274 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
3275 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
3276 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
3277 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
3278 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3279 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
3280 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
3281 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
3282 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
3283 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
3284 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
3285 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
3286 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
3287 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
3288 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
3289
3290 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
3291 if (stat_ctl & F_COUNTPAUSESTATRX) {
3292 p->rx_frames -= p->rx_pause;
3293 p->rx_octets -= p->rx_pause * 64;
3294 }
3295 if (stat_ctl & F_COUNTPAUSEMCRX)
3296 p->rx_mcast_frames -= p->rx_pause;
3297 }
3298
3299 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3300 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3301 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3302 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3303 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3304 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3305 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3306 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3307
3308 #undef GET_STAT
3309 #undef GET_STAT_COM
3310 }
3311
3312 /**
3313 * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
3314 * @adap: The adapter
3315 * @idx: The port
3316 * @stats: Current stats to fill
3317 * @offset: Previous stats snapshot
3318 */
3319 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3320 struct port_stats *stats,
3321 struct port_stats *offset)
3322 {
3323 u64 *s, *o;
3324 unsigned int i;
3325
3326 t4_get_port_stats(adap, idx, stats);
3327 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
3328 i < (sizeof(struct port_stats) / sizeof(u64));
3329 i++, s++, o++)
3330 *s -= *o;
3331 }
3332
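/*
 * Illustrative sketch (not part of the upstream driver): a hypothetical
 * use of t4_get_port_stats_offset() above to implement "clear on read in
 * software": the caller keeps a per-port baseline snapshot, clearing stats
 * just refreshes that baseline, and reads return counters relative to it.
 * The helper names and the caller-owned @baseline are assumptions.
 */
static inline void example_clear_port_stats_sw(struct adapter *adap, int idx,
					       struct port_stats *baseline)
{
	/* "Clear" by re-snapshotting the absolute hardware counters. */
	t4_get_port_stats(adap, idx, baseline);
}

static inline void example_read_port_stats_sw(struct adapter *adap, int idx,
					      struct port_stats *stats,
					      struct port_stats *baseline)
{
	/* Absolute counters minus the last snapshot. */
	t4_get_port_stats_offset(adap, idx, stats, baseline);
}
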
3333 /**
3334 * t4_clr_port_stats - clear port statistics
3335 * @adap: the adapter
3336 * @idx: the port index
3337 *
3338 * Clear HW statistics for the given port.
3339 */
3340 void t4_clr_port_stats(struct adapter *adap, int idx)
3341 {
3342 unsigned int i;
3343 u32 bgmap = t4_get_mps_bg_map(adap, idx);
3344 u32 port_base_addr;
3345
3346 if (is_t4(adap->params.chip))
3347 port_base_addr = PORT_BASE(idx);
3348 else
3349 port_base_addr = T5_PORT_BASE(idx);
3350
3351 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3352 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3353 t4_write_reg(adap, port_base_addr + i, 0);
3354 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3355 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3356 t4_write_reg(adap, port_base_addr + i, 0);
3357 for (i = 0; i < 4; i++)
3358 if (bgmap & (1 << i)) {
3359 t4_write_reg(adap,
3360 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
3361 i * 8, 0);
3362 t4_write_reg(adap,
3363 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
3364 i * 8, 0);
3365 }
3366 }
3367
3368 /**
3369 * t4_fw_hello - establish communication with FW
3370 * @adap: the adapter
3371 * @mbox: mailbox to use for the FW command
3372 * @evt_mbox: mailbox to receive async FW events
3373 * @master: specifies the caller's willingness to be the device master
3374 * @state: returns the current device state (if non-NULL)
3375 *
3376 * Issues a command to establish communication with FW. Returns either
3377 * an error (negative integer) or the mailbox of the Master PF.
3378 */
3379 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
3380 enum dev_master master, enum dev_state *state)
3381 {
3382 int ret;
3383 struct fw_hello_cmd c;
3384 u32 v;
3385 unsigned int master_mbox;
3386 int retries = FW_CMD_HELLO_RETRIES;
3387
3388 retry:
3389 memset(&c, 0, sizeof(c));
3390 INIT_CMD(c, HELLO, WRITE);
3391 c.err_to_clearinit = cpu_to_be32(
3392 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
3393 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
3394 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
3395 M_FW_HELLO_CMD_MBMASTER) |
3396 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
3397 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
3398 F_FW_HELLO_CMD_CLEARINIT);
3399
3400 /*
3401 * Issue the HELLO command to the firmware. If it's not successful
3402 * but indicates that we got a "busy" or "timeout" condition, retry
3403 * the HELLO until we exhaust our retry limit. If we do exceed our
3404 * retry limit, check to see if the firmware left us any error
3405 * information and report that if so ...
3406 */
3407 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3408 if (ret != FW_SUCCESS) {
3409 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
3410 goto retry;
3411 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
3412 t4_report_fw_error(adap);
3413 return ret;
3414 }
3415
3416 v = be32_to_cpu(c.err_to_clearinit);
3417 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
3418 if (state) {
3419 if (v & F_FW_HELLO_CMD_ERR)
3420 *state = DEV_STATE_ERR;
3421 else if (v & F_FW_HELLO_CMD_INIT)
3422 *state = DEV_STATE_INIT;
3423 else
3424 *state = DEV_STATE_UNINIT;
3425 }
3426
3427 /*
3428 * If we're not the Master PF then we need to wait around for the
3429 * Master PF Driver to finish setting up the adapter.
3430 *
3431 * Note that we also do this wait if we're a non-Master-capable PF and
3432 * there is no current Master PF; a Master PF may show up momentarily
3433 * and we wouldn't want to fail pointlessly. (This can happen when an
3434 * OS loads lots of different drivers rapidly at the same time). In
3435 * this case, the Master PF returned by the firmware will be
3436 * M_PCIE_FW_MASTER so the test below will work ...
3437 */
3438 if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
3439 master_mbox != mbox) {
3440 int waiting = FW_CMD_HELLO_TIMEOUT;
3441
3442 /*
3443 * Wait for the firmware to either indicate an error or
3444 * initialized state. If we see either of these we bail out
3445 * and report the issue to the caller. If we exhaust the
3446 * "hello timeout" and we haven't exhausted our retries, try
3447 * again. Otherwise bail with a timeout error.
3448 */
3449 for (;;) {
3450 u32 pcie_fw;
3451
3452 msleep(50);
3453 waiting -= 50;
3454
3455 /*
3456 * If neither Error nor Initialized is indicated
3457 * by the firmware, keep waiting till we exhaust our
3458 * timeout ... and then retry if we haven't exhausted
3459 * our retries ...
3460 */
3461 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
3462 if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
3463 if (waiting <= 0) {
3464 if (retries-- > 0)
3465 goto retry;
3466
3467 return -ETIMEDOUT;
3468 }
3469 continue;
3470 }
3471
3472 /*
3473 * We either have an Error or an Initialized condition;
3474 * report errors preferentially.
3475 */
3476 if (state) {
3477 if (pcie_fw & F_PCIE_FW_ERR)
3478 *state = DEV_STATE_ERR;
3479 else if (pcie_fw & F_PCIE_FW_INIT)
3480 *state = DEV_STATE_INIT;
3481 }
3482
3483 /*
3484 * If we arrived before a Master PF was selected and
3485 * one has since become valid, grab its identity
3486 * for our caller.
3487 */
3488 if (master_mbox == M_PCIE_FW_MASTER &&
3489 (pcie_fw & F_PCIE_FW_MASTER_VLD))
3490 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
3491 break;
3492 }
3493 }
3494
3495 return master_mbox;
3496 }
3497
3498 /**
3499 * t4_fw_bye - end communication with FW
3500 * @adap: the adapter
3501 * @mbox: mailbox to use for the FW command
3502 *
3503 * Issues a command to terminate communication with FW.
3504 */
3505 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
3506 {
3507 struct fw_bye_cmd c;
3508
3509 memset(&c, 0, sizeof(c));
3510 INIT_CMD(c, BYE, WRITE);
3511 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3512 }
3513
3514 /**
3515 * t4_fw_reset - issue a reset to FW
3516 * @adap: the adapter
3517 * @mbox: mailbox to use for the FW command
3518 * @reset: specifies the type of reset to perform
3519 *
3520 * Issues a reset command of the specified type to FW.
3521 */
3522 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
3523 {
3524 struct fw_reset_cmd c;
3525
3526 memset(&c, 0, sizeof(c));
3527 INIT_CMD(c, RESET, WRITE);
3528 c.val = cpu_to_be32(reset);
3529 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3530 }
3531
3532 /**
3533 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
3534 * @adap: the adapter
3535 * @mbox: mailbox to use for the FW RESET command (if desired)
3536 * @force: force uP into RESET even if FW RESET command fails
3537 *
3538 * Issues a RESET command to firmware (if desired) with a HALT indication
3539 * and then puts the microprocessor into RESET state. The RESET command
3540 * will only be issued if a legitimate mailbox is provided (mbox <=
3541 * M_PCIE_FW_MASTER).
3542 *
3543 * This is generally used in order for the host to safely manipulate the
3544 * adapter without fear of conflicting with whatever the firmware might
3545 * be doing. The only way out of this state is to RESTART the firmware
3546 * ...
3547 */
3548 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
3549 {
3550 int ret = 0;
3551
3552 /*
3553 * If a legitimate mailbox is provided, issue a RESET command
3554 * with a HALT indication.
3555 */
3556 if (mbox <= M_PCIE_FW_MASTER) {
3557 struct fw_reset_cmd c;
3558
3559 memset(&c, 0, sizeof(c));
3560 INIT_CMD(c, RESET, WRITE);
3561 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
3562 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
3563 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3564 }
3565
3566 /*
3567 * Normally we won't complete the operation if the firmware RESET
3568 * command fails but if our caller insists we'll go ahead and put the
3569 * uP into RESET. This can be useful if the firmware is hung or even
3570 * missing ... We'll have to take the risk of putting the uP into
3571 * RESET without the cooperation of firmware in that case.
3572 *
3573 * We also force the firmware's HALT flag to be on in case we bypassed
3574 * the firmware RESET command above or we're dealing with old firmware
3575 * which doesn't have the HALT capability. This will serve as a flag
3576 * for the incoming firmware to know that it's coming out of a HALT
3577 * rather than a RESET ... if it's new enough to understand that ...
3578 */
3579 if (ret == 0 || force) {
3580 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
3581 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
3582 F_PCIE_FW_HALT);
3583 }
3584
3585 /*
3586 * And we always return the result of the firmware RESET command
3587 * even when we force the uP into RESET ...
3588 */
3589 return ret;
3590 }
3591
3592 /**
3593 * t4_fw_restart - restart the firmware by taking the uP out of RESET
3594 * @adap: the adapter
3595 * @mbox: mailbox to use for the FW RESET command (if desired)
3596 * @reset: if we want to do a RESET to restart things
3597 *
3598 * Restart firmware previously halted by t4_fw_halt(). On successful
3599 * return the previous PF Master remains as the new PF Master and there
3600 * is no need to issue a new HELLO command, etc.
3601 *
3602 * We do this in two ways:
3603 *
3604 * 1. If we're dealing with newer firmware we'll simply want to take
3605 * the chip's microprocessor out of RESET. This will cause the
3606 * firmware to start up from its start vector. And then we'll loop
3607 * until the firmware indicates it's started again (PCIE_FW.HALT
3608 * reset to 0) or we timeout.
3609 *
3610 * 2. If we're dealing with older firmware then we'll need to RESET
3611 * the chip since older firmware won't recognize the PCIE_FW.HALT
3612 * flag and automatically RESET itself on startup.
3613 */
3614 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3615 {
3616 if (reset) {
3617 /*
3618 * Since we're directing the RESET instead of the firmware
3619 * doing it automatically, we need to clear the PCIE_FW.HALT
3620 * bit.
3621 */
3622 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
3623
3624 /*
3625 * If we've been given a valid mailbox, first try to get the
3626 * firmware to do the RESET. If that works, great and we can
3627 * return success. Otherwise, if we haven't been given a
3628 * valid mailbox or the RESET command failed, fall back to
3629 * hitting the chip with a hammer.
3630 */
3631 if (mbox <= M_PCIE_FW_MASTER) {
3632 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
3633 msleep(100);
3634 if (t4_fw_reset(adap, mbox,
3635 F_PIORST | F_PIORSTMODE) == 0)
3636 return 0;
3637 }
3638
3639 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
3640 msleep(2000);
3641 } else {
3642 int ms;
3643
3644 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
3645 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3646 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
3647 return FW_SUCCESS;
3648 msleep(100);
3649 ms += 100;
3650 }
3651 return -ETIMEDOUT;
3652 }
3653 return 0;
3654 }
3655
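/*
 * Illustrative sketch (not part of the upstream driver): a hypothetical
 * helper showing the pairing of t4_fw_halt() and t4_fw_restart() above. The
 * firmware is halted around some host-driven manipulation of the adapter
 * and then restarted; @reset selects the older-firmware recovery path
 * described in t4_fw_restart()'s comment. The helper and callback are ours.
 */
static inline int example_with_fw_halted(struct adapter *adap,
					 unsigned int mbox, int reset,
					 int (*manipulate)(struct adapter *))
{
	int ret, ret2;

	ret = t4_fw_halt(adap, mbox, 0);	/* don't force on failure */
	if (ret)
		return ret;

	ret = manipulate(adap);			/* host-only adapter work */

	/* Always try to bring the firmware back; report the first error. */
	ret2 = t4_fw_restart(adap, mbox, reset);
	return ret ? ret : ret2;
}
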
3656 /**
3657 * t4_fl_pkt_align - return the fl packet alignment
3658 * @adap: the adapter
3659 *
3660 * T4 has a single field to specify the packing and padding boundary.
3661 * T5 onwards has separate fields for these, so the alignment for the
3662 * next packet offset is the maximum of the two.
3663 */
3664 int t4_fl_pkt_align(struct adapter *adap)
3665 {
3666 u32 sge_control, sge_control2;
3667 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
3668
3669 sge_control = t4_read_reg(adap, A_SGE_CONTROL);
3670
3671 /* T4 uses a single control field to specify both the PCIe Padding and
3672 * Packing Boundary. T5 introduced the ability to specify these
3673 * separately. The actual Ingress Packet Data alignment boundary
3674 * within Packed Buffer Mode is the maximum of these two
3675 * specifications.
3676 */
3677 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
3678 ingpad_shift = X_INGPADBOUNDARY_SHIFT;
3679 else
3680 ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
3681
3682 ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
3683
3684 fl_align = ingpadboundary;
3685 if (!is_t4(adap->params.chip)) {
3686 sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
3687 ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
3688 if (ingpackboundary == X_INGPACKBOUNDARY_16B)
3689 ingpackboundary = 16;
3690 else
3691 ingpackboundary = 1 << (ingpackboundary +
3692 X_INGPACKBOUNDARY_SHIFT);
3693
3694 fl_align = max(ingpadboundary, ingpackboundary);
3695 }
3696 return fl_align;
3697 }
3698
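/*
 * Illustrative sketch (not part of the upstream driver): a hypothetical
 * helper showing how the alignment returned by t4_fl_pkt_align() above is
 * typically applied, mirroring the rounding of the SGE Free List
 * buffer-size registers in t4_fixup_host_params_compat() below: sizes are
 * rounded up to the alignment, which is always a power of two.
 */
static inline unsigned int example_round_to_fl_align(struct adapter *adap,
						     unsigned int size)
{
	unsigned int fl_align = t4_fl_pkt_align(adap);

	return (size + fl_align - 1) & ~(fl_align - 1);
}
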
3699 /**
3700 * t4_fixup_host_params_compat - fix up host-dependent parameters
3701 * @adap: the adapter
3702 * @page_size: the host's Base Page Size
3703 * @cache_line_size: the host's Cache Line Size
3704 * @chip_compat: maintain compatibility with designated chip
3705 *
3706 * Various registers in the chip contain values which are dependent on the
3707 * host's Base Page and Cache Line Sizes. This function will fix all of
3708 * those registers with the appropriate values as passed in ...
3709 *
3710 * @chip_compat is used to limit the set of changes that are made
3711 * to be compatible with the indicated chip release. This is used by
3712 * drivers to maintain compatibility with chip register settings when
3713 * the drivers haven't [yet] been updated with new chip support.
3714 */
3715 int t4_fixup_host_params_compat(struct adapter *adap,
3716 unsigned int page_size,
3717 unsigned int cache_line_size,
3718 enum chip_type chip_compat)
3719 {
3720 unsigned int page_shift = cxgbe_fls(page_size) - 1;
3721 unsigned int sge_hps = page_shift - 10;
3722 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3723 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3724 unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;
3725
3726 t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
3727 V_HOSTPAGESIZEPF0(sge_hps) |
3728 V_HOSTPAGESIZEPF1(sge_hps) |
3729 V_HOSTPAGESIZEPF2(sge_hps) |
3730 V_HOSTPAGESIZEPF3(sge_hps) |
3731 V_HOSTPAGESIZEPF4(sge_hps) |
3732 V_HOSTPAGESIZEPF5(sge_hps) |
3733 V_HOSTPAGESIZEPF6(sge_hps) |
3734 V_HOSTPAGESIZEPF7(sge_hps));
3735
3736 if (is_t4(adap->params.chip) || is_t4(chip_compat))
3737 t4_set_reg_field(adap, A_SGE_CONTROL,
3738 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
3739 F_EGRSTATUSPAGESIZE,
3740 V_INGPADBOUNDARY(fl_align_log -
3741 X_INGPADBOUNDARY_SHIFT) |
3742 V_EGRSTATUSPAGESIZE(stat_len != 64));
3743 else {
3744 unsigned int pack_align;
3745 unsigned int ingpad, ingpack;
3746 unsigned int pcie_cap;
3747
3748 /*
3749 * T5 introduced the separation of the Free List Padding and
3750 * Packing Boundaries. Thus, we can select a smaller Padding
3751 * Boundary to avoid uselessly chewing up PCIe Link and Memory
3752 * Bandwidth, and use a Packing Boundary which is large enough
3753 * to avoid false sharing between CPUs, etc.
3754 *
3755 * For the PCI Link, the smaller the Padding Boundary the
3756 * better. For the Memory Controller, a smaller Padding
3757 * Boundary is better until we cross under the Memory Line
3758 * Size (the minimum unit of transfer to/from Memory). If we
3759 * have a Padding Boundary which is smaller than the Memory
3760 * Line Size, that'll involve a Read-Modify-Write cycle on the
3761 * Memory Controller which is never good.
3762 */
3763
3764 /* We want the Packing Boundary to be based on the Cache Line
3765 * Size in order to help avoid False Sharing performance
3766 * issues between CPUs, etc. We also want the Packing
3767 * Boundary to incorporate the PCI-E Maximum Payload Size. We
3768 * get best performance when the Packing Boundary is a
3769 * multiple of the Maximum Payload Size.
3770 */
3771 pack_align = fl_align;
3772 pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
3773 if (pcie_cap) {
3774 unsigned int mps, mps_log;
3775 u16 devctl;
3776
3777 /* The PCIe Device Control Maximum Payload Size field
3778 * [bits 7:5] encodes sizes as powers of 2 starting at
3779 * 128 bytes.
3780 */
3781 t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
3782 &devctl);
3783 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
3784 mps = 1 << mps_log;
3785 if (mps > pack_align)
3786 pack_align = mps;
3787 }
3788
3789 /*
3790 * N.B. T5 has a different interpretation of the "0" value for
3791 * the Packing Boundary. This corresponds to 16 bytes instead
3792 * of the expected 32 bytes. We never have a Packing Boundary
3793 * less than 32 bytes so we can't use that special value but
3794 * on the other hand, if we wanted 32 bytes, the best we can
3795 * really do is 64 bytes ...
3796 */
3797 if (pack_align <= 16) {
3798 ingpack = X_INGPACKBOUNDARY_16B;
3799 fl_align = 16;
3800 } else if (pack_align == 32) {
3801 ingpack = X_INGPACKBOUNDARY_64B;
3802 fl_align = 64;
3803 } else {
3804 unsigned int pack_align_log = cxgbe_fls(pack_align) - 1;
3805
3806 ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
3807 fl_align = pack_align;
3808 }
3809
3810 /* Use the smallest Ingress Padding which isn't smaller than
3811 * the Memory Controller Read/Write Size. We'll take that as
3812 * being 8 bytes since we don't know of any system with a
3813 * wider Memory Controller Bus Width.
3814 */
3815 if (is_t5(adap->params.chip))
3816 ingpad = X_INGPADBOUNDARY_32B;
3817 else
3818 ingpad = X_T6_INGPADBOUNDARY_8B;
3819 t4_set_reg_field(adap, A_SGE_CONTROL,
3820 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
3821 F_EGRSTATUSPAGESIZE,
3822 V_INGPADBOUNDARY(ingpad) |
3823 V_EGRSTATUSPAGESIZE(stat_len != 64));
3824 t4_set_reg_field(adap, A_SGE_CONTROL2,
3825 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
3826 V_INGPACKBOUNDARY(ingpack));
3827 }
3828
3829 /*
3830 * Adjust various SGE Free List Host Buffer Sizes.
3831 *
3832 * The first four entries are:
3833 *
3834 * 0: Host Page Size
3835 * 1: 64KB
3836 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3837 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3838 *
3839 * For the single-MTU buffers in unpacked mode we need to include
3840 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3841 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3842 * Padding boundary. All of these are accommodated in the Factory
3843 * Default Firmware Configuration File but we need to adjust it for
3844 * this host's cache line size.
3845 */
3846 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
3847 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
3848 (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
3849 & ~(fl_align - 1));
3850 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
3851 (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)
3852 & ~(fl_align - 1));
3853
3854 t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
3855
3856 return 0;
3857 }
3858
3859 /**
3860 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
3861 * @adap: the adapter
3862 * @page_size: the host's Base Page Size
3863 * @cache_line_size: the host's Cache Line Size
3864 *
3865 * Various registers in T4 contain values which are dependent on the
3866 * host's Base Page and Cache Line Sizes. This function will fix all of
3867 * those registers with the appropriate values as passed in ...
3868 *
3869 * This routine makes changes which are compatible with T4 chips.
3870 */
3871 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3872 unsigned int cache_line_size)
3873 {
3874 return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
3875 T4_LAST_REV);
3876 }
3877
3878 /**
3879 * t4_fw_initialize - ask FW to initialize the device
3880 * @adap: the adapter
3881 * @mbox: mailbox to use for the FW command
3882 *
3883 * Issues a command to FW to partially initialize the device. This
3884 * performs initialization that generally doesn't depend on user input.
3885 */
3886 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3887 {
3888 struct fw_initialize_cmd c;
3889
3890 memset(&c, 0, sizeof(c));
3891 INIT_CMD(c, INITIALIZE, WRITE);
3892 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3893 }
3894
3895 /**
3896 * t4_query_params_rw - query FW or device parameters
3897 * @adap: the adapter
3898 * @mbox: mailbox to use for the FW command
3899 * @pf: the PF
3900 * @vf: the VF
3901 * @nparams: the number of parameters
3902 * @params: the parameter names
3903 * @val: the parameter values
3904 * @rw: Write and read flag
3905 *
3906 * Reads the value of FW or device parameters. Up to 7 parameters can be
3907 * queried at once.
3908 */
3909 static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
3910 unsigned int pf, unsigned int vf,
3911 unsigned int nparams, const u32 *params,
3912 u32 *val, int rw)
3913 {
3914 unsigned int i;
3915 int ret;
3916 struct fw_params_cmd c;
3917 __be32 *p = &c.param[0].mnem;
3918
3919 if (nparams > 7)
3920 return -EINVAL;
3921
3922 memset(&c, 0, sizeof(c));
3923 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3924 F_FW_CMD_REQUEST | F_FW_CMD_READ |
3925 V_FW_PARAMS_CMD_PFN(pf) |
3926 V_FW_PARAMS_CMD_VFN(vf));
3927 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3928
3929 for (i = 0; i < nparams; i++) {
3930 *p++ = cpu_to_be32(*params++);
3931 if (rw)
3932 *p = cpu_to_be32(*(val + i));
3933 p++;
3934 }
3935
3936 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3937 if (ret == 0)
3938 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3939 *val++ = be32_to_cpu(*p);
3940 return ret;
3941 }
3942
3943 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3944 unsigned int vf, unsigned int nparams, const u32 *params,
3945 u32 *val)
3946 {
3947 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
3948 }
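/* Example (illustrative sketch, not part of the original driver): querying a
 * single device parameter, mirroring the FW_PARAMS usage in
 * t4_init_tp_params() later in this file.
 *
 *	u32 param, val;
 *	int ret;
 *
 *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
 *		V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK);
 *	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
 */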
3949
3950 /**
3951 * t4_set_params_timeout - sets FW or device parameters
3952 * @adap: the adapter
3953 * @mbox: mailbox to use for the FW command
3954 * @pf: the PF
3955 * @vf: the VF
3956 * @nparams: the number of parameters
3957 * @params: the parameter names
3958 * @val: the parameter values
3959 * @timeout: the timeout time
3960 *
3961 * Sets the value of FW or device parameters. Up to 7 parameters can be
3962 * specified at once.
3963 */
3964 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
3965 unsigned int pf, unsigned int vf,
3966 unsigned int nparams, const u32 *params,
3967 const u32 *val, int timeout)
3968 {
3969 struct fw_params_cmd c;
3970 __be32 *p = &c.param[0].mnem;
3971
3972 if (nparams > 7)
3973 return -EINVAL;
3974
3975 memset(&c, 0, sizeof(c));
3976 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3977 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3978 V_FW_PARAMS_CMD_PFN(pf) |
3979 V_FW_PARAMS_CMD_VFN(vf));
3980 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3981
3982 while (nparams--) {
3983 *p++ = cpu_to_be32(*params++);
3984 *p++ = cpu_to_be32(*val++);
3985 }
3986
3987 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
3988 }
3989
3990 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3991 unsigned int vf, unsigned int nparams, const u32 *params,
3992 const u32 *val)
3993 {
3994 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
3995 FW_CMD_MAX_TIMEOUT);
3996 }
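/* Example (illustrative sketch, not part of the original driver): setting a
 * single PF/VF parameter, mirroring the 32-bit Port Capabilities negotiation
 * in t4_port_init() later in this file.
 *
 *	u32 param, val;
 *	int ret;
 *
 *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
 *		V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_PORT_CAPS32);
 *	val = 1;
 *	ret = t4_set_params(adap, mbox, pf, vf, 1, &param, &val);
 */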
3997
3998 /**
3999 * t4_alloc_vi_func - allocate a virtual interface
4000 * @adap: the adapter
4001 * @mbox: mailbox to use for the FW command
4002 * @port: physical port associated with the VI
4003 * @pf: the PF owning the VI
4004 * @vf: the VF owning the VI
4005 * @nmac: number of MAC addresses needed (1 to 5)
4006 * @mac: the MAC addresses of the VI
4007 * @rss_size: size of RSS table slice associated with this VI
4008 * @portfunc: which Port Application Function MAC Address is desired
4009 * @idstype: Intrusion Detection Type
4010 *
4011 * Allocates a virtual interface for the given physical port. If @mac is
4012 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
4013 * @mac should be large enough to hold @nmac Ethernet addresses; they are
4014 * stored consecutively, so the space needed is @nmac * 6 bytes.
4015 * Returns a negative error number or the non-negative VI id.
4016 */
4017 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4018 unsigned int port, unsigned int pf, unsigned int vf,
4019 unsigned int nmac, u8 *mac, unsigned int *rss_size,
4020 unsigned int portfunc, unsigned int idstype,
4021 u8 *vivld, u8 *vin)
4022 {
4023 int ret;
4024 struct fw_vi_cmd c;
4025
4026 memset(&c, 0, sizeof(c));
4027 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4028 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4029 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4030 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4031 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
4032 V_FW_VI_CMD_FUNC(portfunc));
4033 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4034 c.nmac = nmac - 1;
4035
4036 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4037 if (ret)
4038 return ret;
4039
4040 if (mac) {
4041 memcpy(mac, c.mac, sizeof(c.mac));
4042 switch (nmac) {
4043 case 5:
4044 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
4045 /* FALLTHROUGH */
4046 case 4:
4047 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
4048 /* FALLTHROUGH */
4049 case 3:
4050 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
4051 /* FALLTHROUGH */
4052 case 2:
4053 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
4054 /* FALLTHROUGH */
4055 }
4056 }
4057 if (rss_size)
4058 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
4059 if (vivld)
4060 *vivld = G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16));
4061 if (vin)
4062 *vin = G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16));
4063 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
4064 }
4065
4066 /**
4067 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4068 * @adap: the adapter
4069 * @mbox: mailbox to use for the FW command
4070 * @port: physical port associated with the VI
4071 * @pf: the PF owning the VI
4072 * @vf: the VF owning the VI
4073 * @nmac: number of MAC addresses needed (1 to 5)
4074 * @mac: the MAC addresses of the VI
4075 * @rss_size: size of RSS table slice associated with this VI
4076 *
4077 * Backwards-compatible and convenience routine to allocate a Virtual
4078 * Interface with an Ethernet Port Application Function and Intrusion
4079 * Detection System disabled.
4080 */
4081 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4082 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4083 unsigned int *rss_size, u8 *vivld, u8 *vin)
4084 {
4085 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4086 FW_VI_FUNC_ETH, 0, vivld, vin);
4087 }
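/* Example (illustrative sketch, not part of the original driver): allocating
 * an Ethernet VI with a single MAC address, as done per port in
 * t4_port_init() later in this file.
 *
 *	u8 addr[6], vivld = 0, vin = 0;
 *	unsigned int rss_size = 0;
 *	int ret;
 *
 *	ret = t4_alloc_vi(adap, mbox, port, pf, vf, 1, addr, &rss_size,
 *			  &vivld, &vin);
 *	if (ret < 0)
 *		return ret;
 *	viid = ret;
 */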
4088
4089 /**
4090 * t4_free_vi - free a virtual interface
4091 * @adap: the adapter
4092 * @mbox: mailbox to use for the FW command
4093 * @pf: the PF owning the VI
4094 * @vf: the VF owning the VI
4095 * @viid: virtual interface identifier
4096 *
4097 * Free a previously allocated virtual interface.
4098 */
4099 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4100 unsigned int vf, unsigned int viid)
4101 {
4102 struct fw_vi_cmd c;
4103
4104 memset(&c, 0, sizeof(c));
4105 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4106 F_FW_CMD_EXEC);
4107 if (is_pf4(adap))
4108 c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) |
4109 V_FW_VI_CMD_VFN(vf));
4110 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
4111 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
4112
4113 if (is_pf4(adap))
4114 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4115 else
4116 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4117 }
4118
4119 /**
4120 * t4_set_rxmode - set Rx properties of a virtual interface
4121 * @adap: the adapter
4122 * @mbox: mailbox to use for the FW command
4123 * @viid: the VI id
4124 * @mtu: the new MTU or -1
4125 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4126 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4127 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4128 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
4129 * -1 no change
4130 * @sleep_ok: if true we may sleep while awaiting command completion
4131 *
4132 * Sets Rx properties of a virtual interface.
4133 */
4134 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4135 int mtu, int promisc, int all_multi, int bcast, int vlanex,
4136 bool sleep_ok)
4137 {
4138 struct fw_vi_rxmode_cmd c;
4139
4140 /* convert to FW values */
4141 if (mtu < 0)
4142 mtu = M_FW_VI_RXMODE_CMD_MTU;
4143 if (promisc < 0)
4144 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4145 if (all_multi < 0)
4146 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4147 if (bcast < 0)
4148 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4149 if (vlanex < 0)
4150 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4151
4152 memset(&c, 0, sizeof(c));
4153 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
4154 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4155 V_FW_VI_RXMODE_CMD_VIID(viid));
4156 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4157 c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4158 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4159 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4160 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4161 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4162 if (is_pf4(adap))
4163 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL,
4164 sleep_ok);
4165 else
4166 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4167 }
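/* Example (illustrative sketch, not part of the original driver): enabling
 * promiscuous mode on a VI while leaving MTU, all-multi, broadcast and VLAN
 * extraction unchanged (-1 means "no change").
 *
 *	ret = t4_set_rxmode(adap, adap->mbox, viid, -1, 1, -1, -1, -1, true);
 */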
4168
4169 /**
4170 * t4_alloc_raw_mac_filt - Adds a raw mac entry in mps tcam
4171 * @adap: the adapter
4172 * @viid: the VI id
4173 * @addr: the MAC address
4174 * @mask: the mask
4175 * @idx: index at which to add this entry
4176 * @lookup_type: MAC address for inner (1) or outer (0) header
4177 * @port_id: the port index
4178 * @sleep_ok: call is allowed to sleep
4179 *
4180 * Adds the mac entry at the specified index using raw mac interface.
4181 *
4182 * Returns a negative error number or the allocated index for this mac.
4183 */
4184 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
4185 const u8 *addr, const u8 *mask, unsigned int idx,
4186 u8 lookup_type, u8 port_id, bool sleep_ok)
4187 {
4188 int ret = 0;
4189 struct fw_vi_mac_cmd c;
4190 struct fw_vi_mac_raw *p = &c.u.raw;
4191 u32 val;
4192
4193 memset(&c, 0, sizeof(c));
4194 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4195 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4196 V_FW_VI_MAC_CMD_VIID(viid));
4197 val = V_FW_CMD_LEN16(1) |
4198 V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
4199 c.freemacs_to_len16 = cpu_to_be32(val);
4200
4201 /* Specify the index at which to add this raw mac entry */
4202 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
4203
4204 /* Lookup Type. Outer header: 0, Inner header: 1 */
4205 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
4206 V_DATAPORTNUM(port_id));
4207 /* Lookup mask and port mask */
4208 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
4209 V_DATAPORTNUM(M_DATAPORTNUM));
4210
4211 /* Copy the address and the mask */
4212 memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
4213 memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
4214
4215 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
4216 if (ret == 0) {
4217 ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
4218 if (ret != (int)idx)
4219 ret = -ENOMEM;
4220 }
4221
4222 return ret;
4223 }
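/* Example (illustrative sketch, not part of the original driver): adding a
 * raw outer-header entry at a caller-chosen index; addr, idx and port_id are
 * placeholders, and an all-ones mask matches the full 48-bit address.
 *
 *	static const u8 mask[ETHER_ADDR_LEN] = {
 *		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 *	};
 *
 *	ret = t4_alloc_raw_mac_filt(adap, viid, addr, mask, idx,
 *				    0, port_id, true);
 */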
4224
4225 /**
4226 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
4227 * @adap: the adapter
4228 * @viid: the VI id
4229 * @addr: the MAC address
4230 * @mask: the mask
4231 * @idx: index of the entry in mps tcam
4232 * @lookup_type: MAC address for inner (1) or outer (0) header
4233 * @port_id: the port index
4234 * @sleep_ok: call is allowed to sleep
4235 *
4236 * Removes the mac entry at the specified index using raw mac interface.
4237 *
4238 * Returns a negative error number on failure.
4239 */
4240 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
4241 const u8 *addr, const u8 *mask, unsigned int idx,
4242 u8 lookup_type, u8 port_id, bool sleep_ok)
4243 {
4244 struct fw_vi_mac_cmd c;
4245 struct fw_vi_mac_raw *p = &c.u.raw;
4246 u32 raw;
4247
4248 memset(&c, 0, sizeof(c));
4249 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4250 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4251 V_FW_CMD_EXEC(0) |
4252 V_FW_VI_MAC_CMD_VIID(viid));
4253 raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
4254 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0U) |
4255 raw |
4256 V_FW_CMD_LEN16(1));
4257
4258 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
4259 FW_VI_MAC_ID_BASED_FREE);
4260
4261 /* Lookup Type. Outer header: 0, Inner header: 1 */
4262 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
4263 V_DATAPORTNUM(port_id));
4264 /* Lookup mask and port mask */
4265 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
4266 V_DATAPORTNUM(M_DATAPORTNUM));
4267
4268 /* Copy the address and the mask */
4269 memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
4270 memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
4271
4272 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
4273 }
4274
4275 /**
4276 * t4_change_mac - modifies the exact-match filter for a MAC address
4277 * @adap: the adapter
4278 * @mbox: mailbox to use for the FW command
4279 * @viid: the VI id
4280 * @idx: index of existing filter for old value of MAC address, or -1
4281 * @addr: the new MAC address value
4282 * @persist: whether a new MAC allocation should be persistent
4283 * @add_smt: if true also add the address to the HW SMT
4284 *
4285 * Modifies an exact-match filter and sets it to the new MAC address if
4286 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
4287 * latter case the address is added persistently if @persist is %true.
4288 *
4289 * Note that in general it is not possible to modify the value of a given
4290 * filter so the generic way to modify an address filter is to free the one
4291 * being used by the old address value and allocate a new filter for the
4292 * new address value.
4293 *
4294 * Returns a negative error number or the index of the filter with the new
4295 * MAC value. Note that this index may differ from @idx.
4296 */
4297 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4298 int idx, const u8 *addr, bool persist, bool add_smt)
4299 {
4300 int ret, mode;
4301 struct fw_vi_mac_cmd c;
4302 struct fw_vi_mac_exact *p = c.u.exact;
4303 int max_mac_addr = adap->params.arch.mps_tcam_size;
4304
4305 if (idx < 0) /* new allocation */
4306 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4307 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4308
4309 memset(&c, 0, sizeof(c));
4310 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4311 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4312 V_FW_VI_MAC_CMD_VIID(viid));
4313 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
4314 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
4315 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4316 V_FW_VI_MAC_CMD_IDX(idx));
4317 memcpy(p->macaddr, addr, sizeof(p->macaddr));
4318
4319 if (is_pf4(adap))
4320 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4321 else
4322 ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
4323 if (ret == 0) {
4324 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
4325 if (ret >= max_mac_addr)
4326 ret = -ENOMEM;
4327 }
4328 return ret;
4329 }
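/* Example (illustrative sketch, not part of the original driver): installing
 * a new persistent exact-match filter for a VI's MAC address.  Passing -1 as
 * the index requests a fresh filter; the non-negative return value is the
 * index actually used.
 *
 *	ret = t4_change_mac(adap, adap->mbox, viid, -1, addr, true, true);
 *	if (ret >= 0)
 *		mac_idx = ret;
 */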
4330
4331 /**
4332 * t4_enable_vi_params - enable/disable a virtual interface
4333 * @adap: the adapter
4334 * @mbox: mailbox to use for the FW command
4335 * @viid: the VI id
4336 * @rx_en: 1=enable Rx, 0=disable Rx
4337 * @tx_en: 1=enable Tx, 0=disable Tx
4338 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
4339 *
4340 * Enables/disables a virtual interface. Note that setting DCB Enable
4341 * only makes sense when enabling a Virtual Interface ...
4342 */
4343 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
4344 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
4345 {
4346 struct fw_vi_enable_cmd c;
4347
4348 memset(&c, 0, sizeof(c));
4349 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
4350 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4351 V_FW_VI_ENABLE_CMD_VIID(viid));
4352 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4353 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
4354 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
4355 FW_LEN16(c));
4356 if (is_pf4(adap))
4357 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
4358 else
4359 return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL);
4360 }
4361
4362 /**
4363 * t4_enable_vi - enable/disable a virtual interface
4364 * @adap: the adapter
4365 * @mbox: mailbox to use for the FW command
4366 * @viid: the VI id
4367 * @rx_en: 1=enable Rx, 0=disable Rx
4368 * @tx_en: 1=enable Tx, 0=disable Tx
4369 *
4370 * Enables/disables a virtual interface. Use t4_enable_vi_params() if
4371 * delivery of Data Center Bridging messages also needs to be enabled.
4372 */
4373 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4374 bool rx_en, bool tx_en)
4375 {
4376 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
4377 }
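/* Example (illustrative sketch, not part of the original driver): bringing a
 * VI up in both directions, then later quiescing only its transmit path.
 *
 *	ret = t4_enable_vi(adap, adap->mbox, viid, true, true);
 *	...
 *	ret = t4_enable_vi(adap, adap->mbox, viid, true, false);
 */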
4378
4379 /**
4380 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
4381 * @adap: the adapter
4382 * @mbox: mailbox to use for the FW command
4383 * @start: %true to enable the queues, %false to disable them
4384 * @pf: the PF owning the queues
4385 * @vf: the VF owning the queues
4386 * @iqid: ingress queue id
4387 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4388 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4389 *
4390 * Starts or stops an ingress queue and its associated FLs, if any.
4391 */
4392 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4393 unsigned int pf, unsigned int vf, unsigned int iqid,
4394 unsigned int fl0id, unsigned int fl1id)
4395 {
4396 struct fw_iq_cmd c;
4397
4398 memset(&c, 0, sizeof(c));
4399 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4400 F_FW_CMD_EXEC);
4401 c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
4402 V_FW_IQ_CMD_IQSTOP(!start) |
4403 FW_LEN16(c));
4404 c.iqid = cpu_to_be16(iqid);
4405 c.fl0id = cpu_to_be16(fl0id);
4406 c.fl1id = cpu_to_be16(fl1id);
4407 if (is_pf4(adap)) {
4408 c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4409 V_FW_IQ_CMD_VFN(vf));
4410 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4411 } else {
4412 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4413 }
4414 }
4415
4416 /**
4417 * t4_iq_free - free an ingress queue and its FLs
4418 * @adap: the adapter
4419 * @mbox: mailbox to use for the FW command
4420 * @pf: the PF owning the queues
4421 * @vf: the VF owning the queues
4422 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4423 * @iqid: ingress queue id
4424 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4425 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4426 *
4427 * Frees an ingress queue and its associated FLs, if any.
4428 */
4429 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4430 unsigned int vf, unsigned int iqtype, unsigned int iqid,
4431 unsigned int fl0id, unsigned int fl1id)
4432 {
4433 struct fw_iq_cmd c;
4434
4435 memset(&c, 0, sizeof(c));
4436 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4437 F_FW_CMD_EXEC);
4438 if (is_pf4(adap))
4439 c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4440 V_FW_IQ_CMD_VFN(vf));
4441 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4442 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
4443 c.iqid = cpu_to_be16(iqid);
4444 c.fl0id = cpu_to_be16(fl0id);
4445 c.fl1id = cpu_to_be16(fl1id);
4446 if (is_pf4(adap))
4447 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4448 else
4449 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4450 }
4451
4452 /**
4453 * t4_eth_eq_free - free an Ethernet egress queue
4454 * @adap: the adapter
4455 * @mbox: mailbox to use for the FW command
4456 * @pf: the PF owning the queue
4457 * @vf: the VF owning the queue
4458 * @eqid: egress queue id
4459 *
4460 * Frees an Ethernet egress queue.
4461 */
4462 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4463 unsigned int vf, unsigned int eqid)
4464 {
4465 struct fw_eq_eth_cmd c;
4466
4467 memset(&c, 0, sizeof(c));
4468 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
4469 F_FW_CMD_REQUEST | F_FW_CMD_EXEC);
4470 if (is_pf4(adap))
4471 c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4472 V_FW_IQ_CMD_VFN(vf));
4473 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4474 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
4475 if (is_pf4(adap))
4476 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4477 else
4478 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4479 }
4480
4481 /**
4482 * t4_link_down_rc_str - return a string for a Link Down Reason Code
4483 * @link_down_rc: Link Down Reason Code
4484 *
4485 * Returns a string representation of the Link Down Reason Code.
4486 */
4487 static const char *t4_link_down_rc_str(unsigned char link_down_rc)
4488 {
4489 static const char * const reason[] = {
4490 "Link Down",
4491 "Remote Fault",
4492 "Auto-negotiation Failure",
4493 "Reserved",
4494 "Insufficient Airflow",
4495 "Unable To Determine Reason",
4496 "No RX Signal Detected",
4497 "Reserved",
4498 };
4499
4500 if (link_down_rc >= ARRAY_SIZE(reason))
4501 return "Bad Reason Code";
4502
4503 return reason[link_down_rc];
4504 }
4505
4506 /* Return the highest speed set in the port capabilities, in Mb/s. */
4507 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
4508 {
4509 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
4510 do { \
4511 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
4512 return __speed; \
4513 } while (0)
4514
4515 TEST_SPEED_RETURN(100G, 100000);
4516 TEST_SPEED_RETURN(50G, 50000);
4517 TEST_SPEED_RETURN(40G, 40000);
4518 TEST_SPEED_RETURN(25G, 25000);
4519 TEST_SPEED_RETURN(10G, 10000);
4520 TEST_SPEED_RETURN(1G, 1000);
4521 TEST_SPEED_RETURN(100M, 100);
4522
4523 #undef TEST_SPEED_RETURN
4524
4525 return 0;
4526 }
4527
4528 /**
4529 * t4_handle_get_port_info - process a FW reply message
4530 * @pi: the port info
4531 * @rpl: start of the FW message
4532 *
4533 * Processes a GET_PORT_INFO FW reply message.
4534 */
4535 static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
4536 {
4537 const struct fw_port_cmd *cmd = (const void *)rpl;
4538 int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16));
4539 fw_port_cap32_t pcaps, acaps, linkattr;
4540 struct link_config *lc = &pi->link_cfg;
4541 struct adapter *adapter = pi->adapter;
4542 enum fw_port_module_type mod_type;
4543 enum fw_port_type port_type;
4544 unsigned int speed, fc, fec;
4545 int link_ok, linkdnrc;
4546
4547 /* Extract the various fields from the Port Information message.
4548 */
4549 switch (action) {
4550 case FW_PORT_ACTION_GET_PORT_INFO: {
4551 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
4552
4553 link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0;
4554 linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus);
4555 port_type = G_FW_PORT_CMD_PTYPE(lstatus);
4556 mod_type = G_FW_PORT_CMD_MODTYPE(lstatus);
4557 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
4558 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
4559
4560 /* Unfortunately the format of the Link Status in the old
4561 * 16-bit Port Information message isn't the same as the
4562 * 16-bit Port Capabilities bitfield used everywhere else ...
4563 */
4564 linkattr = 0;
4565 if (lstatus & F_FW_PORT_CMD_RXPAUSE)
4566 linkattr |= FW_PORT_CAP32_FC_RX;
4567 if (lstatus & F_FW_PORT_CMD_TXPAUSE)
4568 linkattr |= FW_PORT_CAP32_FC_TX;
4569 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
4570 linkattr |= FW_PORT_CAP32_SPEED_100M;
4571 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
4572 linkattr |= FW_PORT_CAP32_SPEED_1G;
4573 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
4574 linkattr |= FW_PORT_CAP32_SPEED_10G;
4575 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
4576 linkattr |= FW_PORT_CAP32_SPEED_25G;
4577 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
4578 linkattr |= FW_PORT_CAP32_SPEED_40G;
4579 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
4580 linkattr |= FW_PORT_CAP32_SPEED_100G;
4581
4582 break;
4583 }
4584
4585 case FW_PORT_ACTION_GET_PORT_INFO32: {
4586 u32 lstatus32 =
4587 be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
4588
4589 link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0;
4590 linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32);
4591 port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
4592 mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32);
4593 pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
4594 acaps = be32_to_cpu(cmd->u.info32.acaps32);
4595 linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
4596 break;
4597 }
4598
4599 default:
4600 dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n",
4601 be32_to_cpu(cmd->action_to_len16));
4602 return;
4603 }
4604
4605 fec = fwcap_to_cc_fec(acaps);
4606
4607 fc = fwcap_to_cc_pause(linkattr);
4608 speed = fwcap_to_speed(linkattr);
4609
4610 if (mod_type != pi->mod_type) {
4611 lc->auto_fec = fec;
4612 pi->port_type = port_type;
4613 pi->mod_type = mod_type;
4614 t4_os_portmod_changed(adapter, pi->pidx);
4615 }
4616 if (link_ok != lc->link_ok || speed != lc->speed ||
4617 fc != lc->fc || fec != lc->fec) { /* something changed */
4618 if (!link_ok && lc->link_ok) {
4619 lc->link_down_rc = linkdnrc;
4620 dev_warn(adapter, "Port %d link down, reason: %s\n",
4621 pi->tx_chan, t4_link_down_rc_str(linkdnrc));
4622 }
4623 lc->link_ok = link_ok;
4624 lc->speed = speed;
4625 lc->fc = fc;
4626 lc->fec = fec;
4627 lc->pcaps = pcaps;
4628 lc->acaps = acaps & ADVERT_MASK;
4629
4630 if (lc->acaps & FW_PORT_CAP32_ANEG) {
4631 lc->autoneg = AUTONEG_ENABLE;
4632 } else {
4633 /* When Autoneg is disabled, user needs to set
4634 * single speed.
4635 * Similar to cxgb4_ethtool.c: set_link_ksettings
4636 */
4637 lc->acaps = 0;
4638 lc->requested_speed = fwcap_to_speed(acaps);
4639 lc->autoneg = AUTONEG_DISABLE;
4640 }
4641 }
4642 }
4643
4644 /**
4645 * t4_ctrl_eq_free - free a control egress queue
4646 * @adap: the adapter
4647 * @mbox: mailbox to use for the FW command
4648 * @pf: the PF owning the queue
4649 * @vf: the VF owning the queue
4650 * @eqid: egress queue id
4651 *
4652 * Frees a control egress queue.
4653 */
4654 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4655 unsigned int vf, unsigned int eqid)
4656 {
4657 struct fw_eq_ctrl_cmd c;
4658
4659 memset(&c, 0, sizeof(c));
4660 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
4661 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4662 V_FW_EQ_CTRL_CMD_PFN(pf) |
4663 V_FW_EQ_CTRL_CMD_VFN(vf));
4664 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
4665 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
4666 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4667 }
4668
4669 /**
4670 * t4_handle_fw_rpl - process a FW reply message
4671 * @adap: the adapter
4672 * @rpl: start of the FW message
4673 *
4674 * Processes a FW message, such as link state change messages.
4675 */
4676 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
4677 {
4678 u8 opcode = *(const u8 *)rpl;
4679
4680 /*
4681 * This might be a port command ... this simplifies the following
4682 * conditionals ... We can get away with pre-dereferencing
4683 * action_to_len16 because it's in the first 16 bytes and all messages
4684 * will be at least that long.
4685 */
4686 const struct fw_port_cmd *p = (const void *)rpl;
4687 unsigned int action =
4688 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
4689
4690 if (opcode == FW_PORT_CMD &&
4691 (action == FW_PORT_ACTION_GET_PORT_INFO ||
4692 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
4693 /* link/module state change message */
4694 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
4695 struct port_info *pi = NULL;
4696 int i;
4697
4698 for_each_port(adap, i) {
4699 pi = adap2pinfo(adap, i);
4700 if (pi->tx_chan == chan)
4701 break;
4702 }
4703
4704 t4_handle_get_port_info(pi, rpl);
4705 } else {
4706 dev_warn(adap, "Unknown firmware reply %d\n", opcode);
4707 return -EINVAL;
4708 }
4709 return 0;
4710 }
4711
4712 void t4_reset_link_config(struct adapter *adap, int idx)
4713 {
4714 struct port_info *pi = adap2pinfo(adap, idx);
4715 struct link_config *lc = &pi->link_cfg;
4716
4717 lc->link_ok = 0;
4718 lc->requested_speed = 0;
4719 lc->requested_fc = 0;
4720 lc->speed = 0;
4721 lc->fc = 0;
4722 }
4723
4724 /**
4725 * init_link_config - initialize a link's SW state
4726 * @lc: structure holding the link state
4727 * @pcaps: link Port Capabilities
4728 * @acaps: link current Advertised Port Capabilities
4729 *
4730 * Initializes the SW state maintained for each link, including the link's
4731 * capabilities and default speed/flow-control/autonegotiation settings.
4732 */
4733 void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
4734 fw_port_cap32_t acaps)
4735 {
4736 lc->pcaps = pcaps;
4737 lc->requested_speed = 0;
4738 lc->speed = 0;
4739 lc->requested_fc = 0;
4740 lc->fc = 0;
4741
4742 /**
4743 * For Forward Error Correction, we default to whatever the Firmware
4744 * tells us the Link is currently advertising.
4745 */
4746 lc->auto_fec = fwcap_to_cc_fec(acaps);
4747 lc->requested_fec = FEC_AUTO;
4748 lc->fec = lc->auto_fec;
4749
4750 if (lc->pcaps & FW_PORT_CAP32_ANEG) {
4751 lc->acaps = lc->pcaps & ADVERT_MASK;
4752 lc->autoneg = AUTONEG_ENABLE;
4753 lc->requested_fc |= PAUSE_AUTONEG;
4754 } else {
4755 lc->acaps = 0;
4756 lc->autoneg = AUTONEG_DISABLE;
4757 }
4758 }
4759
4760 /**
4761 * t4_wait_dev_ready - wait until reads of registers work
4762 * @adapter: the adapter
4763 * Right after the device is RESET, it can take a small amount of time
4764 * for it to respond to register reads. Until then, all reads will
4765 * return either 0xff...ff or 0xee...ee. Return an error if reads
4766 * don't work within a reasonable time frame.
4767 */
4768 static int t4_wait_dev_ready(struct adapter *adapter)
4769 {
4770 u32 whoami;
4771
4772 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4773
4774 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4775 return 0;
4776
4777 msleep(500);
4778 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4779 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4780 return 0;
4781
4782 dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
4783 whoami);
4784 return -EIO;
4785 }
4786
4787 struct flash_desc {
4788 u32 vendor_and_model_id;
4789 u32 size_mb;
4790 };
4791
4792 int t4_get_flash_params(struct adapter *adapter)
4793 {
4794 /*
4795 * Table for non-standard supported Flash parts. Note, all Flash
4796 * parts must have 64KB sectors.
4797 */
4798 static struct flash_desc supported_flash[] = {
4799 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
4800 };
4801
4802 int ret;
4803 u32 flashid = 0;
4804 unsigned int part, manufacturer;
4805 unsigned int density, size = 0;
4806
4807 /**
4808 * Issue a Read ID Command to the Flash part. We decode supported
4809 * Flash parts and their sizes from this. There's a newer Query
4810 * Command which can retrieve detailed geometry information but
4811 * many Flash parts don't support it.
4812 */
4813 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
4814 if (!ret)
4815 ret = sf1_read(adapter, 3, 0, 1, &flashid);
4816 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
4817 if (ret < 0)
4818 return ret;
4819
4820 /**
4821 * Check to see if it's one of our non-standard supported Flash parts.
4822 */
4823 for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
4824 if (supported_flash[part].vendor_and_model_id == flashid) {
4825 adapter->params.sf_size =
4826 supported_flash[part].size_mb;
4827 adapter->params.sf_nsec =
4828 adapter->params.sf_size / SF_SEC_SIZE;
4829 goto found;
4830 }
4831 }
4832
4833 /**
4834 * Decode Flash part size. The code below looks repetitive with
4835 * common encodings, but that's not guaranteed in the JEDEC
4836 * specification for the Read JEDEC ID command. The only thing that
4837 * we're guaranteed by the JEDEC specification is where the
4838 * Manufacturer ID is in the returned result. After that each
4839 * Manufacturer ~could~ encode things completely differently.
4840 * Note, all Flash parts must have 64KB sectors.
4841 */
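/* Worked example (illustrative, not from the original source): a 16MB
 * Winbond part answers the Read ID command with the bytes ef 40 18, which
 * the code above packs with the first byte in the low-order bits, giving
 * flashid = 0x1840ef; manufacturer = 0xef selects the Winbond table below
 * and density = 0x18 decodes to 1 << 24 bytes.
 */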
4842 manufacturer = flashid & 0xff;
4843 switch (manufacturer) {
4844 case 0x20: { /* Micron/Numonix */
4845 /**
4846 * This Density -> Size decoding table is taken from Micron
4847 * Data Sheets.
4848 */
4849 density = (flashid >> 16) & 0xff;
4850 switch (density) {
4851 case 0x14:
4852 size = 1 << 20; /* 1MB */
4853 break;
4854 case 0x15:
4855 size = 1 << 21; /* 2MB */
4856 break;
4857 case 0x16:
4858 size = 1 << 22; /* 4MB */
4859 break;
4860 case 0x17:
4861 size = 1 << 23; /* 8MB */
4862 break;
4863 case 0x18:
4864 size = 1 << 24; /* 16MB */
4865 break;
4866 case 0x19:
4867 size = 1 << 25; /* 32MB */
4868 break;
4869 case 0x20:
4870 size = 1 << 26; /* 64MB */
4871 break;
4872 case 0x21:
4873 size = 1 << 27; /* 128MB */
4874 break;
4875 case 0x22:
4876 size = 1 << 28; /* 256MB */
4877 break;
4878 }
4879 break;
4880 }
4881
4882 case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
4883 /**
4884 * This Density -> Size decoding table is taken from ISSI
4885 * Data Sheets.
4886 */
4887 density = (flashid >> 16) & 0xff;
4888 switch (density) {
4889 case 0x16:
4890 size = 1 << 25; /* 32MB */
4891 break;
4892 case 0x17:
4893 size = 1 << 26; /* 64MB */
4894 break;
4895 }
4896 break;
4897 }
4898
4899 case 0xc2: { /* Macronix */
4900 /**
4901 * This Density -> Size decoding table is taken from Macronix
4902 * Data Sheets.
4903 */
4904 density = (flashid >> 16) & 0xff;
4905 switch (density) {
4906 case 0x17:
4907 size = 1 << 23; /* 8MB */
4908 break;
4909 case 0x18:
4910 size = 1 << 24; /* 16MB */
4911 break;
4912 }
4913 break;
4914 }
4915
4916 case 0xef: { /* Winbond */
4917 /**
4918 * This Density -> Size decoding table is taken from Winbond
4919 * Data Sheets.
4920 */
4921 density = (flashid >> 16) & 0xff;
4922 switch (density) {
4923 case 0x17:
4924 size = 1 << 23; /* 8MB */
4925 break;
4926 case 0x18:
4927 size = 1 << 24; /* 16MB */
4928 break;
4929 }
4930 break;
4931 }
4932 }
4933
4934 /* If we didn't recognize the FLASH part, that's no real issue: the
4935 * Hardware/Software contract says that Hardware will _*ALWAYS*_
4936 * use a FLASH part which is at least 4MB in size and has 64KB
4937 * sectors. The unrecognized FLASH part is likely to be much larger
4938 * than 4MB, but that's all we really need.
4939 */
4940 if (size == 0) {
4941 dev_warn(adapter,
4942 "Unknown Flash Part, ID = %#x, assuming 4MB\n",
4943 flashid);
4944 size = 1 << 22;
4945 }
4946
4947 /**
4948 * Store decoded Flash size and fall through into vetting code.
4949 */
4950 adapter->params.sf_size = size;
4951 adapter->params.sf_nsec = size / SF_SEC_SIZE;
4952
4953 found:
4954 /*
4955 * Warn about adapters whose Flash part is smaller than the minimum
4956 * size we require.
4957 */
4958 if (adapter->params.sf_size < FLASH_MIN_SIZE)
4959 dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
4960 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
4961
4962 return 0;
4963 }
4964
4965 static void set_pcie_completion_timeout(struct adapter *adapter,
4966 u8 range)
4967 {
4968 u32 pcie_cap;
4969 u16 val;
4970
4971 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4972 if (pcie_cap) {
4973 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
4974 val &= 0xfff0;
4975 val |= range;
4976 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
4977 }
4978 }
4979
4980 /**
4981 * t4_get_chip_type - Determine chip type from device ID
4982 * @adap: the adapter
4983 * @ver: adapter version
4984 */
4985 int t4_get_chip_type(struct adapter *adap, int ver)
4986 {
4987 enum chip_type chip = 0;
4988 u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
4989
4990 /* Map the adapter version to a chip type code */
4991 switch (ver) {
4992 case CHELSIO_T5:
4993 chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4994 break;
4995 case CHELSIO_T6:
4996 chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
4997 break;
4998 default:
4999 dev_err(adap, "Device %d is not supported\n",
5000 adap->params.pci.device_id);
5001 return -EINVAL;
5002 }
5003
5004 return chip;
5005 }
5006
5007 /**
5008 * t4_prep_adapter - prepare SW and HW for operation
5009 * @adapter: the adapter
5010 *
5011 * Initialize adapter SW state for the various HW modules, set initial
5012 * values for some adapter tunables, take PHYs out of reset, and
5013 * initialize the MDIO interface.
5014 */
5015 int t4_prep_adapter(struct adapter *adapter)
5016 {
5017 int ret, ver;
5018 u32 pl_rev;
5019
5020 ret = t4_wait_dev_ready(adapter);
5021 if (ret < 0)
5022 return ret;
5023
5024 pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
5025 adapter->params.pci.device_id = adapter->pdev->id.device_id;
5026 adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
5027
5028 /*
5029 * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
5030 * ADAPTER (VERSION << 4 | REVISION)
5031 */
5032 ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
5033 adapter->params.chip = 0;
5034 switch (ver) {
5035 case CHELSIO_T5:
5036 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
5037 adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
5038 adapter->params.arch.mps_tcam_size =
5039 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5040 adapter->params.arch.mps_rplc_size = 128;
5041 adapter->params.arch.nchan = NCHAN;
5042 adapter->params.arch.vfcount = 128;
5043 break;
5044 case CHELSIO_T6:
5045 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
5046 adapter->params.arch.sge_fl_db = 0;
5047 adapter->params.arch.mps_tcam_size =
5048 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5049 adapter->params.arch.mps_rplc_size = 256;
5050 adapter->params.arch.nchan = 2;
5051 adapter->params.arch.vfcount = 256;
5052 break;
5053 default:
5054 dev_err(adapter, "%s: Device %d is not supported\n",
5055 __func__, adapter->params.pci.device_id);
5056 return -EINVAL;
5057 }
5058
5059 adapter->params.pci.vpd_cap_addr =
5060 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5061
5062 ret = t4_get_flash_params(adapter);
5063 if (ret < 0) {
5064 dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
5065 -ret);
5066 return ret;
5067 }
5068
5069 adapter->params.cim_la_size = CIMLA_SIZE;
5070
5071 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5072
5073 /*
5074 * Default port and clock for debugging in case we can't reach FW.
5075 */
5076 adapter->params.nports = 1;
5077 adapter->params.portvec = 1;
5078 adapter->params.vpd.cclk = 50000;
5079
5080 /* Set pci completion timeout value to 4 seconds. */
5081 set_pcie_completion_timeout(adapter, 0xd);
5082 return 0;
5083 }
5084
5085 /**
5086 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
5087 * @adapter: the adapter
5088 * @qid: the Queue ID
5089 * @qtype: the Ingress or Egress type for @qid
5090 * @pbar2_qoffset: BAR2 Queue Offset
5091 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
5092 *
5093 * Returns the BAR2 SGE Queue Registers information associated with the
5094 * indicated Absolute Queue ID. These are passed back in return value
5095 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
5096 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
5097 *
5098 * This may return an error which indicates that BAR2 SGE Queue
5099 * registers aren't available. If an error is not returned, then the
5100 * following values are returned:
5101 *
5102 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
5103 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
5104 *
5105 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
5106 * require the "Inferred Queue ID" ability may be used. E.g. the
5107 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
5108 * then these "Inferred Queue ID" registers may not be used.
5109 */
5110 int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
5111 enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
5112 unsigned int *pbar2_qid)
5113 {
5114 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
5115 u64 bar2_page_offset, bar2_qoffset;
5116 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
5117
5118 /*
5119 * T4 doesn't support BAR2 SGE Queue registers.
5120 */
5121 if (is_t4(adapter->params.chip))
5122 return -EINVAL;
5123
5124 /*
5125 * Get our SGE Page Size parameters.
5126 */
5127 page_shift = adapter->params.sge.hps + 10;
5128 page_size = 1 << page_shift;
5129
5130 /*
5131 * Get the right Queues per Page parameters for our Queue.
5132 */
5133 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
5134 adapter->params.sge.eq_qpp :
5135 adapter->params.sge.iq_qpp);
5136 qpp_mask = (1 << qpp_shift) - 1;
5137
5138 /*
5139 * Calculate the basics of the BAR2 SGE Queue register area:
5140 * o The BAR2 page the Queue registers will be in.
5141 * o The BAR2 Queue ID.
5142 * o The BAR2 Queue ID Offset into the BAR2 page.
5143 */
5144 bar2_page_offset = ((qid >> qpp_shift) << page_shift);
5145 bar2_qid = qid & qpp_mask;
5146 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
5147
5148 /*
5149 * If the BAR2 Queue ID Offset is less than the Page Size, then the
5150 * hardware will infer the Absolute Queue ID simply from the writes to
5151 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
5152 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
5153 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
5154 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
5155 * from the BAR2 Page and BAR2 Queue ID.
5156 *
5157 * One important consequence of this is that some BAR2 SGE registers
5158 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
5159 * there. But other registers synthesize the SGE Queue ID purely
5160 * from the writes to the registers -- the Write Combined Doorbell
5161 * Buffer is a good example. These BAR2 SGE Registers are only
5162 * available for those BAR2 SGE Register areas where the SGE Absolute
5163 * Queue ID can be inferred from simple writes.
5164 */
5165 bar2_qoffset = bar2_page_offset;
5166 bar2_qinferred = (bar2_qid_offset < page_size);
5167 if (bar2_qinferred) {
5168 bar2_qoffset += bar2_qid_offset;
5169 bar2_qid = 0;
5170 }
5171
5172 *pbar2_qoffset = bar2_qoffset;
5173 *pbar2_qid = bar2_qid;
5174 return 0;
5175 }
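/* Worked example (illustrative, not from the original source, assuming
 * SGE_UDB_SIZE is 128 bytes): with a 4KB SGE Page Size (page_shift = 12) and
 * 16 Egress Queues per Page (qpp_shift = 4), Egress Queue ID 35 gives
 * bar2_page_offset = (35 >> 4) << 12 = 0x2000 and bar2_qid_offset =
 * (35 & 15) * 128 = 0x180.  Since 0x180 < 4096 the Queue ID is inferred, so
 * the routine returns a BAR2 offset of 0x2180 and a BAR2 Queue ID of 0.
 */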
5176
5177 /**
5178 * t4_init_sge_params - initialize adap->params.sge
5179 * @adapter: the adapter
5180 *
5181 * Initialize various fields of the adapter's SGE Parameters structure.
5182 */
5183 int t4_init_sge_params(struct adapter *adapter)
5184 {
5185 struct sge_params *sge_params = &adapter->params.sge;
5186 u32 hps, qpp;
5187 unsigned int s_hps, s_qpp;
5188
5189 /*
5190 * Extract the SGE Page Size for our PF.
5191 */
5192 hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
5193 s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
5194 adapter->pf);
5195 sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
5196
5197 /*
5198 * Extract the SGE Egress and Ingress Queues Per Page for our PF.
5199 */
5200 s_qpp = (S_QUEUESPERPAGEPF0 +
5201 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
5202 qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
5203 sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
5204 qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
5205 sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
5206
5207 return 0;
5208 }
5209
5210 /**
5211 * t4_init_tp_params - initialize adap->params.tp
5212 * @adap: the adapter
5213 *
5214 * Initialize various fields of the adapter's TP Parameters structure.
5215 */
5216 int t4_init_tp_params(struct adapter *adap)
5217 {
5218 int chan, ret;
5219 u32 param, v;
5220
5221 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5222 adap->params.tp.tre = G_TIMERRESOLUTION(v);
5223 adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5224
5225 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5226 for (chan = 0; chan < NCHAN; chan++)
5227 adap->params.tp.tx_modq[chan] = chan;
5228
5229 /*
5230 * Cache the adapter's Compressed Filter Mode/Mask and global Ingress
5231 * Configuration.
5232 */
5233 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5234 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
5235 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK));
5236
5237 /* Read current value */
5238 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5239 1, &param, &v);
5240 if (!ret) {
5241 dev_info(adap, "Current filter mode/mask 0x%x:0x%x\n",
5242 G_FW_PARAMS_PARAM_FILTER_MODE(v),
5243 G_FW_PARAMS_PARAM_FILTER_MASK(v));
5244 adap->params.tp.vlan_pri_map =
5245 G_FW_PARAMS_PARAM_FILTER_MODE(v);
5246 adap->params.tp.filter_mask =
5247 G_FW_PARAMS_PARAM_FILTER_MASK(v);
5248 } else {
5249 dev_info(adap,
5250 "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
5251
5252 /* In case of older-fw (which doesn't expose the api
5253 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
5254 * the fw api) combination, fall-back to older method of reading
5255 * the filter mode from indirect-register
5256 */
5257 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5258 &adap->params.tp.vlan_pri_map, 1,
5259 A_TP_VLAN_PRI_MAP);
5260
5261 /* With the older-fw and newer-driver combination we might run
5262 * into an issue when user wants to use hash filter region but
5263 * the filter_mask is zero, in this case filter_mask validation
5264 * is tough. To avoid that we set the filter_mask same as filter
5265 * mode, which will behave exactly as the older way of ignoring
5266 * the filter mask validation.
5267 */
5268 adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
5269 }
5270
5271 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5272 &adap->params.tp.ingress_config, 1,
5273 A_TP_INGRESS_CONFIG);
5274
5275 /* For T6, cache the adapter's compressed error vector setting
5276 * and the passing of outer header info for encapsulated packets.
5277 */
5278 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
5279 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
5280 adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
5281 }
5282
5283 /*
5284 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
5285 * shift positions of several elements of the Compressed Filter Tuple
5286 * for this adapter which we need frequently ...
5287 */
5288 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
5289 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
5290 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
5291 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
5292 F_PROTOCOL);
5293 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
5294 F_ETHERTYPE);
5295 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
5296 F_MACMATCH);
5297 adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
5298
5299 v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
5300 adap->params.tp.hash_filter_mask = v;
5301 v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
5302 adap->params.tp.hash_filter_mask |= ((u64)v << 32);
5303
5304 return 0;
5305 }
5306
5307 /**
5308 * t4_filter_field_shift - calculate filter field shift
5309 * @adap: the adapter
5310 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5311 *
5312 * Return the shift position of a filter field within the Compressed
5313 * Filter Tuple. The filter field is specified via its selection bit
5314 * within TP_VLAN_PRI_MAP (filter mode), e.g. F_VLAN.
5315 */
5316 int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
5317 {
5318 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
5319 unsigned int sel;
5320 int field_shift;
5321
5322 if ((filter_mode & filter_sel) == 0)
5323 return -1;
5324
5325 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5326 switch (filter_mode & sel) {
5327 case F_FCOE:
5328 field_shift += W_FT_FCOE;
5329 break;
5330 case F_PORT:
5331 field_shift += W_FT_PORT;
5332 break;
5333 case F_VNIC_ID:
5334 field_shift += W_FT_VNIC_ID;
5335 break;
5336 case F_VLAN:
5337 field_shift += W_FT_VLAN;
5338 break;
5339 case F_TOS:
5340 field_shift += W_FT_TOS;
5341 break;
5342 case F_PROTOCOL:
5343 field_shift += W_FT_PROTOCOL;
5344 break;
5345 case F_ETHERTYPE:
5346 field_shift += W_FT_ETHERTYPE;
5347 break;
5348 case F_MACMATCH:
5349 field_shift += W_FT_MACMATCH;
5350 break;
5351 case F_MPSHITTYPE:
5352 field_shift += W_FT_MPSHITTYPE;
5353 break;
5354 case F_FRAGMENTATION:
5355 field_shift += W_FT_FRAGMENTATION;
5356 break;
5357 }
5358 }
5359 return field_shift;
5360 }
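/* Worked example (illustrative, not from the original source): if the
 * compressed filter mode enables only F_PORT, F_VLAN and F_PROTOCOL, then
 * t4_filter_field_shift(adap, F_PROTOCOL) sums the widths of the enabled
 * lower fields and returns W_FT_PORT + W_FT_VLAN, while
 * t4_filter_field_shift(adap, F_PORT) returns 0 and any disabled field
 * returns -1.
 */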
5361
5362 int t4_init_rss_mode(struct adapter *adap, int mbox)
5363 {
5364 int i, ret;
5365 struct fw_rss_vi_config_cmd rvc;
5366
5367 memset(&rvc, 0, sizeof(rvc));
5368
5369 for_each_port(adap, i) {
5370 struct port_info *p = adap2pinfo(adap, i);
5371
5372 rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5373 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5374 V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
5375 rvc.retval_len16 = htonl(FW_LEN16(rvc));
5376 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
5377 if (ret)
5378 return ret;
5379 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
5380 }
5381 return 0;
5382 }
5383
5384 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
5385 {
5386 unsigned int fw_caps = adap->params.fw_caps_support;
5387 fw_port_cap32_t pcaps, acaps;
5388 enum fw_port_type port_type;
5389 struct fw_port_cmd cmd;
5390 u8 vivld = 0, vin = 0;
5391 int ret, i, j = 0;
5392 int mdio_addr;
5393 u32 action;
5394 u8 addr[6];
5395
5396 memset(&cmd, 0, sizeof(cmd));
5397
5398 for_each_port(adap, i) {
5399 struct port_info *pi = adap2pinfo(adap, i);
5400 unsigned int rss_size = 0;
5401
5402 while ((adap->params.portvec & (1 << j)) == 0)
5403 j++;
5404
5405 /* If we haven't yet determined whether we're talking to
5406 * Firmware which knows the new 32-bit Port Capabilities, it's
5407 * time to find out now. This will also tell new Firmware to
5408 * send us Port Status Updates using the new 32-bit Port
5409 * Capabilities version of the Port Information message.
5410 */
5411 if (fw_caps == FW_CAPS_UNKNOWN) {
5412 u32 param, val, caps;
5413
5414 caps = FW_PARAMS_PARAM_PFVF_PORT_CAPS32;
5415 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
5416 V_FW_PARAMS_PARAM_X(caps));
5417 val = 1;
5418 ret = t4_set_params(adap, mbox, pf, vf, 1, &param,
5419 &val);
5420 fw_caps = ret == 0 ? FW_CAPS32 : FW_CAPS16;
5421 adap->params.fw_caps_support = fw_caps;
5422 }
5423
5424 memset(&cmd, 0, sizeof(cmd));
5425 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
5426 F_FW_CMD_REQUEST |
5427 F_FW_CMD_READ |
5428 V_FW_PORT_CMD_PORTID(j));
5429 action = fw_caps == FW_CAPS16 ? FW_PORT_ACTION_GET_PORT_INFO :
5430 FW_PORT_ACTION_GET_PORT_INFO32;
5431 cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
5432 FW_LEN16(cmd));
5433 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
5434 if (ret)
5435 return ret;
5436
5437 /* Extract the various fields from the Port Information message.
5438 */
5439 if (fw_caps == FW_CAPS16) {
5440 u32 lstatus =
5441 be32_to_cpu(cmd.u.info.lstatus_to_modtype);
5442
5443 port_type = G_FW_PORT_CMD_PTYPE(lstatus);
5444 mdio_addr = (lstatus & F_FW_PORT_CMD_MDIOCAP) ?
5445 (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : -1;
5446 pcaps = be16_to_cpu(cmd.u.info.pcap);
5447 acaps = be16_to_cpu(cmd.u.info.acap);
5448 pcaps = fwcaps16_to_caps32(pcaps);
5449 acaps = fwcaps16_to_caps32(acaps);
5450 } else {
5451 u32 lstatus32 =
5452 be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
5453
5454 port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
5455 mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
5456 (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
5457 -1;
5458 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
5459 acaps = be32_to_cpu(cmd.u.info32.acaps32);
5460 }
5461
5462 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size,
5463 &vivld, &vin);
5464 if (ret < 0)
5465 return ret;
5466
5467 pi->viid = ret;
5468 pi->tx_chan = j;
5469 pi->rss_size = rss_size;
5470 t4_os_set_hw_addr(adap, i, addr);
5471
5472 /* If fw supports returning the VIN as part of FW_VI_CMD,
5473 * save the returned values.
5474 */
5475 if (adap->params.viid_smt_extn_support) {
5476 pi->vivld = vivld;
5477 pi->vin = vin;
5478 } else {
5479 /* Retrieve the values from VIID */
5480 pi->vivld = G_FW_VIID_VIVLD(pi->viid);
5481 pi->vin = G_FW_VIID_VIN(pi->viid);
5482 }
5483
5484 pi->port_type = port_type;
5485 pi->mdio_addr = mdio_addr;
5486 pi->mod_type = FW_PORT_MOD_TYPE_NA;
5487
5488 init_link_config(&pi->link_cfg, pcaps, acaps);
5489 j++;
5490 }
5491 return 0;
5492 }
5493
5494 /**
5495 * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
5496 * @adap: the adapter
5497 * @win: PCI-E Memory Window to use
5498 * @addr: address within adapter memory
5499 * @len: amount of memory to transfer
5500 * @hbuf: host memory buffer
5501 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
5502 *
5503 * Reads/writes an [almost] arbitrary memory region in the firmware: the
5504 * firmware memory address and host buffer must be aligned on 32-bit
5505 * boundaries; the length may be arbitrary.
5506 *
5507 * NOTES:
5508 * 1. The memory is transferred as a raw byte sequence from/to the
5509 * firmware's memory. If this memory contains data structures which
5510 * contain multi-byte integers, it's the caller's responsibility to
5511 * perform appropriate byte order conversions.
5512 *
5513 * 2. It is the Caller's responsibility to ensure that no other code
5514 * uses the specified PCI-E Memory Window while this routine is
5515 * using it. This is typically done via the use of OS-specific
5516 * locks, etc.
5517 */
5518 int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
5519 u32 len, void *hbuf, int dir)
5520 {
5521 u32 pos, offset, resid;
5522 u32 win_pf, mem_reg, mem_aperture, mem_base;
5523 u32 *buf;
5524
5525 /* Argument sanity checks ...*/
5526 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
5527 return -EINVAL;
5528 buf = (u32 *)hbuf;
5529
5530 /* It's convenient to be able to handle lengths which aren't a
5531 * multiple of 32-bits because we often end up transferring files to
5532 * the firmware. So we'll handle that by normalizing the length here
5533 * and then handling any residual transfer at the end.
5534 */
5535 resid = len & 0x3;
5536 len -= resid;
5537
5538 /* Each PCI-E Memory Window is programmed with a window size -- or
5539 * "aperture" -- which controls the granularity of its mapping onto
5540 * adapter memory. We need to grab that aperture in order to know
5541 * how to use the specified window. The window is also programmed
5542 * with the base address of the Memory Window in BAR0's address
5543 * space. For T4 this is an absolute PCI-E Bus Address. For T5
5544 * the address is relative to BAR0.
5545 */
5546 mem_reg = t4_read_reg(adap,
5547 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
5548 win));
5549 mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
5550 mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
5551
5552 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);
5553
5554 /* Calculate our initial PCI-E Memory Window Position and Offset into
5555 * that Window.
5556 */
5557 pos = addr & ~(mem_aperture - 1);
5558 offset = addr - pos;
5559
5560 /* Set up initial PCI-E Memory Window to cover the start of our
5561 * transfer. (Read it back to ensure that changes propagate before we
5562 * attempt to use the new value.)
5563 */
5564 t4_write_reg(adap,
5565 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
5566 pos | win_pf);
5567 t4_read_reg(adap,
5568 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));
5569
5570 /* Transfer data to/from the adapter as long as there's an integral
5571 * number of 32-bit transfers to complete.
5572 *
5573 * A note on Endianness issues:
5574 *
5575 * The "register" reads and writes below from/to the PCI-E Memory
5576 * Window invoke the standard adapter Big-Endian to PCI-E Link
5577 * Little-Endian "swizzle." As a result, if we have the following
5578 * data in adapter memory:
5579 *
5580 * Memory: ... | b0 | b1 | b2 | b3 | ...
5581 * Address: i+0 i+1 i+2 i+3
5582 *
5583 * Then a read of the adapter memory via the PCI-E Memory Window
5584 * will yield:
5585 *
5586 * x = readl(i)
5587 * 31 0
5588 * [ b3 | b2 | b1 | b0 ]
5589 *
5590 * If this value is stored into local memory on a Little-Endian system
5591 * it will show up correctly in local memory as:
5592 *
5593 * ( ..., b0, b1, b2, b3, ... )
5594 *
5595 * But on a Big-Endian system, the store will show up in memory
5596 * incorrectly swizzled as:
5597 *
5598 * ( ..., b3, b2, b1, b0, ... )
5599 *
5600 * So we need to account for this in the reads and writes to the
5601 * PCI-E Memory Window below by undoing the register read/write
5602 * swizzles.
5603 */
5604 while (len > 0) {
5605 if (dir == T4_MEMORY_READ)
5606 *buf++ = le32_to_cpu((__le32)t4_read_reg(adap,
5607 mem_base +
5608 offset));
5609 else
5610 t4_write_reg(adap, mem_base + offset,
5611 (u32)cpu_to_le32(*buf++));
5612 offset += sizeof(__be32);
5613 len -= sizeof(__be32);
5614
5615 /* If we've reached the end of our current window aperture,
5616 * move the PCI-E Memory Window on to the next. Note that
5617 * doing this here even when "len" has already reached 0 allows us
5618 * the PCI-E Memory Window for a possible final residual
5619 * transfer below ...
5620 */
5621 if (offset == mem_aperture) {
5622 pos += mem_aperture;
5623 offset = 0;
5624 t4_write_reg(adap,
5625 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
5626 win), pos | win_pf);
5627 t4_read_reg(adap,
5628 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
5629 win));
5630 }
5631 }
5632
5633 /* If the original transfer had a length which wasn't a multiple of
5634 * 32-bits, now's where we need to finish off the transfer of the
5635 * residual amount. The PCI-E Memory Window has already been moved
5636 * above (if necessary) to cover this final transfer.
5637 */
5638 if (resid) {
5639 union {
5640 u32 word;
5641 char byte[4];
5642 } last;
5643 unsigned char *bp;
5644 int i;
5645
5646 if (dir == T4_MEMORY_READ) {
5647 last.word = le32_to_cpu((__le32)t4_read_reg(adap,
5648 mem_base +
5649 offset));
5650 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
5651 bp[i] = last.byte[i];
5652 } else {
5653 last.word = *buf;
5654 for (i = resid; i < 4; i++)
5655 last.byte[i] = 0;
5656 t4_write_reg(adap, mem_base + offset,
5657 (u32)cpu_to_le32(last.word));
5658 }
5659 }
5660
5661 return 0;
5662 }
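
/* A minimal usage sketch (not part of the driver source) showing the
 * 32-bit alignment contract of t4_memory_rw_addr().  The window index 0
 * and the helper name read_adapter_mem() are illustrative assumptions;
 * a real caller must also guarantee exclusive use of the chosen window.
 */
static int read_adapter_mem(struct adapter *adap, u32 addr, u32 len,
			    void *dst)
{
	/* addr and dst must be 4-byte aligned; len may be any size. */
	if ((addr & 0x3) || ((uintptr_t)dst & 0x3))
		return -EINVAL;

	return t4_memory_rw_addr(adap, 0, addr, len, dst, T4_MEMORY_READ);
}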
5663
5664 /**
5665 * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
5666 * @adap: the adapter
5667 * @win: PCI-E Memory Window to use
5668 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
5669 * @maddr: address within indicated memory type
5670 * @len: amount of memory to transfer
5671 * @hbuf: host memory buffer
5672 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
5673 *
5674 * Reads/writes adapter memory using t4_memory_rw_addr(). This routine
5675 * provides a (memory type, address within memory type) interface.
5676 */
5677 int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
5678 u32 len, void *hbuf, int dir)
5679 {
5680 u32 mtype_offset;
5681 u32 edc_size, mc_size;
5682
5683 /* Offset into the region of memory which is being accessed
5684 * MEM_EDC0 = 0
5685 * MEM_EDC1 = 1
5686 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
5687 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
5688 */
5689 edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
5690 if (mtype != MEM_MC1) {
5691 mtype_offset = (mtype * (edc_size * 1024 * 1024));
5692 } else {
5693 mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
5694 A_MA_EXT_MEMORY0_BAR));
5695 mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
5696 }
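	/* Worked example of the offset calculation above (illustrative
	 * sizes only): with edc_size = 1024 (MB), MEM_EDC1 starts 1GB
	 * into the flat MA address space; on a chip with two memory
	 * controllers and mc_size = 2048 (MB), MEM_MC1 starts at
	 * (2 * 1024 + 2048)MB = 4GB.
	 */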
5697
5698 return t4_memory_rw_addr(adap, win,
5699 mtype_offset + maddr, len,
5700 hbuf, dir);
5701 }
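
/* A minimal usage sketch for the mtype interface (not part of the driver
 * source): it reads the first 64 bytes of EDC0 into a naturally aligned
 * host buffer.  The window index 0, the buffer size and the helper name
 * dump_edc0_start() are illustrative assumptions.
 */
static int dump_edc0_start(struct adapter *adap)
{
	u32 data[16];	/* 64 bytes, 32-bit aligned as required */

	return t4_memory_rw_mtype(adap, 0, MEM_EDC0, 0, sizeof(data),
				  data, T4_MEMORY_READ);
}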