]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
Merge remote-tracking branch 'regulator/topic/palmas' into v3.9-rc8
[mirror_ubuntu-hirsute-kernel.git] / drivers / net / ethernet / chelsio / cxgb4 / t4_hw.c
CommitLineData
56d36be4
DM
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
de498c89
RD
56static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
56d36be4
DM
58{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
74static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
76{
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78 delay, NULL);
79}
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
f2b7e78d 112void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
de498c89
RD
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
56d36be4
DM
115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
122
13ee15d3
VP
123/**
124 * t4_write_indirect - write indirectly addressed registers
125 * @adap: the adapter
126 * @addr_reg: register holding the indirect addresses
127 * @data_reg: register holding the value for the indirect registers
128 * @vals: values to write
129 * @nregs: how many indirect registers to write
130 * @start_idx: address of first indirect register to write
131 *
132 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair.
134 */
135void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx)
138{
139 while (nregs--) {
140 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++);
142 }
143}
144
56d36be4
DM
145/*
146 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
147 */
148static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
149 u32 mbox_addr)
150{
151 for ( ; nflit; nflit--, mbox_addr += 8)
152 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
153}
154
155/*
156 * Handle a FW assertion reported in a mailbox.
157 */
158static void fw_asrt(struct adapter *adap, u32 mbox_addr)
159{
160 struct fw_debug_cmd asrt;
161
162 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
163 dev_alert(adap->pdev_dev,
164 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
165 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
166 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
167}
168
169static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
170{
171 dev_err(adap->pdev_dev,
172 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
173 (unsigned long long)t4_read_reg64(adap, data_reg),
174 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
175 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
176 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
177 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
178 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
179 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
180 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
181}
182
183/**
184 * t4_wr_mbox_meat - send a command to FW through the given mailbox
185 * @adap: the adapter
186 * @mbox: index of the mailbox to use
187 * @cmd: the command to write
188 * @size: command length in bytes
189 * @rpl: where to optionally store the reply
190 * @sleep_ok: if true we may sleep while awaiting command completion
191 *
192 * Sends the given command to FW through the selected mailbox and waits
193 * for the FW to execute the command. If @rpl is not %NULL it is used to
194 * store the FW's reply to the command. The command and its optional
195 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
196 * to respond. @sleep_ok determines whether we may sleep while awaiting
197 * the response. If sleeping is allowed we use progressive backoff
198 * otherwise we spin.
199 *
200 * The return value is 0 on success or a negative errno on failure. A
201 * failure can happen either because we are not able to execute the
202 * command or FW executes it but signals an error. In the latter case
203 * the return value is the error code indicated by FW (negated).
204 */
205int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
206 void *rpl, bool sleep_ok)
207{
005b5717 208 static const int delay[] = {
56d36be4
DM
209 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
210 };
211
212 u32 v;
213 u64 res;
214 int i, ms, delay_idx;
215 const __be64 *p = cmd;
216 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
217 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
218
219 if ((size & 15) || size > MBOX_LEN)
220 return -EINVAL;
221
204dc3c0
DM
222 /*
223 * If the device is off-line, as in EEH, commands will time out.
224 * Fail them early so we don't waste time waiting.
225 */
226 if (adap->pdev->error_state != pci_channel_io_normal)
227 return -EIO;
228
56d36be4
DM
229 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
230 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
231 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
232
233 if (v != MBOX_OWNER_DRV)
234 return v ? -EBUSY : -ETIMEDOUT;
235
236 for (i = 0; i < size; i += 8)
237 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
238
239 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
240 t4_read_reg(adap, ctl_reg); /* flush write */
241
242 delay_idx = 0;
243 ms = delay[0];
244
245 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
246 if (sleep_ok) {
247 ms = delay[delay_idx]; /* last element may repeat */
248 if (delay_idx < ARRAY_SIZE(delay) - 1)
249 delay_idx++;
250 msleep(ms);
251 } else
252 mdelay(ms);
253
254 v = t4_read_reg(adap, ctl_reg);
255 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
256 if (!(v & MBMSGVALID)) {
257 t4_write_reg(adap, ctl_reg, 0);
258 continue;
259 }
260
261 res = t4_read_reg64(adap, data_reg);
262 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
263 fw_asrt(adap, data_reg);
264 res = FW_CMD_RETVAL(EIO);
265 } else if (rpl)
266 get_mbox_rpl(adap, rpl, size / 8, data_reg);
267
268 if (FW_CMD_RETVAL_GET((int)res))
269 dump_mbox(adap, mbox, data_reg);
270 t4_write_reg(adap, ctl_reg, 0);
271 return -FW_CMD_RETVAL_GET((int)res);
272 }
273 }
274
275 dump_mbox(adap, mbox, data_reg);
276 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
277 *(const u8 *)cmd, mbox);
278 return -ETIMEDOUT;
279}
280
281/**
282 * t4_mc_read - read from MC through backdoor accesses
283 * @adap: the adapter
284 * @addr: address of first byte requested
285 * @data: 64 bytes of data containing the requested address
286 * @ecc: where to store the corresponding 64-bit ECC word
287 *
288 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
289 * that covers the requested address @addr. If @parity is not %NULL it
290 * is assigned the 64-bit ECC word for the read data.
291 */
292int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
293{
294 int i;
295
296 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
297 return -EBUSY;
298 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
299 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
300 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
301 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
302 BIST_CMD_GAP(1));
303 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
304 if (i)
305 return i;
306
307#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
308
309 for (i = 15; i >= 0; i--)
310 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
311 if (ecc)
312 *ecc = t4_read_reg64(adap, MC_DATA(16));
313#undef MC_DATA
314 return 0;
315}
316
317/**
318 * t4_edc_read - read from EDC through backdoor accesses
319 * @adap: the adapter
320 * @idx: which EDC to access
321 * @addr: address of first byte requested
322 * @data: 64 bytes of data containing the requested address
323 * @ecc: where to store the corresponding 64-bit ECC word
324 *
325 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
326 * that covers the requested address @addr. If @parity is not %NULL it
327 * is assigned the 64-bit ECC word for the read data.
328 */
329int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
330{
331 int i;
332
333 idx *= EDC_STRIDE;
334 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
335 return -EBUSY;
336 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
337 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
338 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
339 t4_write_reg(adap, EDC_BIST_CMD + idx,
340 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
341 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
342 if (i)
343 return i;
344
345#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
346
347 for (i = 15; i >= 0; i--)
348 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
349 if (ecc)
350 *ecc = t4_read_reg64(adap, EDC_DATA(16));
351#undef EDC_DATA
352 return 0;
353}
354
5afc8b84
VP
355/*
356 * t4_mem_win_rw - read/write memory through PCIE memory window
357 * @adap: the adapter
358 * @addr: address of first byte requested
359 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
360 * @dir: direction of transfer 1 => read, 0 => write
361 *
362 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
363 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
364 * address @addr.
365 */
366static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
367{
368 int i;
369
370 /*
371 * Setup offset into PCIE memory window. Address must be a
372 * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
373 * ensure that changes propagate before we attempt to use the new
374 * values.)
375 */
376 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
377 addr & ~(MEMWIN0_APERTURE - 1));
378 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
379
380 /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
381 for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
382 if (dir)
404d9e3f
VP
383 *data++ = (__force __be32) t4_read_reg(adap,
384 (MEMWIN0_BASE + i));
5afc8b84 385 else
404d9e3f
VP
386 t4_write_reg(adap, (MEMWIN0_BASE + i),
387 (__force u32) *data++);
5afc8b84
VP
388 }
389
390 return 0;
391}
392
393/**
394 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
395 * @adap: the adapter
396 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
397 * @addr: address within indicated memory type
398 * @len: amount of memory to transfer
399 * @buf: host memory buffer
400 * @dir: direction of transfer 1 => read, 0 => write
401 *
402 * Reads/writes an [almost] arbitrary memory region in the firmware: the
403 * firmware memory address, length and host buffer must be aligned on
404 * 32-bit boudaries. The memory is transferred as a raw byte sequence
405 * from/to the firmware's memory. If this memory contains data
406 * structures which contain multi-byte integers, it's the callers
407 * responsibility to perform appropriate byte order conversions.
408 */
409static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
410 __be32 *buf, int dir)
411{
412 u32 pos, start, end, offset, memoffset;
8c357ebd
VP
413 int ret = 0;
414 __be32 *data;
5afc8b84
VP
415
416 /*
417 * Argument sanity checks ...
418 */
419 if ((addr & 0x3) || (len & 0x3))
420 return -EINVAL;
421
594f88e9 422 data = vmalloc(MEMWIN0_APERTURE);
8c357ebd
VP
423 if (!data)
424 return -ENOMEM;
425
5afc8b84
VP
426 /*
427 * Offset into the region of memory which is being accessed
428 * MEM_EDC0 = 0
429 * MEM_EDC1 = 1
430 * MEM_MC = 2
431 */
432 memoffset = (mtype * (5 * 1024 * 1024));
433
434 /* Determine the PCIE_MEM_ACCESS_OFFSET */
435 addr = addr + memoffset;
436
437 /*
438 * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes
439 * at a time so we need to round down the start and round up the end.
440 * We'll start copying out of the first line at (addr - start) a word
441 * at a time.
442 */
443 start = addr & ~(MEMWIN0_APERTURE-1);
444 end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
445 offset = (addr - start)/sizeof(__be32);
446
447 for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
5afc8b84
VP
448
449 /*
450 * If we're writing, copy the data from the caller's memory
451 * buffer
452 */
453 if (!dir) {
454 /*
455 * If we're doing a partial write, then we need to do
456 * a read-modify-write ...
457 */
458 if (offset || len < MEMWIN0_APERTURE) {
459 ret = t4_mem_win_rw(adap, pos, data, 1);
460 if (ret)
8c357ebd 461 break;
5afc8b84
VP
462 }
463 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
464 len > 0) {
465 data[offset++] = *buf++;
466 len -= sizeof(__be32);
467 }
468 }
469
470 /*
471 * Transfer a block of memory and bail if there's an error.
472 */
473 ret = t4_mem_win_rw(adap, pos, data, dir);
474 if (ret)
8c357ebd 475 break;
5afc8b84
VP
476
477 /*
478 * If we're reading, copy the data into the caller's memory
479 * buffer.
480 */
481 if (dir)
482 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
483 len > 0) {
484 *buf++ = data[offset++];
485 len -= sizeof(__be32);
486 }
487 }
488
8c357ebd
VP
489 vfree(data);
490 return ret;
5afc8b84
VP
491}
492
493int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
494 __be32 *buf)
495{
496 return t4_memory_rw(adap, mtype, addr, len, buf, 0);
497}
498
56d36be4 499#define EEPROM_STAT_ADDR 0x7bfc
226ec5fd 500#define VPD_LEN 512
47ce9c48
SR
501#define VPD_BASE 0x400
502#define VPD_BASE_OLD 0
56d36be4
DM
503
504/**
505 * t4_seeprom_wp - enable/disable EEPROM write protection
506 * @adapter: the adapter
507 * @enable: whether to enable or disable write protection
508 *
509 * Enables or disables write protection on the serial EEPROM.
510 */
511int t4_seeprom_wp(struct adapter *adapter, bool enable)
512{
513 unsigned int v = enable ? 0xc : 0;
514 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
515 return ret < 0 ? ret : 0;
516}
517
518/**
519 * get_vpd_params - read VPD parameters from VPD EEPROM
520 * @adapter: adapter to read
521 * @p: where to store the parameters
522 *
523 * Reads card parameters stored in VPD EEPROM.
524 */
636f9d37 525int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
56d36be4 526{
636f9d37 527 u32 cclk_param, cclk_val;
47ce9c48 528 int i, ret, addr;
ec164008 529 int ec, sn;
8c357ebd 530 u8 *vpd, csum;
23d88e1d 531 unsigned int vpdr_len, kw_offset, id_len;
56d36be4 532
8c357ebd
VP
533 vpd = vmalloc(VPD_LEN);
534 if (!vpd)
535 return -ENOMEM;
536
47ce9c48
SR
537 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
538 if (ret < 0)
539 goto out;
540 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
541
542 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
56d36be4 543 if (ret < 0)
8c357ebd 544 goto out;
56d36be4 545
23d88e1d
DM
546 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
547 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
8c357ebd
VP
548 ret = -EINVAL;
549 goto out;
23d88e1d
DM
550 }
551
552 id_len = pci_vpd_lrdt_size(vpd);
553 if (id_len > ID_LEN)
554 id_len = ID_LEN;
555
556 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
557 if (i < 0) {
558 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
8c357ebd
VP
559 ret = -EINVAL;
560 goto out;
23d88e1d
DM
561 }
562
563 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
564 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
565 if (vpdr_len + kw_offset > VPD_LEN) {
226ec5fd 566 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
8c357ebd
VP
567 ret = -EINVAL;
568 goto out;
226ec5fd
DM
569 }
570
571#define FIND_VPD_KW(var, name) do { \
23d88e1d 572 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
226ec5fd
DM
573 if (var < 0) { \
574 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
8c357ebd
VP
575 ret = -EINVAL; \
576 goto out; \
226ec5fd
DM
577 } \
578 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
579} while (0)
580
581 FIND_VPD_KW(i, "RV");
582 for (csum = 0; i >= 0; i--)
583 csum += vpd[i];
56d36be4
DM
584
585 if (csum) {
586 dev_err(adapter->pdev_dev,
587 "corrupted VPD EEPROM, actual csum %u\n", csum);
8c357ebd
VP
588 ret = -EINVAL;
589 goto out;
56d36be4
DM
590 }
591
226ec5fd
DM
592 FIND_VPD_KW(ec, "EC");
593 FIND_VPD_KW(sn, "SN");
226ec5fd
DM
594#undef FIND_VPD_KW
595
23d88e1d 596 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
56d36be4 597 strim(p->id);
226ec5fd 598 memcpy(p->ec, vpd + ec, EC_LEN);
56d36be4 599 strim(p->ec);
226ec5fd
DM
600 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
601 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
56d36be4 602 strim(p->sn);
636f9d37
VP
603
604 /*
605 * Ask firmware for the Core Clock since it knows how to translate the
606 * Reference Clock ('V2') VPD field into a Core Clock value ...
607 */
608 cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
609 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
610 ret = t4_query_params(adapter, adapter->mbox, 0, 0,
611 1, &cclk_param, &cclk_val);
8c357ebd
VP
612
613out:
614 vfree(vpd);
636f9d37
VP
615 if (ret)
616 return ret;
617 p->cclk = cclk_val;
618
56d36be4
DM
619 return 0;
620}
621
622/* serial flash and firmware constants */
623enum {
624 SF_ATTEMPTS = 10, /* max retries for SF operations */
625
626 /* flash command opcodes */
627 SF_PROG_PAGE = 2, /* program page */
628 SF_WR_DISABLE = 4, /* disable writes */
629 SF_RD_STATUS = 5, /* read status register */
630 SF_WR_ENABLE = 6, /* enable writes */
631 SF_RD_DATA_FAST = 0xb, /* read flash */
900a6596 632 SF_RD_ID = 0x9f, /* read ID */
56d36be4
DM
633 SF_ERASE_SECTOR = 0xd8, /* erase sector */
634
900a6596 635 FW_MAX_SIZE = 512 * 1024,
56d36be4
DM
636};
637
638/**
639 * sf1_read - read data from the serial flash
640 * @adapter: the adapter
641 * @byte_cnt: number of bytes to read
642 * @cont: whether another operation will be chained
643 * @lock: whether to lock SF for PL access only
644 * @valp: where to store the read data
645 *
646 * Reads up to 4 bytes of data from the serial flash. The location of
647 * the read needs to be specified prior to calling this by issuing the
648 * appropriate commands to the serial flash.
649 */
650static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
651 int lock, u32 *valp)
652{
653 int ret;
654
655 if (!byte_cnt || byte_cnt > 4)
656 return -EINVAL;
ce91a923 657 if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
56d36be4
DM
658 return -EBUSY;
659 cont = cont ? SF_CONT : 0;
660 lock = lock ? SF_LOCK : 0;
661 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
ce91a923 662 ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
56d36be4
DM
663 if (!ret)
664 *valp = t4_read_reg(adapter, SF_DATA);
665 return ret;
666}
667
668/**
669 * sf1_write - write data to the serial flash
670 * @adapter: the adapter
671 * @byte_cnt: number of bytes to write
672 * @cont: whether another operation will be chained
673 * @lock: whether to lock SF for PL access only
674 * @val: value to write
675 *
676 * Writes up to 4 bytes of data to the serial flash. The location of
677 * the write needs to be specified prior to calling this by issuing the
678 * appropriate commands to the serial flash.
679 */
680static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
681 int lock, u32 val)
682{
683 if (!byte_cnt || byte_cnt > 4)
684 return -EINVAL;
ce91a923 685 if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
56d36be4
DM
686 return -EBUSY;
687 cont = cont ? SF_CONT : 0;
688 lock = lock ? SF_LOCK : 0;
689 t4_write_reg(adapter, SF_DATA, val);
690 t4_write_reg(adapter, SF_OP, lock |
691 cont | BYTECNT(byte_cnt - 1) | OP_WR);
ce91a923 692 return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
56d36be4
DM
693}
694
695/**
696 * flash_wait_op - wait for a flash operation to complete
697 * @adapter: the adapter
698 * @attempts: max number of polls of the status register
699 * @delay: delay between polls in ms
700 *
701 * Wait for a flash operation to complete by polling the status register.
702 */
703static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
704{
705 int ret;
706 u32 status;
707
708 while (1) {
709 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
710 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
711 return ret;
712 if (!(status & 1))
713 return 0;
714 if (--attempts == 0)
715 return -EAGAIN;
716 if (delay)
717 msleep(delay);
718 }
719}
720
721/**
722 * t4_read_flash - read words from serial flash
723 * @adapter: the adapter
724 * @addr: the start address for the read
725 * @nwords: how many 32-bit words to read
726 * @data: where to store the read data
727 * @byte_oriented: whether to store data as bytes or as words
728 *
729 * Read the specified number of 32-bit words from the serial flash.
730 * If @byte_oriented is set the read data is stored as a byte array
731 * (i.e., big-endian), otherwise as 32-bit words in the platform's
732 * natural endianess.
733 */
de498c89
RD
734static int t4_read_flash(struct adapter *adapter, unsigned int addr,
735 unsigned int nwords, u32 *data, int byte_oriented)
56d36be4
DM
736{
737 int ret;
738
900a6596 739 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
56d36be4
DM
740 return -EINVAL;
741
742 addr = swab32(addr) | SF_RD_DATA_FAST;
743
744 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
745 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
746 return ret;
747
748 for ( ; nwords; nwords--, data++) {
749 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
750 if (nwords == 1)
751 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
752 if (ret)
753 return ret;
754 if (byte_oriented)
404d9e3f 755 *data = (__force __u32) (htonl(*data));
56d36be4
DM
756 }
757 return 0;
758}
759
760/**
761 * t4_write_flash - write up to a page of data to the serial flash
762 * @adapter: the adapter
763 * @addr: the start address to write
764 * @n: length of data to write in bytes
765 * @data: the data to write
766 *
767 * Writes up to a page of data (256 bytes) to the serial flash starting
768 * at the given address. All the data must be written to the same page.
769 */
770static int t4_write_flash(struct adapter *adapter, unsigned int addr,
771 unsigned int n, const u8 *data)
772{
773 int ret;
774 u32 buf[64];
775 unsigned int i, c, left, val, offset = addr & 0xff;
776
900a6596 777 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
56d36be4
DM
778 return -EINVAL;
779
780 val = swab32(addr) | SF_PROG_PAGE;
781
782 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
783 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
784 goto unlock;
785
786 for (left = n; left; left -= c) {
787 c = min(left, 4U);
788 for (val = 0, i = 0; i < c; ++i)
789 val = (val << 8) + *data++;
790
791 ret = sf1_write(adapter, c, c != left, 1, val);
792 if (ret)
793 goto unlock;
794 }
900a6596 795 ret = flash_wait_op(adapter, 8, 1);
56d36be4
DM
796 if (ret)
797 goto unlock;
798
799 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
800
801 /* Read the page to verify the write succeeded */
802 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
803 if (ret)
804 return ret;
805
806 if (memcmp(data - n, (u8 *)buf + offset, n)) {
807 dev_err(adapter->pdev_dev,
808 "failed to correctly write the flash page at %#x\n",
809 addr);
810 return -EIO;
811 }
812 return 0;
813
814unlock:
815 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
816 return ret;
817}
818
819/**
820 * get_fw_version - read the firmware version
821 * @adapter: the adapter
822 * @vers: where to place the version
823 *
824 * Reads the FW version from flash.
825 */
826static int get_fw_version(struct adapter *adapter, u32 *vers)
827{
900a6596
DM
828 return t4_read_flash(adapter, adapter->params.sf_fw_start +
829 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
56d36be4
DM
830}
831
832/**
833 * get_tp_version - read the TP microcode version
834 * @adapter: the adapter
835 * @vers: where to place the version
836 *
837 * Reads the TP microcode version from flash.
838 */
839static int get_tp_version(struct adapter *adapter, u32 *vers)
840{
900a6596
DM
841 return t4_read_flash(adapter, adapter->params.sf_fw_start +
842 offsetof(struct fw_hdr, tp_microcode_ver),
56d36be4
DM
843 1, vers, 0);
844}
845
846/**
847 * t4_check_fw_version - check if the FW is compatible with this driver
848 * @adapter: the adapter
849 *
850 * Checks if an adapter's FW is compatible with the driver. Returns 0
851 * if there's exact match, a negative error if the version could not be
852 * read or there's a major version mismatch, and a positive value if the
853 * expected major version is found but there's a minor version mismatch.
854 */
855int t4_check_fw_version(struct adapter *adapter)
856{
857 u32 api_vers[2];
858 int ret, major, minor, micro;
859
860 ret = get_fw_version(adapter, &adapter->params.fw_vers);
861 if (!ret)
862 ret = get_tp_version(adapter, &adapter->params.tp_vers);
863 if (!ret)
900a6596
DM
864 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
865 offsetof(struct fw_hdr, intfver_nic),
866 2, api_vers, 1);
56d36be4
DM
867 if (ret)
868 return ret;
869
870 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
871 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
872 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
873 memcpy(adapter->params.api_vers, api_vers,
874 sizeof(adapter->params.api_vers));
875
876 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
877 dev_err(adapter->pdev_dev,
878 "card FW has major version %u, driver wants %u\n",
879 major, FW_VERSION_MAJOR);
880 return -EINVAL;
881 }
882
883 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
884 return 0; /* perfect match */
885
886 /* Minor/micro version mismatch. Report it but often it's OK. */
887 return 1;
888}
889
890/**
891 * t4_flash_erase_sectors - erase a range of flash sectors
892 * @adapter: the adapter
893 * @start: the first sector to erase
894 * @end: the last sector to erase
895 *
896 * Erases the sectors in the given inclusive range.
897 */
898static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
899{
900 int ret = 0;
901
902 while (start <= end) {
903 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
904 (ret = sf1_write(adapter, 4, 0, 1,
905 SF_ERASE_SECTOR | (start << 8))) != 0 ||
900a6596 906 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
56d36be4
DM
907 dev_err(adapter->pdev_dev,
908 "erase of flash sector %d failed, error %d\n",
909 start, ret);
910 break;
911 }
912 start++;
913 }
914 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
915 return ret;
916}
917
636f9d37
VP
918/**
919 * t4_flash_cfg_addr - return the address of the flash configuration file
920 * @adapter: the adapter
921 *
922 * Return the address within the flash where the Firmware Configuration
923 * File is stored.
924 */
925unsigned int t4_flash_cfg_addr(struct adapter *adapter)
926{
927 if (adapter->params.sf_size == 0x100000)
928 return FLASH_FPGA_CFG_START;
929 else
930 return FLASH_CFG_START;
931}
932
933/**
934 * t4_load_cfg - download config file
935 * @adap: the adapter
936 * @cfg_data: the cfg text file to write
937 * @size: text file size
938 *
939 * Write the supplied config text file to the card's serial flash.
940 */
941int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
942{
943 int ret, i, n;
944 unsigned int addr;
945 unsigned int flash_cfg_start_sec;
946 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
947
948 addr = t4_flash_cfg_addr(adap);
949 flash_cfg_start_sec = addr / SF_SEC_SIZE;
950
951 if (size > FLASH_CFG_MAX_SIZE) {
952 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
953 FLASH_CFG_MAX_SIZE);
954 return -EFBIG;
955 }
956
957 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
958 sf_sec_size);
959 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
960 flash_cfg_start_sec + i - 1);
961 /*
962 * If size == 0 then we're simply erasing the FLASH sectors associated
963 * with the on-adapter Firmware Configuration File.
964 */
965 if (ret || size == 0)
966 goto out;
967
968 /* this will write to the flash up to SF_PAGE_SIZE at a time */
969 for (i = 0; i < size; i += SF_PAGE_SIZE) {
970 if ((size - i) < SF_PAGE_SIZE)
971 n = size - i;
972 else
973 n = SF_PAGE_SIZE;
974 ret = t4_write_flash(adap, addr, n, cfg_data);
975 if (ret)
976 goto out;
977
978 addr += SF_PAGE_SIZE;
979 cfg_data += SF_PAGE_SIZE;
980 }
981
982out:
983 if (ret)
984 dev_err(adap->pdev_dev, "config file %s failed %d\n",
985 (size == 0 ? "clear" : "download"), ret);
986 return ret;
987}
988
56d36be4
DM
989/**
990 * t4_load_fw - download firmware
991 * @adap: the adapter
992 * @fw_data: the firmware image to write
993 * @size: image size
994 *
995 * Write the supplied firmware image to the card's serial flash.
996 */
997int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
998{
999 u32 csum;
1000 int ret, addr;
1001 unsigned int i;
1002 u8 first_page[SF_PAGE_SIZE];
404d9e3f 1003 const __be32 *p = (const __be32 *)fw_data;
56d36be4 1004 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
900a6596
DM
1005 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1006 unsigned int fw_img_start = adap->params.sf_fw_start;
1007 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
56d36be4
DM
1008
1009 if (!size) {
1010 dev_err(adap->pdev_dev, "FW image has no data\n");
1011 return -EINVAL;
1012 }
1013 if (size & 511) {
1014 dev_err(adap->pdev_dev,
1015 "FW image size not multiple of 512 bytes\n");
1016 return -EINVAL;
1017 }
1018 if (ntohs(hdr->len512) * 512 != size) {
1019 dev_err(adap->pdev_dev,
1020 "FW image size differs from size in FW header\n");
1021 return -EINVAL;
1022 }
1023 if (size > FW_MAX_SIZE) {
1024 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
1025 FW_MAX_SIZE);
1026 return -EFBIG;
1027 }
1028
1029 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1030 csum += ntohl(p[i]);
1031
1032 if (csum != 0xffffffff) {
1033 dev_err(adap->pdev_dev,
1034 "corrupted firmware image, checksum %#x\n", csum);
1035 return -EINVAL;
1036 }
1037
900a6596
DM
1038 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
1039 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
56d36be4
DM
1040 if (ret)
1041 goto out;
1042
1043 /*
1044 * We write the correct version at the end so the driver can see a bad
1045 * version if the FW write fails. Start by writing a copy of the
1046 * first page with a bad version.
1047 */
1048 memcpy(first_page, fw_data, SF_PAGE_SIZE);
1049 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
900a6596 1050 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
56d36be4
DM
1051 if (ret)
1052 goto out;
1053
900a6596 1054 addr = fw_img_start;
56d36be4
DM
1055 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1056 addr += SF_PAGE_SIZE;
1057 fw_data += SF_PAGE_SIZE;
1058 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
1059 if (ret)
1060 goto out;
1061 }
1062
1063 ret = t4_write_flash(adap,
900a6596 1064 fw_img_start + offsetof(struct fw_hdr, fw_ver),
56d36be4
DM
1065 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
1066out:
1067 if (ret)
1068 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
1069 ret);
1070 return ret;
1071}
1072
1073#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1074 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1075
1076/**
1077 * t4_link_start - apply link configuration to MAC/PHY
1078 * @phy: the PHY to setup
1079 * @mac: the MAC to setup
1080 * @lc: the requested link configuration
1081 *
1082 * Set up a port's MAC and PHY according to a desired link configuration.
1083 * - If the PHY can auto-negotiate first decide what to advertise, then
1084 * enable/disable auto-negotiation as desired, and reset.
1085 * - If the PHY does not auto-negotiate just reset it.
1086 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1087 * otherwise do it later based on the outcome of auto-negotiation.
1088 */
1089int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1090 struct link_config *lc)
1091{
1092 struct fw_port_cmd c;
1093 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
1094
1095 lc->link_ok = 0;
1096 if (lc->requested_fc & PAUSE_RX)
1097 fc |= FW_PORT_CAP_FC_RX;
1098 if (lc->requested_fc & PAUSE_TX)
1099 fc |= FW_PORT_CAP_FC_TX;
1100
1101 memset(&c, 0, sizeof(c));
1102 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1103 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1104 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1105 FW_LEN16(c));
1106
1107 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1108 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1109 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1110 } else if (lc->autoneg == AUTONEG_DISABLE) {
1111 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1112 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1113 } else
1114 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1115
1116 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1117}
1118
1119/**
1120 * t4_restart_aneg - restart autonegotiation
1121 * @adap: the adapter
1122 * @mbox: mbox to use for the FW command
1123 * @port: the port id
1124 *
1125 * Restarts autonegotiation for the selected port.
1126 */
1127int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1128{
1129 struct fw_port_cmd c;
1130
1131 memset(&c, 0, sizeof(c));
1132 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1133 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1134 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1135 FW_LEN16(c));
1136 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1137 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1138}
1139
typedef void (*int_handler_t)(struct adapter *adap);

/*
 * One entry of a table-driven interrupt handler: which status bits to test,
 * what (if anything) to report, how severe the condition is, and an optional
 * callback to run when the condition fires.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
1149
1150/**
1151 * t4_handle_intr_status - table driven interrupt handler
1152 * @adapter: the adapter that generated the interrupt
1153 * @reg: the interrupt status register to process
1154 * @acts: table of interrupt actions
1155 *
1156 * A table driven interrupt handler that applies a set of masks to an
1157 * interrupt status word and performs the corresponding actions if the
25985edc 1158 * interrupts described by the mask have occurred. The actions include
56d36be4
DM
1159 * optionally emitting a warning or alert message. The table is terminated
1160 * by an entry specifying mask 0. Returns the number of fatal interrupt
1161 * conditions.
1162 */
1163static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1164 const struct intr_info *acts)
1165{
1166 int fatal = 0;
1167 unsigned int mask = 0;
1168 unsigned int status = t4_read_reg(adapter, reg);
1169
1170 for ( ; acts->mask; ++acts) {
1171 if (!(status & acts->mask))
1172 continue;
1173 if (acts->fatal) {
1174 fatal++;
1175 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1176 status & acts->mask);
1177 } else if (acts->msg && printk_ratelimit())
1178 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1179 status & acts->mask);
8caa1e84
VP
1180 if (acts->int_handler)
1181 acts->int_handler(adapter);
56d36be4
DM
1182 mask |= acts->mask;
1183 }
1184 status &= mask;
1185 if (status) /* clear processed interrupts */
1186 t4_write_reg(adapter, reg, status);
1187 return fatal;
1188}
1189
1190/*
1191 * Interrupt handler for the PCIE module.
1192 */
1193static void pcie_intr_handler(struct adapter *adapter)
1194{
005b5717 1195 static const struct intr_info sysbus_intr_info[] = {
56d36be4
DM
1196 { RNPP, "RXNP array parity error", -1, 1 },
1197 { RPCP, "RXPC array parity error", -1, 1 },
1198 { RCIP, "RXCIF array parity error", -1, 1 },
1199 { RCCP, "Rx completions control array parity error", -1, 1 },
1200 { RFTP, "RXFT array parity error", -1, 1 },
1201 { 0 }
1202 };
005b5717 1203 static const struct intr_info pcie_port_intr_info[] = {
56d36be4
DM
1204 { TPCP, "TXPC array parity error", -1, 1 },
1205 { TNPP, "TXNP array parity error", -1, 1 },
1206 { TFTP, "TXFT array parity error", -1, 1 },
1207 { TCAP, "TXCA array parity error", -1, 1 },
1208 { TCIP, "TXCIF array parity error", -1, 1 },
1209 { RCAP, "RXCA array parity error", -1, 1 },
1210 { OTDD, "outbound request TLP discarded", -1, 1 },
1211 { RDPE, "Rx data parity error", -1, 1 },
1212 { TDUE, "Tx uncorrectable data error", -1, 1 },
1213 { 0 }
1214 };
005b5717 1215 static const struct intr_info pcie_intr_info[] = {
56d36be4
DM
1216 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1217 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1218 { MSIDATAPERR, "MSI data parity error", -1, 1 },
1219 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1220 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1221 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1222 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1223 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1224 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1225 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1226 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1227 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1228 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1229 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1230 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1231 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1232 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1233 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1234 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1235 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1236 { FIDPERR, "PCI FID parity error", -1, 1 },
1237 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1238 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1239 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1240 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1241 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1242 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1243 { PCIESINT, "PCI core secondary fault", -1, 1 },
1244 { PCIEPINT, "PCI core primary fault", -1, 1 },
1245 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1246 { 0 }
1247 };
1248
1249 int fat;
1250
1251 fat = t4_handle_intr_status(adapter,
1252 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1253 sysbus_intr_info) +
1254 t4_handle_intr_status(adapter,
1255 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1256 pcie_port_intr_info) +
1257 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1258 if (fat)
1259 t4_fatal_err(adapter);
1260}
1261
1262/*
1263 * TP interrupt handler.
1264 */
1265static void tp_intr_handler(struct adapter *adapter)
1266{
005b5717 1267 static const struct intr_info tp_intr_info[] = {
56d36be4
DM
1268 { 0x3fffffff, "TP parity error", -1, 1 },
1269 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1270 { 0 }
1271 };
1272
1273 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1274 t4_fatal_err(adapter);
1275}
1276
1277/*
1278 * SGE interrupt handler.
1279 */
1280static void sge_intr_handler(struct adapter *adapter)
1281{
1282 u64 v;
1283
005b5717 1284 static const struct intr_info sge_intr_info[] = {
56d36be4
DM
1285 { ERR_CPL_EXCEED_IQE_SIZE,
1286 "SGE received CPL exceeding IQE size", -1, 1 },
1287 { ERR_INVALID_CIDX_INC,
1288 "SGE GTS CIDX increment too large", -1, 0 },
1289 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
840f3000
VP
1290 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1291 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1292 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
56d36be4
DM
1293 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1294 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1295 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1296 0 },
1297 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1298 0 },
1299 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1300 0 },
1301 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1302 0 },
1303 { ERR_ING_CTXT_PRIO,
1304 "SGE too many priority ingress contexts", -1, 0 },
1305 { ERR_EGR_CTXT_PRIO,
1306 "SGE too many priority egress contexts", -1, 0 },
1307 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1308 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1309 { 0 }
1310 };
1311
1312 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
8caa1e84 1313 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
56d36be4
DM
1314 if (v) {
1315 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
8caa1e84 1316 (unsigned long long)v);
56d36be4
DM
1317 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1318 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1319 }
1320
1321 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1322 v != 0)
1323 t4_fatal_err(adapter);
1324}
1325
1326/*
1327 * CIM interrupt handler.
1328 */
1329static void cim_intr_handler(struct adapter *adapter)
1330{
005b5717 1331 static const struct intr_info cim_intr_info[] = {
56d36be4
DM
1332 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1333 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1334 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1335 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1336 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1337 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1338 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1339 { 0 }
1340 };
005b5717 1341 static const struct intr_info cim_upintr_info[] = {
56d36be4
DM
1342 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1343 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1344 { ILLWRINT, "CIM illegal write", -1, 1 },
1345 { ILLRDINT, "CIM illegal read", -1, 1 },
1346 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1347 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1348 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1349 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1350 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1351 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1352 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1353 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1354 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1355 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1356 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1357 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1358 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1359 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1360 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1361 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1362 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1363 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1364 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1365 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1366 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1367 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1368 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1369 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1370 { 0 }
1371 };
1372
1373 int fat;
1374
1375 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1376 cim_intr_info) +
1377 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1378 cim_upintr_info);
1379 if (fat)
1380 t4_fatal_err(adapter);
1381}
1382
1383/*
1384 * ULP RX interrupt handler.
1385 */
1386static void ulprx_intr_handler(struct adapter *adapter)
1387{
005b5717 1388 static const struct intr_info ulprx_intr_info[] = {
91e9a1ec 1389 { 0x1800000, "ULPRX context error", -1, 1 },
56d36be4
DM
1390 { 0x7fffff, "ULPRX parity error", -1, 1 },
1391 { 0 }
1392 };
1393
1394 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1395 t4_fatal_err(adapter);
1396}
1397
1398/*
1399 * ULP TX interrupt handler.
1400 */
1401static void ulptx_intr_handler(struct adapter *adapter)
1402{
005b5717 1403 static const struct intr_info ulptx_intr_info[] = {
56d36be4
DM
1404 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1405 0 },
1406 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1407 0 },
1408 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1409 0 },
1410 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1411 0 },
1412 { 0xfffffff, "ULPTX parity error", -1, 1 },
1413 { 0 }
1414 };
1415
1416 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1417 t4_fatal_err(adapter);
1418}
1419
1420/*
1421 * PM TX interrupt handler.
1422 */
1423static void pmtx_intr_handler(struct adapter *adapter)
1424{
005b5717 1425 static const struct intr_info pmtx_intr_info[] = {
56d36be4
DM
1426 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1427 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1428 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1429 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1430 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1431 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1432 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1433 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1434 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1435 { 0 }
1436 };
1437
1438 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1439 t4_fatal_err(adapter);
1440}
1441
1442/*
1443 * PM RX interrupt handler.
1444 */
1445static void pmrx_intr_handler(struct adapter *adapter)
1446{
005b5717 1447 static const struct intr_info pmrx_intr_info[] = {
56d36be4
DM
1448 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1449 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1450 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1451 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1452 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1453 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1454 { 0 }
1455 };
1456
1457 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1458 t4_fatal_err(adapter);
1459}
1460
1461/*
1462 * CPL switch interrupt handler.
1463 */
1464static void cplsw_intr_handler(struct adapter *adapter)
1465{
005b5717 1466 static const struct intr_info cplsw_intr_info[] = {
56d36be4
DM
1467 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1468 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1469 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1470 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1471 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1472 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1473 { 0 }
1474 };
1475
1476 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1477 t4_fatal_err(adapter);
1478}
1479
1480/*
1481 * LE interrupt handler.
1482 */
1483static void le_intr_handler(struct adapter *adap)
1484{
005b5717 1485 static const struct intr_info le_intr_info[] = {
56d36be4
DM
1486 { LIPMISS, "LE LIP miss", -1, 0 },
1487 { LIP0, "LE 0 LIP error", -1, 0 },
1488 { PARITYERR, "LE parity error", -1, 1 },
1489 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1490 { REQQPARERR, "LE request queue parity error", -1, 1 },
1491 { 0 }
1492 };
1493
1494 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1495 t4_fatal_err(adap);
1496}
1497
1498/*
1499 * MPS interrupt handler.
1500 */
1501static void mps_intr_handler(struct adapter *adapter)
1502{
005b5717 1503 static const struct intr_info mps_rx_intr_info[] = {
56d36be4
DM
1504 { 0xffffff, "MPS Rx parity error", -1, 1 },
1505 { 0 }
1506 };
005b5717 1507 static const struct intr_info mps_tx_intr_info[] = {
56d36be4
DM
1508 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1509 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1510 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1511 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1512 { BUBBLE, "MPS Tx underflow", -1, 1 },
1513 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1514 { FRMERR, "MPS Tx framing error", -1, 1 },
1515 { 0 }
1516 };
005b5717 1517 static const struct intr_info mps_trc_intr_info[] = {
56d36be4
DM
1518 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1519 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1520 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1521 { 0 }
1522 };
005b5717 1523 static const struct intr_info mps_stat_sram_intr_info[] = {
56d36be4
DM
1524 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1525 { 0 }
1526 };
005b5717 1527 static const struct intr_info mps_stat_tx_intr_info[] = {
56d36be4
DM
1528 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1529 { 0 }
1530 };
005b5717 1531 static const struct intr_info mps_stat_rx_intr_info[] = {
56d36be4
DM
1532 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1533 { 0 }
1534 };
005b5717 1535 static const struct intr_info mps_cls_intr_info[] = {
56d36be4
DM
1536 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1537 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1538 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1539 { 0 }
1540 };
1541
1542 int fat;
1543
1544 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1545 mps_rx_intr_info) +
1546 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1547 mps_tx_intr_info) +
1548 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1549 mps_trc_intr_info) +
1550 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1551 mps_stat_sram_intr_info) +
1552 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1553 mps_stat_tx_intr_info) +
1554 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1555 mps_stat_rx_intr_info) +
1556 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1557 mps_cls_intr_info);
1558
1559 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1560 RXINT | TXINT | STATINT);
1561 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1562 if (fat)
1563 t4_fatal_err(adapter);
1564}
1565
1566#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1567
1568/*
1569 * EDC/MC interrupt handler.
1570 */
1571static void mem_intr_handler(struct adapter *adapter, int idx)
1572{
1573 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1574
1575 unsigned int addr, cnt_addr, v;
1576
1577 if (idx <= MEM_EDC1) {
1578 addr = EDC_REG(EDC_INT_CAUSE, idx);
1579 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1580 } else {
1581 addr = MC_INT_CAUSE;
1582 cnt_addr = MC_ECC_STATUS;
1583 }
1584
1585 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1586 if (v & PERR_INT_CAUSE)
1587 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1588 name[idx]);
1589 if (v & ECC_CE_INT_CAUSE) {
1590 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1591
1592 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1593 if (printk_ratelimit())
1594 dev_warn(adapter->pdev_dev,
1595 "%u %s correctable ECC data error%s\n",
1596 cnt, name[idx], cnt > 1 ? "s" : "");
1597 }
1598 if (v & ECC_UE_INT_CAUSE)
1599 dev_alert(adapter->pdev_dev,
1600 "%s uncorrectable ECC data error\n", name[idx]);
1601
1602 t4_write_reg(adapter, addr, v);
1603 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1604 t4_fatal_err(adapter);
1605}
1606
1607/*
1608 * MA interrupt handler.
1609 */
1610static void ma_intr_handler(struct adapter *adap)
1611{
1612 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1613
1614 if (status & MEM_PERR_INT_CAUSE)
1615 dev_alert(adap->pdev_dev,
1616 "MA parity error, parity status %#x\n",
1617 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1618 if (status & MEM_WRAP_INT_CAUSE) {
1619 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1620 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1621 "client %u to address %#x\n",
1622 MEM_WRAP_CLIENT_NUM_GET(v),
1623 MEM_WRAP_ADDRESS_GET(v) << 4);
1624 }
1625 t4_write_reg(adap, MA_INT_CAUSE, status);
1626 t4_fatal_err(adap);
1627}
1628
1629/*
1630 * SMB interrupt handler.
1631 */
1632static void smb_intr_handler(struct adapter *adap)
1633{
005b5717 1634 static const struct intr_info smb_intr_info[] = {
56d36be4
DM
1635 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1636 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1637 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1638 { 0 }
1639 };
1640
1641 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1642 t4_fatal_err(adap);
1643}
1644
1645/*
1646 * NC-SI interrupt handler.
1647 */
1648static void ncsi_intr_handler(struct adapter *adap)
1649{
005b5717 1650 static const struct intr_info ncsi_intr_info[] = {
56d36be4
DM
1651 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1652 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1653 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1654 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1655 { 0 }
1656 };
1657
1658 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1659 t4_fatal_err(adap);
1660}
1661
1662/*
1663 * XGMAC interrupt handler.
1664 */
1665static void xgmac_intr_handler(struct adapter *adap, int port)
1666{
1667 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1668
1669 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1670 if (!v)
1671 return;
1672
1673 if (v & TXFIFO_PRTY_ERR)
1674 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1675 port);
1676 if (v & RXFIFO_PRTY_ERR)
1677 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1678 port);
1679 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1680 t4_fatal_err(adap);
1681}
1682
1683/*
1684 * PL interrupt handler.
1685 */
1686static void pl_intr_handler(struct adapter *adap)
1687{
005b5717 1688 static const struct intr_info pl_intr_info[] = {
56d36be4
DM
1689 { FATALPERR, "T4 fatal parity error", -1, 1 },
1690 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1691 { 0 }
1692 };
1693
1694 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1695 t4_fatal_err(adap);
1696}
1697
63bcceec 1698#define PF_INTR_MASK (PFSW)
56d36be4
DM
1699#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1700 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1701 CPL_SWITCH | SGE | ULP_TX)
1702
1703/**
1704 * t4_slow_intr_handler - control path interrupt handler
1705 * @adapter: the adapter
1706 *
1707 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1708 * The designation 'slow' is because it involves register reads, while
1709 * data interrupts typically don't involve any MMIOs.
1710 */
1711int t4_slow_intr_handler(struct adapter *adapter)
1712{
1713 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1714
1715 if (!(cause & GLBL_INTR_MASK))
1716 return 0;
1717 if (cause & CIM)
1718 cim_intr_handler(adapter);
1719 if (cause & MPS)
1720 mps_intr_handler(adapter);
1721 if (cause & NCSI)
1722 ncsi_intr_handler(adapter);
1723 if (cause & PL)
1724 pl_intr_handler(adapter);
1725 if (cause & SMB)
1726 smb_intr_handler(adapter);
1727 if (cause & XGMAC0)
1728 xgmac_intr_handler(adapter, 0);
1729 if (cause & XGMAC1)
1730 xgmac_intr_handler(adapter, 1);
1731 if (cause & XGMAC_KR0)
1732 xgmac_intr_handler(adapter, 2);
1733 if (cause & XGMAC_KR1)
1734 xgmac_intr_handler(adapter, 3);
1735 if (cause & PCIE)
1736 pcie_intr_handler(adapter);
1737 if (cause & MC)
1738 mem_intr_handler(adapter, MEM_MC);
1739 if (cause & EDC0)
1740 mem_intr_handler(adapter, MEM_EDC0);
1741 if (cause & EDC1)
1742 mem_intr_handler(adapter, MEM_EDC1);
1743 if (cause & LE)
1744 le_intr_handler(adapter);
1745 if (cause & TP)
1746 tp_intr_handler(adapter);
1747 if (cause & MA)
1748 ma_intr_handler(adapter);
1749 if (cause & PM_TX)
1750 pmtx_intr_handler(adapter);
1751 if (cause & PM_RX)
1752 pmrx_intr_handler(adapter);
1753 if (cause & ULP_RX)
1754 ulprx_intr_handler(adapter);
1755 if (cause & CPL_SWITCH)
1756 cplsw_intr_handler(adapter);
1757 if (cause & SGE)
1758 sge_intr_handler(adapter);
1759 if (cause & ULP_TX)
1760 ulptx_intr_handler(adapter);
1761
1762 /* Clear the interrupts just processed for which we are the master. */
1763 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1764 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1765 return 1;
1766}
1767
1768/**
1769 * t4_intr_enable - enable interrupts
1770 * @adapter: the adapter whose interrupts should be enabled
1771 *
1772 * Enable PF-specific interrupts for the calling function and the top-level
1773 * interrupt concentrator for global interrupts. Interrupts are already
1774 * enabled at each module, here we just enable the roots of the interrupt
1775 * hierarchies.
1776 *
1777 * Note: this function should be called only when the driver manages
1778 * non PF-specific interrupts from the various HW modules. Only one PCI
1779 * function at a time should be doing this.
1780 */
1781void t4_intr_enable(struct adapter *adapter)
1782{
1783 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1784
1785 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1786 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1787 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1788 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1789 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1790 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1791 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
840f3000 1792 DBFIFO_HP_INT | DBFIFO_LP_INT |
56d36be4
DM
1793 EGRESS_SIZE_ERR);
1794 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1795 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1796}
1797
1798/**
1799 * t4_intr_disable - disable interrupts
1800 * @adapter: the adapter whose interrupts should be disabled
1801 *
1802 * Disable interrupts. We only disable the top-level interrupt
1803 * concentrators. The caller must be a PCI function managing global
1804 * interrupts.
1805 */
1806void t4_intr_disable(struct adapter *adapter)
1807{
1808 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1809
1810 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1811 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1812}
1813
56d36be4
DM
1814/**
1815 * hash_mac_addr - return the hash value of a MAC address
1816 * @addr: the 48-bit Ethernet MAC address
1817 *
1818 * Hashes a MAC address according to the hash function used by HW inexact
1819 * (hash) address matching.
1820 */
1821static int hash_mac_addr(const u8 *addr)
1822{
1823 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1824 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1825 a ^= b;
1826 a ^= (a >> 12);
1827 a ^= (a >> 6);
1828 return a & 0x3f;
1829}
1830
/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the response queue lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed for
 *	@viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	/* Cursor into @rspq; it wraps back to the start so the supplied
	 * values are reused cyclically when nrspq < n.
	 */
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE |
			       FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = htonl(FW_LEN16(cmd));

	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
	while (n > 0) {
		int nq = min(n, 32);
		__be32 *qp = &cmd.iq0_to_iq2;

		cmd.niqid = htons(nq);
		cmd.startidx = htons(start);

		start += nq;
		n -= nq;

		/* Pack the queue ids three at a time into the 32-bit
		 * IQ0/IQ1/IQ2 words of the command.  The last word may carry
		 * fewer than three live entries (nq can go negative here);
		 * niqid above tells the firmware how many ids are valid.
		 */
		while (nq > 0) {
			unsigned int v;

			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;

			*qp++ = htonl(v);
			nq -= 3;
		}

		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
1896
1897/**
1898 * t4_config_glbl_rss - configure the global RSS mode
1899 * @adapter: the adapter
1900 * @mbox: mbox to use for the FW command
1901 * @mode: global RSS mode
1902 * @flags: mode-specific flags
1903 *
1904 * Sets the global RSS mode.
1905 */
1906int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1907 unsigned int flags)
1908{
1909 struct fw_rss_glb_config_cmd c;
1910
1911 memset(&c, 0, sizeof(c));
1912 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1913 FW_CMD_REQUEST | FW_CMD_WRITE);
1914 c.retval_len16 = htonl(FW_LEN16(c));
1915 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1916 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1917 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1918 c.u.basicvirtual.mode_pkd =
1919 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1920 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1921 } else
1922 return -EINVAL;
1923 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1924}
1925
56d36be4
DM
/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	/* Scratch buffer sized for one full block of TCP MIB counters,
	 * from OUT_RST through RXT_SEG_LO.
	 */
	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];

	/* STAT_IDX() maps a counter name to its offset within val[] relative
	 * to the block base.  The same offsets serve both the IPv4 and IPv6
	 * blocks because only the base index passed to t4_read_indirect()
	 * below differs.
	 */
#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
		v4->tcpOutRsts = STAT(OUT_RST);
		v4->tcpInSegs = STAT64(IN_SEG);
		v4->tcpOutSegs = STAT64(OUT_SEG);
		v4->tcpRetransSegs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* Same layout, read starting at the IPv6 base counter. */
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
		v6->tcpOutRsts = STAT(OUT_RST);
		v6->tcpInSegs = STAT64(IN_SEG);
		v6->tcpOutSegs = STAT64(OUT_SEG);
		v6->tcpRetransSegs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
1964
56d36be4
DM
/**
 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
 *	@adap: the adapter
 *	@mtus: where to store the MTU values
 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 *	Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/* NOTE(review): the index field is written as 0xff with the
		 * table slot carried in the value field — presumably the
		 * special read-access encoding of TP_MTU_TABLE; confirm
		 * against the T4 register documentation.
		 */
		t4_write_reg(adap, TP_MTU_TABLE,
			     MTUINDEX(0xff) | MTUVALUE(i));
		v = t4_read_reg(adap, TP_MTU_TABLE);
		mtus[i] = MTUVALUE_GET(v);
		if (mtu_log)
			mtu_log[i] = MTUWIDTH_GET(v);
	}
}
1987
636f9d37
VP
/**
 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@adap: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	/* Select the indirect register, then read-modify-write its data:
	 * preserve every bit outside @mask, replace the bits inside it.
	 */
	t4_write_reg(adap, TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, TP_PIO_DATA, val);
}
2004
56d36be4
DM
/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Initialize the congestion control parameters by copying in the
 *	default alpha/beta tables (32 entries each).
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short dflt_alpha[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short dflt_beta[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	int i;

	for (i = 0; i < 32; i++) {
		a[i] = dflt_alpha[i];
		b[i] = dflt_beta[i];
	}
}
2048
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Per-window average packet counts used to scale alpha into a
	 * per-window additive increment.
	 */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/* fls() gives the MSB position; step back one if the bit two
		 * below the MSB is clear, i.e. mtu is closer to the lower
		 * power of two.
		 */
		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
			     MTUWIDTH(log2) | MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* Additive increment: alpha scaled by (mtu - 40)
			 * per average packet — presumably 40 accounts for
			 * TCP/IP header bytes (confirm) — clamped at
			 * CC_MIN_INCR from below.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
2095
56d36be4
DM
2096/**
2097 * get_mps_bg_map - return the buffer groups associated with a port
2098 * @adap: the adapter
2099 * @idx: the port index
2100 *
2101 * Returns a bitmap indicating which MPS buffer groups are associated
2102 * with the given port. Bit i is set if buffer group i is used by the
2103 * port.
2104 */
2105static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2106{
2107 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2108
2109 if (n == 0)
2110 return idx == 0 ? 0xf : 0;
2111 if (n == 1)
2112 return idx < 2 ? (3 << (2 * idx)) : 0;
2113 return 1 << idx;
2114}
2115
/**
 *	t4_get_port_stats - collect port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *	@p: the stats structure to fill
 *
 *	Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = get_mps_bg_map(adap, idx);

	/* GET_STAT reads a 64-bit per-port MPS counter; GET_STAT_COM reads
	 * a 64-bit counter common to all ports.
	 */
#define GET_STAT(name) \
	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop             = GET_STAT(TX_PORT_DROP);
	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);

	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);

	/* Buffer-group drop/truncate counters only apply to the groups this
	 * port owns (see get_mps_bg_map()); others are reported as zero.
	 */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
2196
56d36be4
DM
2197/**
2198 * t4_wol_magic_enable - enable/disable magic packet WoL
2199 * @adap: the adapter
2200 * @port: the physical port index
2201 * @addr: MAC address expected in magic packets, %NULL to disable
2202 *
2203 * Enables/disables magic packet wake-on-LAN for the selected port.
2204 */
2205void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2206 const u8 *addr)
2207{
2208 if (addr) {
2209 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2210 (addr[2] << 24) | (addr[3] << 16) |
2211 (addr[4] << 8) | addr[5]);
2212 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2213 (addr[0] << 8) | addr[1]);
2214 }
2215 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2216 addr ? MAGICEN : 0);
2217}
2218
/**
 *	t4_wol_pat_enable - enable/disable pattern-based WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@map: bitmap of which HW pattern filters to set
 *	@mask0: byte mask for bytes 0-63 of a packet
 *	@mask1: byte mask for bytes 64-127 of a packet
 *	@crc: Ethernet CRC for selected bytes
 *	@enable: enable/disable switch
 *
 *	Sets the pattern filters indicated in @map to mask out the bytes
 *	specified in @mask0/@mask1 in received packets and compare the CRC of
 *	the resulting packet against @crc.  If @enable is %true pattern-based
 *	WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;

	if (!enable) {
		t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
				 PATEN, 0);
		return 0;
	}
	/* @map must not select more than the NWOL_PAT filters iterated
	 * over below.
	 */
	if (map > 0xff)
		return -EINVAL;

#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)

	/* DATA1-DATA3 hold the upper 96 bits of the 128-bit byte mask and
	 * are shared by every pattern written below; only DATA0 (low 32
	 * bits of mask0, then the CRC) changes per EPIO write.
	 */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		/* NOTE(review): SF_BUSY is the serial-flash busy bit;
		 * presumably the EPIO OP register shares the same busy-bit
		 * position — confirm against the T4 register documentation.
		 */
		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
			return -ETIMEDOUT;

		/* write CRC */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
	return 0;
}
2276
f2b7e78d
VP
2277/* t4_mk_filtdelwr - create a delete filter WR
2278 * @ftid: the filter ID
2279 * @wr: the filter work request to populate
2280 * @qid: ingress queue to receive the delete notification
2281 *
2282 * Creates a filter work request to delete the supplied filter. If @qid is
2283 * negative the delete notification is suppressed.
2284 */
2285void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2286{
2287 memset(wr, 0, sizeof(*wr));
2288 wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
2289 wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
2290 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
2291 V_FW_FILTER_WR_NOREPLY(qid < 0));
2292 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
2293 if (qid >= 0)
2294 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
2295}
2296
56d36be4
DM
/* INIT_CMD - fill in the common header of a firmware mailbox command:
 * the opcode built from the command name, the REQUEST flag, the READ or
 * WRITE direction, and the command length in 16-byte units.  @var is
 * expected to be a fw_*_cmd struct exposing op_to_write/retval_len16
 * fields (see the callers below).
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
2302
8caa1e84
VP
/**
 *	t4_fwaddrspace_write - write a value into the firmware address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues a FW LDST command with the FIRMWARE address space to write
 *	@val at @addr.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE |
			    FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.addrval.addr = htonl(addr);
	c.u.addrval.val = htonl(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2318
/**
 *	t4_mem_win_read_len - read memory through PCIE memory window
 *	@adap: the adapter
 *	@addr: address of first byte requested, aligned on 32b.
 *	@data: len bytes to hold the data read
 *	@len: amount of data to read from window.  Must be <=
 *	      MEMWIN0_APERTURE after adjusting for 16B alignment
 *	      requirements of the memory window.
 *
 *	Read len bytes of data from MC starting at @addr.
 */
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
{
	int i;
	int off;

	/*
	 * Align on a 16B boundary.  @addr itself must be 32-bit aligned,
	 * and the requested span must fit inside the window aperture once
	 * the within-16B offset is accounted for.
	 */
	off = addr & 15;
	if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
		return -EINVAL;

	/* Position the window, then read it back to flush the write. */
	t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
	t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);

	for (i = 0; i < len; i += 4)
		*data++ = (__force __be32) t4_read_reg(adap,
			(MEMWIN0_BASE + off + i));

	return 0;
}
2351
56d36be4
DM
/**
 *	t4_mdio_rd - read a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to read
 *	@valp: where to store the value
 *
 *	Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp)
{
	int ret;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
		FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
				   FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = htons(reg);

	/* The command is issued and its completion (with the read value)
	 * is returned in the same buffer.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		*valp = ntohs(c.u.mdio.rval);
	return ret;
}
2382
/**
 *	t4_mdio_wr - write a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to write
 *	@val: value to write
 *
 *	Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val)
{
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
		FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
				   FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = htons(reg);
	c.u.mdio.rval = htons(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2410
/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = htonl(
		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
				      FW_HELLO_CMD_MBMASTER_MASK) |
		FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret < 0) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		return ret;
	}

	v = ntohl(c.err_to_clearinit);
	master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
	if (state) {
		if (v & FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
	 */
	if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
			if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & FW_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & FW_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
			    (pcie_fw & FW_PCIE_FW_MASTER_VLD))
				master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
2536
2537/**
2538 * t4_fw_bye - end communication with FW
2539 * @adap: the adapter
2540 * @mbox: mailbox to use for the FW command
2541 *
2542 * Issues a command to terminate communication with FW.
2543 */
2544int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2545{
2546 struct fw_bye_cmd c;
2547
0062b15c 2548 memset(&c, 0, sizeof(c));
56d36be4
DM
2549 INIT_CMD(c, BYE, WRITE);
2550 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2551}
2552
/**
 *	t4_early_init - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
int t4_early_init(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2569
/**
 *	t4_fw_reset - issue a reset to FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@reset: specifies the type of reset to perform
 *
 *	Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = htonl(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2587
26f7cbc0
VP
/**
 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	FW_PCIE_FW_MASTER_MASK).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware
 *	...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= FW_PCIE_FW_MASTER_MASK) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = htonl(PIORST | PIORSTMODE);
		c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
				 FW_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}
2647
/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if @reset)
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by t4_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	   the chip's microprocessor out of RESET.  This will cause the
 *	   firmware to start up from its start vector.  And then we'll loop
 *	   until the firmware indicates it's started again (PCIE_FW.HALT
 *	   reset to 0) or we timeout.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	   the chip since older firmware won't recognize the PCIE_FW.HALT
 *	   flag and automatically RESET itself on startup.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= FW_PCIE_FW_MASTER_MASK) {
			t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					PIORST | PIORSTMODE) == 0)
				return 0;
		}

		t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET, then poll PCIE_FW.HALT until the
		 * firmware clears it or we hit FW_CMD_MAX_TIMEOUT.
		 */
		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
2710
/**
 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	/* Halt the existing firmware first; bail unless the caller insists. */
	ret = t4_fw_halt(adap, mbox, force);
	if (ret < 0 && !force)
		return ret;

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return t4_fw_restart(adap, mbox, reset);
}
2757
2758
636f9d37
VP
/**
 *	t4_fw_config_file - setup an adapter via a Configuration File
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@mtype: the memory type where the Configuration File is located
 *	@maddr: the memory address where the Configuration File is located
 *	@finiver: return value for CF [fini] version
 *	@finicsum: return value for CF [fini] checksum
 *	@cfcsum: return value for CF computed checksum
 *
 *	Issue a command to get the firmware to process the Configuration
 *	File located at the specified mtype/maddr.  If the Configuration
 *	File is processed successfully and return value pointers are
 *	provided, the Configuration File "[fini]" section version and
 *	checksum values will be returned along with the computed checksum.
 *	It's up to the caller to decide how it wants to respond to the
 *	checksums not matching but it recommended that a prominent warning
 *	be emitted in order to help people rapidly identify changed or
 *	corrupted Configuration Files.
 *
 *	Also note that it's possible to modify things like "niccaps",
 *	"toecaps",etc. between processing the Configuration File and telling
 *	the firmware to use the new configuration.  Callers which want to
 *	do this will need to "hand-roll" their own CAPS_CONFIGS commands for
 *	Configuration Files if they want to do this.
 */
int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
		      unsigned int mtype, unsigned int maddr,
		      u32 *finiver, u32 *finicsum, u32 *cfcsum)
{
	struct fw_caps_config_cmd caps_cmd;
	int ret;

	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
	if (ret < 0)
		return ret;

	if (finiver)
		*finiver = ntohl(caps_cmd.finiver);
	if (finicsum)
		*finicsum = ntohl(caps_cmd.finicsum);
	if (cfcsum)
		*cfcsum = ntohl(caps_cmd.cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 * Note that caps_cmd is deliberately NOT re-zeroed here: the
	 * capability fields which the READ reply filled in are sent back
	 * to the firmware unchanged so it enables exactly those
	 * capabilities; only the opcode and length words are rewritten.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_WRITE);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
}
/**
 *	t4_fixup_host_params - fix up host-dependent parameters
 *	@adap: the adapter
 *	@page_size: the host's Base Page Size
 *	@cache_line_size: the host's Cache Line Size
 *
 *	Various registers in T4 contain values which are dependent on the
 *	host's Base Page and Cache Line Sizes.  This function will fix all of
 *	those registers with the appropriate values as passed in ...
 *
 *	Always returns 0.
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	unsigned int page_shift = fls(page_size) - 1;	/* log2(page_size) */
	/* NOTE(review): HOSTPAGESIZEPFn fields appear to encode
	 * log2(page size) - 10, i.e. pages relative to 1KB -- confirm
	 * against the T4 SGE register spec.
	 */
	unsigned int sge_hps = page_shift - 10;
	/* Egress status page is 128B when cache lines exceed 64B. */
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	/* Free-list alignment follows the cache line, floored at 32B. */
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	/* Program the same host page size for all eight PFs. */
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
		     HOSTPAGESIZEPF0(sge_hps) |
		     HOSTPAGESIZEPF1(sge_hps) |
		     HOSTPAGESIZEPF2(sge_hps) |
		     HOSTPAGESIZEPF3(sge_hps) |
		     HOSTPAGESIZEPF4(sge_hps) |
		     HOSTPAGESIZEPF5(sge_hps) |
		     HOSTPAGESIZEPF6(sge_hps) |
		     HOSTPAGESIZEPF7(sge_hps));

	/* Ingress padding boundary is encoded as log2 - 5 here; the status
	 * page size bit selects 128B when stat_len != 64.
	 */
	t4_set_reg_field(adap, SGE_CONTROL,
			 INGPADBOUNDARY_MASK |
			 EGRSTATUSPAGESIZE_MASK,
			 INGPADBOUNDARY(fl_align_log - 5) |
			 EGRSTATUSPAGESIZE(stat_len != 64));

	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
	/* Round the MTU buffer sizes up to the free-list alignment. */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
		     & ~(fl_align-1));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
		     & ~(fl_align-1));

	/* NOTE(review): TDDP page size looks to be encoded relative to 4KB
	 * (log2(page) - 12) -- confirm against the ULP_RX spec.
	 */
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));

	return 0;
}
2899/**
2900 * t4_fw_initialize - ask FW to initialize the device
2901 * @adap: the adapter
2902 * @mbox: mailbox to use for the FW command
2903 *
2904 * Issues a command to FW to partially initialize the device. This
2905 * performs initialization that generally doesn't depend on user input.
2906 */
2907int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
2908{
2909 struct fw_initialize_cmd c;
2910
2911 memset(&c, 0, sizeof(c));
2912 INIT_CMD(c, INITIALIZE, WRITE);
2913 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2914}
2915
56d36be4
DM
2916/**
2917 * t4_query_params - query FW or device parameters
2918 * @adap: the adapter
2919 * @mbox: mailbox to use for the FW command
2920 * @pf: the PF
2921 * @vf: the VF
2922 * @nparams: the number of parameters
2923 * @params: the parameter names
2924 * @val: the parameter values
2925 *
2926 * Reads the value of FW or device parameters. Up to 7 parameters can be
2927 * queried at once.
2928 */
2929int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2930 unsigned int vf, unsigned int nparams, const u32 *params,
2931 u32 *val)
2932{
2933 int i, ret;
2934 struct fw_params_cmd c;
2935 __be32 *p = &c.param[0].mnem;
2936
2937 if (nparams > 7)
2938 return -EINVAL;
2939
2940 memset(&c, 0, sizeof(c));
2941 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2942 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2943 FW_PARAMS_CMD_VFN(vf));
2944 c.retval_len16 = htonl(FW_LEN16(c));
2945 for (i = 0; i < nparams; i++, p += 2)
2946 *p = htonl(*params++);
2947
2948 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2949 if (ret == 0)
2950 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2951 *val++ = ntohl(*p);
2952 return ret;
2953}
2954
2955/**
2956 * t4_set_params - sets FW or device parameters
2957 * @adap: the adapter
2958 * @mbox: mailbox to use for the FW command
2959 * @pf: the PF
2960 * @vf: the VF
2961 * @nparams: the number of parameters
2962 * @params: the parameter names
2963 * @val: the parameter values
2964 *
2965 * Sets the value of FW or device parameters. Up to 7 parameters can be
2966 * specified at once.
2967 */
2968int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2969 unsigned int vf, unsigned int nparams, const u32 *params,
2970 const u32 *val)
2971{
2972 struct fw_params_cmd c;
2973 __be32 *p = &c.param[0].mnem;
2974
2975 if (nparams > 7)
2976 return -EINVAL;
2977
2978 memset(&c, 0, sizeof(c));
2979 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2980 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2981 FW_PARAMS_CMD_VFN(vf));
2982 c.retval_len16 = htonl(FW_LEN16(c));
2983 while (nparams--) {
2984 *p++ = htonl(*params++);
2985 *p++ = htonl(*val++);
2986 }
2987
2988 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2989}
2990
2991/**
2992 * t4_cfg_pfvf - configure PF/VF resource limits
2993 * @adap: the adapter
2994 * @mbox: mailbox to use for the FW command
2995 * @pf: the PF being configured
2996 * @vf: the VF being configured
2997 * @txq: the max number of egress queues
2998 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2999 * @rxqi: the max number of interrupt-capable ingress queues
3000 * @rxq: the max number of interruptless ingress queues
3001 * @tc: the PCI traffic class
3002 * @vi: the max number of virtual interfaces
3003 * @cmask: the channel access rights mask for the PF/VF
3004 * @pmask: the port access rights mask for the PF/VF
3005 * @nexact: the maximum number of exact MPS filters
3006 * @rcaps: read capabilities
3007 * @wxcaps: write/execute capabilities
3008 *
3009 * Configures resource limits and capabilities for a physical or virtual
3010 * function.
3011 */
3012int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3013 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3014 unsigned int rxqi, unsigned int rxq, unsigned int tc,
3015 unsigned int vi, unsigned int cmask, unsigned int pmask,
3016 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3017{
3018 struct fw_pfvf_cmd c;
3019
3020 memset(&c, 0, sizeof(c));
3021 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
3022 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
3023 FW_PFVF_CMD_VFN(vf));
3024 c.retval_len16 = htonl(FW_LEN16(c));
3025 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
3026 FW_PFVF_CMD_NIQ(rxq));
81323b74 3027 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
56d36be4
DM
3028 FW_PFVF_CMD_PMASK(pmask) |
3029 FW_PFVF_CMD_NEQ(txq));
3030 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
3031 FW_PFVF_CMD_NEXACTF(nexact));
3032 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
3033 FW_PFVF_CMD_WX_CAPS(wxcaps) |
3034 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
3035 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3036}
3037
/**
 *	t4_alloc_vi - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	/* Firmware expects the count of *extra* MACs beyond the first. */
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		/* First address always comes back in c.mac; the cascade
		 * below copies each additional address at its 6-byte offset.
		 */
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	/* The non-negative VI id is the success return value. */
	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}
3092/**
3093 * t4_set_rxmode - set Rx properties of a virtual interface
3094 * @adap: the adapter
3095 * @mbox: mailbox to use for the FW command
3096 * @viid: the VI id
3097 * @mtu: the new MTU or -1
3098 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3099 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3100 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
f8f5aafa 3101 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
56d36be4
DM
3102 * @sleep_ok: if true we may sleep while awaiting command completion
3103 *
3104 * Sets Rx properties of a virtual interface.
3105 */
3106int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
f8f5aafa
DM
3107 int mtu, int promisc, int all_multi, int bcast, int vlanex,
3108 bool sleep_ok)
56d36be4
DM
3109{
3110 struct fw_vi_rxmode_cmd c;
3111
3112 /* convert to FW values */
3113 if (mtu < 0)
3114 mtu = FW_RXMODE_MTU_NO_CHG;
3115 if (promisc < 0)
3116 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
3117 if (all_multi < 0)
3118 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
3119 if (bcast < 0)
3120 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
f8f5aafa
DM
3121 if (vlanex < 0)
3122 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
56d36be4
DM
3123
3124 memset(&c, 0, sizeof(c));
3125 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
3126 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
3127 c.retval_len16 = htonl(FW_LEN16(c));
f8f5aafa
DM
3128 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
3129 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3130 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3131 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3132 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
56d36be4
DM
3133 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3134}
3135
3136/**
3137 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
3138 * @adap: the adapter
3139 * @mbox: mailbox to use for the FW command
3140 * @viid: the VI id
3141 * @free: if true any existing filters for this VI id are first removed
3142 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
3143 * @addr: the MAC address(es)
3144 * @idx: where to store the index of each allocated filter
3145 * @hash: pointer to hash address filter bitmap
3146 * @sleep_ok: call is allowed to sleep
3147 *
3148 * Allocates an exact-match filter for each of the supplied addresses and
3149 * sets it to the corresponding address. If @idx is not %NULL it should
3150 * have at least @naddr entries, each of which will be set to the index of
3151 * the filter allocated for the corresponding MAC address. If a filter
3152 * could not be allocated for an address its index is set to 0xffff.
3153 * If @hash is not %NULL addresses that fail to allocate an exact filter
3154 * are hashed and update the hash filter bitmap pointed at by @hash.
3155 *
3156 * Returns a negative error number or the number of filters allocated.
3157 */
3158int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3159 unsigned int viid, bool free, unsigned int naddr,
3160 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3161{
3162 int i, ret;
3163 struct fw_vi_mac_cmd c;
3164 struct fw_vi_mac_exact *p;
3165
3166 if (naddr > 7)
3167 return -EINVAL;
3168
3169 memset(&c, 0, sizeof(c));
3170 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3171 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
3172 FW_VI_MAC_CMD_VIID(viid));
3173 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
3174 FW_CMD_LEN16((naddr + 2) / 2));
3175
3176 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3177 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3178 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
3179 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3180 }
3181
3182 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3183 if (ret)
3184 return ret;
3185
3186 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3187 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3188
3189 if (idx)
3190 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
3191 if (index < NEXACT_MAC)
3192 ret++;
3193 else if (hash)
ce9aeb58 3194 *hash |= (1ULL << hash_mac_addr(addr[i]));
56d36be4
DM
3195 }
3196 return ret;
3197}
3198
3199/**
3200 * t4_change_mac - modifies the exact-match filter for a MAC address
3201 * @adap: the adapter
3202 * @mbox: mailbox to use for the FW command
3203 * @viid: the VI id
3204 * @idx: index of existing filter for old value of MAC address, or -1
3205 * @addr: the new MAC address value
3206 * @persist: whether a new MAC allocation should be persistent
3207 * @add_smt: if true also add the address to the HW SMT
3208 *
3209 * Modifies an exact-match filter and sets it to the new MAC address.
3210 * Note that in general it is not possible to modify the value of a given
3211 * filter so the generic way to modify an address filter is to free the one
3212 * being used by the old address value and allocate a new filter for the
3213 * new address value. @idx can be -1 if the address is a new addition.
3214 *
3215 * Returns a negative error number or the index of the filter with the new
3216 * MAC value.
3217 */
3218int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3219 int idx, const u8 *addr, bool persist, bool add_smt)
3220{
3221 int ret, mode;
3222 struct fw_vi_mac_cmd c;
3223 struct fw_vi_mac_exact *p = c.u.exact;
3224
3225 if (idx < 0) /* new allocation */
3226 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3227 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3228
3229 memset(&c, 0, sizeof(c));
3230 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3231 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3232 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
3233 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3234 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3235 FW_VI_MAC_CMD_IDX(idx));
3236 memcpy(p->macaddr, addr, sizeof(p->macaddr));
3237
3238 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3239 if (ret == 0) {
3240 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3241 if (ret >= NEXACT_MAC)
3242 ret = -ENOMEM;
3243 }
3244 return ret;
3245}
3246
3247/**
3248 * t4_set_addr_hash - program the MAC inexact-match hash filter
3249 * @adap: the adapter
3250 * @mbox: mailbox to use for the FW command
3251 * @viid: the VI id
3252 * @ucast: whether the hash filter should also match unicast addresses
3253 * @vec: the value to be written to the hash filter
3254 * @sleep_ok: call is allowed to sleep
3255 *
3256 * Sets the 64-bit inexact-match hash filter for a virtual interface.
3257 */
3258int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3259 bool ucast, u64 vec, bool sleep_ok)
3260{
3261 struct fw_vi_mac_cmd c;
3262
3263 memset(&c, 0, sizeof(c));
3264 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3265 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
3266 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3267 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3268 FW_CMD_LEN16(1));
3269 c.u.hash.hashvec = cpu_to_be64(vec);
3270 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3271}
3272
3273/**
3274 * t4_enable_vi - enable/disable a virtual interface
3275 * @adap: the adapter
3276 * @mbox: mailbox to use for the FW command
3277 * @viid: the VI id
3278 * @rx_en: 1=enable Rx, 0=disable Rx
3279 * @tx_en: 1=enable Tx, 0=disable Tx
3280 *
3281 * Enables/disables a virtual interface.
3282 */
3283int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3284 bool rx_en, bool tx_en)
3285{
3286 struct fw_vi_enable_cmd c;
3287
3288 memset(&c, 0, sizeof(c));
3289 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3290 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3291 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3292 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
3293 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3294}
3295
3296/**
3297 * t4_identify_port - identify a VI's port by blinking its LED
3298 * @adap: the adapter
3299 * @mbox: mailbox to use for the FW command
3300 * @viid: the VI id
3301 * @nblinks: how many times to blink LED at 2.5 Hz
3302 *
3303 * Identifies a VI's port by blinking its LED.
3304 */
3305int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3306 unsigned int nblinks)
3307{
3308 struct fw_vi_enable_cmd c;
3309
0062b15c 3310 memset(&c, 0, sizeof(c));
56d36be4
DM
3311 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3312 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3313 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
3314 c.blinkdur = htons(nblinks);
3315 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
56d36be4
DM
3316}
3317
3318/**
3319 * t4_iq_free - free an ingress queue and its FLs
3320 * @adap: the adapter
3321 * @mbox: mailbox to use for the FW command
3322 * @pf: the PF owning the queues
3323 * @vf: the VF owning the queues
3324 * @iqtype: the ingress queue type
3325 * @iqid: ingress queue id
3326 * @fl0id: FL0 queue id or 0xffff if no attached FL0
3327 * @fl1id: FL1 queue id or 0xffff if no attached FL1
3328 *
3329 * Frees an ingress queue and its associated FLs, if any.
3330 */
3331int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3332 unsigned int vf, unsigned int iqtype, unsigned int iqid,
3333 unsigned int fl0id, unsigned int fl1id)
3334{
3335 struct fw_iq_cmd c;
3336
3337 memset(&c, 0, sizeof(c));
3338 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
3339 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
3340 FW_IQ_CMD_VFN(vf));
3341 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
3342 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
3343 c.iqid = htons(iqid);
3344 c.fl0id = htons(fl0id);
3345 c.fl1id = htons(fl1id);
3346 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3347}
3348
3349/**
3350 * t4_eth_eq_free - free an Ethernet egress queue
3351 * @adap: the adapter
3352 * @mbox: mailbox to use for the FW command
3353 * @pf: the PF owning the queue
3354 * @vf: the VF owning the queue
3355 * @eqid: egress queue id
3356 *
3357 * Frees an Ethernet egress queue.
3358 */
3359int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3360 unsigned int vf, unsigned int eqid)
3361{
3362 struct fw_eq_eth_cmd c;
3363
3364 memset(&c, 0, sizeof(c));
3365 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
3366 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
3367 FW_EQ_ETH_CMD_VFN(vf));
3368 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
3369 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
3370 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3371}
3372
3373/**
3374 * t4_ctrl_eq_free - free a control egress queue
3375 * @adap: the adapter
3376 * @mbox: mailbox to use for the FW command
3377 * @pf: the PF owning the queue
3378 * @vf: the VF owning the queue
3379 * @eqid: egress queue id
3380 *
3381 * Frees a control egress queue.
3382 */
3383int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3384 unsigned int vf, unsigned int eqid)
3385{
3386 struct fw_eq_ctrl_cmd c;
3387
3388 memset(&c, 0, sizeof(c));
3389 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
3390 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
3391 FW_EQ_CTRL_CMD_VFN(vf));
3392 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
3393 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
3394 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3395}
3396
3397/**
3398 * t4_ofld_eq_free - free an offload egress queue
3399 * @adap: the adapter
3400 * @mbox: mailbox to use for the FW command
3401 * @pf: the PF owning the queue
3402 * @vf: the VF owning the queue
3403 * @eqid: egress queue id
3404 *
3405 * Frees a control egress queue.
3406 */
3407int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3408 unsigned int vf, unsigned int eqid)
3409{
3410 struct fw_eq_ofld_cmd c;
3411
3412 memset(&c, 0, sizeof(c));
3413 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
3414 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
3415 FW_EQ_OFLD_CMD_VFN(vf));
3416 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
3417 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
3418 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3419}
3420
/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 *	Always returns 0; messages other than FW_PORT_CMD are ignored.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		/* Translate the firmware channel to the driver's port index. */
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

		/* Decode pause settings and link speed from the status word. */
		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			/* Notify the OS glue layer of the link change. */
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			/* Transceiver module was inserted/removed/changed. */
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}
1dd06ae8 3469static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
56d36be4
DM
3470{
3471 u16 val;
56d36be4 3472
e5c8ae5f
JL
3473 if (pci_is_pcie(adapter->pdev)) {
3474 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
56d36be4
DM
3475 p->speed = val & PCI_EXP_LNKSTA_CLS;
3476 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3477 }
3478}
3479
3480/**
3481 * init_link_config - initialize a link's SW state
3482 * @lc: structure holding the link state
3483 * @caps: link capabilities
3484 *
3485 * Initializes the SW state maintained for each link, including the link's
3486 * capabilities and default speed/flow-control/autonegotiation settings.
3487 */
1dd06ae8 3488static void init_link_config(struct link_config *lc, unsigned int caps)
56d36be4
DM
3489{
3490 lc->supported = caps;
3491 lc->requested_speed = 0;
3492 lc->speed = 0;
3493 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3494 if (lc->supported & FW_PORT_CAP_ANEG) {
3495 lc->advertising = lc->supported & ADVERT_MASK;
3496 lc->autoneg = AUTONEG_ENABLE;
3497 lc->requested_fc |= PAUSE_AUTONEG;
3498 } else {
3499 lc->advertising = 0;
3500 lc->autoneg = AUTONEG_DISABLE;
3501 }
3502}
3503
204dc3c0 3504int t4_wait_dev_ready(struct adapter *adap)
56d36be4
DM
3505{
3506 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3507 return 0;
3508 msleep(500);
3509 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3510}
3511
/* Identify the serial flash part and record its geometry (total size and
 * sector count) plus the firmware's boot offset in adap->params.
 * Returns 0 on success, a negative errno on SF access failure or an
 * unsupported flash part.
 */
static int get_flash_params(struct adapter *adap)
{
	int ret;
	u32 info;

	/* Issue the JEDEC READ-ID opcode and fetch the 3-byte ID. */
	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
	if (ret)
		return ret;

	/* NOTE(review): 0x20 is treated as the Numonix manufacturer ID
	 * here; other vendors are rejected.
	 */
	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
		return -EINVAL;
	info >>= 16;                           /* log2 of size */
	/* Supported parts: 1MB..8MB in 64KB sectors, or 16MB/64 sectors. */
	if (info >= 0x14 && info < 0x18)
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;
	/* The firmware image lives at the boot address programmed in CIM. */
	adap->params.sf_fw_start =
		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
	return 0;
}
/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.  Returns 0 on success or a negative
 *	errno.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret;

	/* Make sure the device answers register reads before touching it. */
	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	get_pci_mode(adapter, &adapter->params.pci);
	adapter->params.rev = t4_read_reg(adapter, PL_REV);

	/* We need the flash geometry before we can locate the firmware. */
	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;
	return 0;
}
/* Initialize SW state for each of the adapter's ports: query the port's
 * capabilities from firmware, allocate a VI on it, record the assigned
 * MAC address, and fetch the VI's RSS configuration.  @mbox is the
 * mailbox to use; @pf/@vf identify the function owning the VIs.
 * Returns 0 on success or a negative errno.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;
	struct fw_rss_vi_config_cmd rvc;

	memset(&c, 0, sizeof(c));
	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		/* Advance j to the next physical channel in portvec. */
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		/* Ask firmware for this port's capabilities and status. */
		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		/* Allocate a VI (with one MAC address) on this port. */
		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;		/* t4_alloc_vi returns the VI id */
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		adap->port[i]->dev_id = j;

		/* Decode MDIO address, port type and module type. */
		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		/* Read back the VI's RSS mode. */
		rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}