]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
cxgb3: Use PCI Express Capability accessors
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / chelsio / cxgb4 / t4_hw.c
CommitLineData
56d36be4
DM
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
de498c89
RD
56static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
56d36be4
DM
58{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
/* Convenience wrapper around t4_wait_op_done_val() for callers that do not
 * need the register value observed at completion time.
 */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
de498c89
RD
112static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
56d36be4
DM
115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
122
56d36be4
DM
123/*
124 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
125 */
126static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
127 u32 mbox_addr)
128{
129 for ( ; nflit; nflit--, mbox_addr += 8)
130 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
131}
132
/*
 * Handle a FW assertion reported in a mailbox: copy the debug command out of
 * the mailbox and log the assertion's source location and operand values.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	/* the mailbox contents are a struct fw_debug_cmd; read it flit by flit */
	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}
146
/*
 * Dump the raw 64-byte contents of a mailbox (eight 64-bit flits) to the
 * kernel log, used when a FW command fails or times out.
 */
static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}
160
161/**
162 * t4_wr_mbox_meat - send a command to FW through the given mailbox
163 * @adap: the adapter
164 * @mbox: index of the mailbox to use
165 * @cmd: the command to write
166 * @size: command length in bytes
167 * @rpl: where to optionally store the reply
168 * @sleep_ok: if true we may sleep while awaiting command completion
169 *
170 * Sends the given command to FW through the selected mailbox and waits
171 * for the FW to execute the command. If @rpl is not %NULL it is used to
172 * store the FW's reply to the command. The command and its optional
173 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
174 * to respond. @sleep_ok determines whether we may sleep while awaiting
175 * the response. If sleeping is allowed we use progressive backoff
176 * otherwise we spin.
177 *
178 * The return value is 0 on success or a negative errno on failure. A
179 * failure can happen either because we are not able to execute the
180 * command or FW executes it but signals an error. In the latter case
181 * the return value is the error code indicated by FW (negated).
182 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/* progressive backoff schedule (ms) used when sleeping is allowed;
	 * the final entry repeats until the overall timeout expires
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	/* commands must be a multiple of 16 bytes and fit in the mailbox */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	/* try a few times to claim ownership of the mailbox */
	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	/* copy the command into the mailbox, 64 bits at a time */
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	/* hand the mailbox over to the firmware */
	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg); /* flush write */

	delay_idx = 0;
	ms = delay[0];

	/* poll until the FW hands the mailbox back or the timeout expires */
	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx]; /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			/* we own the mailbox but there is no valid message:
			 * release it and keep polling
			 */
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				/* FW reported an assertion; log it and fail */
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			/* dump the mailbox if FW signalled an error */
			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
258
259/**
260 * t4_mc_read - read from MC through backdoor accesses
261 * @adap: the adapter
262 * @addr: address of first byte requested
263 * @data: 64 bytes of data containing the requested address
264 * @ecc: where to store the corresponding 64-bit ECC word
265 *
266 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 267 * that covers the requested address @addr. If @ecc is not %NULL it
268 * is assigned the 64-bit ECC word for the read data.
269 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	/* fail if a backdoor BIST read is already in flight */
	if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
		return -EBUSY;
	/* program a 64-byte read of the aligned block covering @addr */
	t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
	t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)

	/* the data words are read out of the status registers in reverse
	 * index order; the ECC word sits at index 16
	 */
	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
294
295/**
296 * t4_edc_read - read from EDC through backdoor accesses
297 * @adap: the adapter
298 * @idx: which EDC to access
299 * @addr: address of first byte requested
300 * @data: 64 bytes of data containing the requested address
301 * @ecc: where to store the corresponding 64-bit ECC word
302 *
303 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 304 * that covers the requested address @addr. If @ecc is not %NULL it
305 * is assigned the 64-bit ECC word for the read data.
306 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	/* per-EDC register copies are laid out at a fixed stride from EDC 0 */
	idx *= EDC_STRIDE;
	/* fail if a backdoor BIST read is already in flight on this EDC */
	if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
		return -EBUSY;
	/* program a 64-byte read of the aligned block covering @addr */
	t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, EDC_BIST_CMD + idx,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)

	/* the data words are read out of the status registers in reverse
	 * index order; the ECC word sits at index 16
	 */
	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
332
56d36be4
DM
333#define EEPROM_STAT_ADDR 0x7bfc
334#define VPD_BASE 0
226ec5fd 335#define VPD_LEN 512
56d36be4
DM
336
337/**
338 * t4_seeprom_wp - enable/disable EEPROM write protection
339 * @adapter: the adapter
340 * @enable: whether to enable or disable write protection
341 *
342 * Enables or disables write protection on the serial EEPROM.
343 */
344int t4_seeprom_wp(struct adapter *adapter, bool enable)
345{
346 unsigned int v = enable ? 0xc : 0;
347 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
348 return ret < 0 ? ret : 0;
349}
350
351/**
352 * get_vpd_params - read VPD parameters from VPD EEPROM
353 * @adapter: adapter to read
354 * @p: where to store the parameters
355 *
356 * Reads card parameters stored in VPD EEPROM.
357 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret;
	int ec, sn;		/* offsets of the EC and SN keyword data */
	u8 vpd[VPD_LEN], csum;
	unsigned int vpdr_len, kw_offset, id_len;

	/* pull the whole VPD image into a local buffer */
	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
	if (ret < 0)
		return ret;

	/* the image must start with an ID-string large resource */
	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		return -EINVAL;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	/* locate the read-only (VPD-R) section holding the keywords */
	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		return -EINVAL;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		return -EINVAL;
	}

/* Locate keyword @name in the VPD-R section and leave the offset of its data
 * (past the info-field header) in @var; fails the function if it is absent.
 */
#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	/* the RV keyword's checksum covers all bytes from offset 0 up to it */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	/* copy out ID, EC, and SN, trimming trailing whitespace */
	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	return 0;
}
423
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 512 * 1024,     /* max accepted firmware image size */
};
439
440/**
441 * sf1_read - read data from the serial flash
442 * @adapter: the adapter
443 * @byte_cnt: number of bytes to read
444 * @cont: whether another operation will be chained
445 * @lock: whether to lock SF for PL access only
446 * @valp: where to store the read data
447 *
448 * Reads up to 4 bytes of data from the serial flash. The location of
449 * the read needs to be specified prior to calling this by issuing the
450 * appropriate commands to the serial flash.
451 */
452static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
453 int lock, u32 *valp)
454{
455 int ret;
456
457 if (!byte_cnt || byte_cnt > 4)
458 return -EINVAL;
459 if (t4_read_reg(adapter, SF_OP) & BUSY)
460 return -EBUSY;
461 cont = cont ? SF_CONT : 0;
462 lock = lock ? SF_LOCK : 0;
463 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
464 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
465 if (!ret)
466 *valp = t4_read_reg(adapter, SF_DATA);
467 return ret;
468}
469
470/**
471 * sf1_write - write data to the serial flash
472 * @adapter: the adapter
473 * @byte_cnt: number of bytes to write
474 * @cont: whether another operation will be chained
475 * @lock: whether to lock SF for PL access only
476 * @val: value to write
477 *
478 * Writes up to 4 bytes of data to the serial flash. The location of
479 * the write needs to be specified prior to calling this by issuing the
480 * appropriate commands to the serial flash.
481 */
482static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
483 int lock, u32 val)
484{
485 if (!byte_cnt || byte_cnt > 4)
486 return -EINVAL;
487 if (t4_read_reg(adapter, SF_OP) & BUSY)
488 return -EBUSY;
489 cont = cont ? SF_CONT : 0;
490 lock = lock ? SF_LOCK : 0;
491 t4_write_reg(adapter, SF_DATA, val);
492 t4_write_reg(adapter, SF_OP, lock |
493 cont | BYTECNT(byte_cnt - 1) | OP_WR);
494 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
495}
496
497/**
498 * flash_wait_op - wait for a flash operation to complete
499 * @adapter: the adapter
500 * @attempts: max number of polls of the status register
501 * @delay: delay between polls in ms
502 *
503 * Wait for a flash operation to complete by polling the status register.
504 */
505static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
506{
507 int ret;
508 u32 status;
509
510 while (1) {
511 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
512 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
513 return ret;
514 if (!(status & 1))
515 return 0;
516 if (--attempts == 0)
517 return -EAGAIN;
518 if (delay)
519 msleep(delay);
520 }
521}
522
523/**
524 * t4_read_flash - read words from serial flash
525 * @adapter: the adapter
526 * @addr: the start address for the read
527 * @nwords: how many 32-bit words to read
528 * @data: where to store the read data
529 * @byte_oriented: whether to store data as bytes or as words
530 *
531 * Read the specified number of 32-bit words from the serial flash.
532 * If @byte_oriented is set the read data is stored as a byte array
533 * (i.e., big-endian), otherwise as 32-bit words in the platform's
534 * natural endianess.
535 */
de498c89
RD
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* the read must lie within the flash and start word-aligned */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* byte-swap the address so it follows the opcode in the command word */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* issue the fast-read command; the 1-byte read that follows appears
	 * to consume the fast-read dummy cycle -- NOTE(review): confirm
	 * against the flash part's datasheet
	 */
	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* keep the transfer chained (cont) until the last word, then
		 * release the SF lock
		 */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
561
562/**
563 * t4_write_flash - write up to a page of data to the serial flash
564 * @adapter: the adapter
565 * @addr: the start address to write
566 * @n: length of data to write in bytes
567 * @data: the data to write
568 *
569 * Writes up to a page of data (256 bytes) to the serial flash starting
570 * at the given address. All the data must be written to the same page.
571 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* the write must start inside the flash and stay within one page */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* byte-swapped address follows the program-page opcode */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* stream the data to the flash up to 4 bytes at a time, MSB first;
	 * the transfer stays chained (cont) until the final chunk
	 */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0); /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data has been advanced past the written bytes; back up to compare */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
	return ret;
}
620
621/**
622 * get_fw_version - read the firmware version
623 * @adapter: the adapter
624 * @vers: where to place the version
625 *
626 * Reads the FW version from flash.
627 */
628static int get_fw_version(struct adapter *adapter, u32 *vers)
629{
900a6596
DM
630 return t4_read_flash(adapter, adapter->params.sf_fw_start +
631 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
56d36be4
DM
632}
633
634/**
635 * get_tp_version - read the TP microcode version
636 * @adapter: the adapter
637 * @vers: where to place the version
638 *
639 * Reads the TP microcode version from flash.
640 */
641static int get_tp_version(struct adapter *adapter, u32 *vers)
642{
900a6596
DM
643 return t4_read_flash(adapter, adapter->params.sf_fw_start +
644 offsetof(struct fw_hdr, tp_microcode_ver),
56d36be4
DM
645 1, vers, 0);
646}
647
648/**
649 * t4_check_fw_version - check if the FW is compatible with this driver
650 * @adapter: the adapter
651 *
652 * Checks if an adapter's FW is compatible with the driver. Returns 0
653 * if there's exact match, a negative error if the version could not be
654 * read or there's a major version mismatch, and a positive value if the
655 * expected major version is found but there's a minor version mismatch.
656 */
int t4_check_fw_version(struct adapter *adapter)
{
	u32 api_vers[2];
	int ret, major, minor, micro;

	/* cache the FW and TP versions, then read the interface versions */
	ret = get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = get_tp_version(adapter, &adapter->params.tp_vers);
	if (!ret)
		ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
				    offsetof(struct fw_hdr, intfver_nic),
				    2, api_vers, 1);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
	minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
	micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
	memcpy(adapter->params.api_vers, api_vers,
	       sizeof(adapter->params.api_vers));

	if (major != FW_VERSION_MAJOR) {	/* major mismatch - fail */
		dev_err(adapter->pdev_dev,
			"card FW has major version %u, driver wants %u\n",
			major, FW_VERSION_MAJOR);
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
		return 0;                   /* perfect match */

	/* Minor/micro version mismatch. Report it but often it's OK. */
	return 1;
}
691
692/**
693 * t4_flash_erase_sectors - erase a range of flash sectors
694 * @adapter: the adapter
695 * @start: the first sector to erase
696 * @end: the last sector to erase
697 *
698 * Erases the sectors in the given inclusive range.
699 */
700static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
701{
702 int ret = 0;
703
704 while (start <= end) {
705 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
706 (ret = sf1_write(adapter, 4, 0, 1,
707 SF_ERASE_SECTOR | (start << 8))) != 0 ||
900a6596 708 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
56d36be4
DM
709 dev_err(adapter->pdev_dev,
710 "erase of flash sector %d failed, error %d\n",
711 start, ret);
712 break;
713 }
714 start++;
715 }
716 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
717 return ret;
718}
719
720/**
721 * t4_load_fw - download firmware
722 * @adap: the adapter
723 * @fw_data: the firmware image to write
724 * @size: image size
725 *
726 * Write the supplied firmware image to the card's serial flash.
727 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	/* validate the image: non-empty, 512-byte granular, consistent with
	 * its own header, and within the flash FW region's capacity
	 */
	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	/* the 32-bit word sum of a valid image is all-ones */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	/* write the remaining pages of the image */
	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* finally, write the real FW version to mark the image valid */
	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}
803
804#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
805 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
806
807/**
808 * t4_link_start - apply link configuration to MAC/PHY
809 * @phy: the PHY to setup
810 * @mac: the MAC to setup
811 * @lc: the requested link configuration
812 *
813 * Set up a port's MAC and PHY according to a desired link configuration.
814 * - If the PHY can auto-negotiate first decide what to advertise, then
815 * enable/disable auto-negotiation as desired, and reset.
816 * - If the PHY does not auto-negotiate just reset it.
817 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
818 * otherwise do it later based on the outcome of auto-negotiation.
819 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	/* translate the requested pause settings into FW capability bits */
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/* PHY can't autonegotiate: request all supported modes */
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* autoneg off: force the requested speed */
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		/* autoneg on: advertise the configured modes */
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
849
850/**
851 * t4_restart_aneg - restart autonegotiation
852 * @adap: the adapter
853 * @mbox: mbox to use for the FW command
854 * @port: the port id
855 *
856 * Restarts autonegotiation for the selected port.
857 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	/* issue an L1 configure requesting only the autonegotiation cap,
	 * which restarts autonegotiation on the port
	 */
	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
870
8caa1e84
VP
/* prototype for optional platform-specific handling of an interrupt cause */
typedef void (*int_handler_t)(struct adapter *adap);

/* one entry of an interrupt-cause table consumed by t4_handle_intr_status() */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
880
881/**
882 * t4_handle_intr_status - table driven interrupt handler
883 * @adapter: the adapter that generated the interrupt
884 * @reg: the interrupt status register to process
885 * @acts: table of interrupt actions
886 *
887 * A table driven interrupt handler that applies a set of masks to an
888 * interrupt status word and performs the corresponding actions if the
25985edc 889 * interrupts described by the mask have occurred. The actions include
56d36be4
DM
890 * optionally emitting a warning or alert message. The table is terminated
891 * by an entry specifying mask 0. Returns the number of fatal interrupt
892 * conditions.
893 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	/* walk the table (terminated by a zero mask) and act on each cause
	 * present in the status word
	 */
	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	/* write back only the bits the table recognized */
	status &= mask;
	if (status)                           /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}
920
921/*
922 * Interrupt handler for the PCIE module.
923 */
924static void pcie_intr_handler(struct adapter *adapter)
925{
005b5717 926 static const struct intr_info sysbus_intr_info[] = {
56d36be4
DM
927 { RNPP, "RXNP array parity error", -1, 1 },
928 { RPCP, "RXPC array parity error", -1, 1 },
929 { RCIP, "RXCIF array parity error", -1, 1 },
930 { RCCP, "Rx completions control array parity error", -1, 1 },
931 { RFTP, "RXFT array parity error", -1, 1 },
932 { 0 }
933 };
005b5717 934 static const struct intr_info pcie_port_intr_info[] = {
56d36be4
DM
935 { TPCP, "TXPC array parity error", -1, 1 },
936 { TNPP, "TXNP array parity error", -1, 1 },
937 { TFTP, "TXFT array parity error", -1, 1 },
938 { TCAP, "TXCA array parity error", -1, 1 },
939 { TCIP, "TXCIF array parity error", -1, 1 },
940 { RCAP, "RXCA array parity error", -1, 1 },
941 { OTDD, "outbound request TLP discarded", -1, 1 },
942 { RDPE, "Rx data parity error", -1, 1 },
943 { TDUE, "Tx uncorrectable data error", -1, 1 },
944 { 0 }
945 };
005b5717 946 static const struct intr_info pcie_intr_info[] = {
56d36be4
DM
947 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
948 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
949 { MSIDATAPERR, "MSI data parity error", -1, 1 },
950 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
951 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
952 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
953 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
954 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
955 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
956 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
957 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
958 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
959 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
960 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
961 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
962 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
963 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
964 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
965 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
966 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
967 { FIDPERR, "PCI FID parity error", -1, 1 },
968 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
969 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
970 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
971 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
972 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
973 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
974 { PCIESINT, "PCI core secondary fault", -1, 1 },
975 { PCIEPINT, "PCI core primary fault", -1, 1 },
976 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
977 { 0 }
978 };
979
980 int fat;
981
982 fat = t4_handle_intr_status(adapter,
983 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
984 sysbus_intr_info) +
985 t4_handle_intr_status(adapter,
986 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
987 pcie_port_intr_info) +
988 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
989 if (fat)
990 t4_fatal_err(adapter);
991}
992
993/*
994 * TP interrupt handler.
995 */
996static void tp_intr_handler(struct adapter *adapter)
997{
005b5717 998 static const struct intr_info tp_intr_info[] = {
56d36be4
DM
999 { 0x3fffffff, "TP parity error", -1, 1 },
1000 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1001 { 0 }
1002 };
1003
1004 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1005 t4_fatal_err(adapter);
1006}
1007
1008/*
1009 * SGE interrupt handler.
1010 */
1011static void sge_intr_handler(struct adapter *adapter)
1012{
1013 u64 v;
1014
005b5717 1015 static const struct intr_info sge_intr_info[] = {
56d36be4
DM
1016 { ERR_CPL_EXCEED_IQE_SIZE,
1017 "SGE received CPL exceeding IQE size", -1, 1 },
1018 { ERR_INVALID_CIDX_INC,
1019 "SGE GTS CIDX increment too large", -1, 0 },
1020 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
8caa1e84
VP
1021 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1022 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1023 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
56d36be4
DM
1024 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1025 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1026 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1027 0 },
1028 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1029 0 },
1030 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1031 0 },
1032 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1033 0 },
1034 { ERR_ING_CTXT_PRIO,
1035 "SGE too many priority ingress contexts", -1, 0 },
1036 { ERR_EGR_CTXT_PRIO,
1037 "SGE too many priority egress contexts", -1, 0 },
1038 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1039 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1040 { 0 }
1041 };
1042
1043 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
8caa1e84 1044 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
56d36be4
DM
1045 if (v) {
1046 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
8caa1e84 1047 (unsigned long long)v);
56d36be4
DM
1048 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1049 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1050 }
1051
1052 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1053 v != 0)
1054 t4_fatal_err(adapter);
1055}
1056
1057/*
1058 * CIM interrupt handler.
1059 */
1060static void cim_intr_handler(struct adapter *adapter)
1061{
005b5717 1062 static const struct intr_info cim_intr_info[] = {
56d36be4
DM
1063 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1064 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1065 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1066 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1067 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1068 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1069 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1070 { 0 }
1071 };
005b5717 1072 static const struct intr_info cim_upintr_info[] = {
56d36be4
DM
1073 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1074 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1075 { ILLWRINT, "CIM illegal write", -1, 1 },
1076 { ILLRDINT, "CIM illegal read", -1, 1 },
1077 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1078 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1079 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1080 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1081 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1082 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1083 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1084 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1085 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1086 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1087 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1088 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1089 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1090 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1091 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1092 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1093 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1094 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1095 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1096 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1097 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1098 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1099 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1100 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1101 { 0 }
1102 };
1103
1104 int fat;
1105
1106 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1107 cim_intr_info) +
1108 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1109 cim_upintr_info);
1110 if (fat)
1111 t4_fatal_err(adapter);
1112}
1113
1114/*
1115 * ULP RX interrupt handler.
1116 */
1117static void ulprx_intr_handler(struct adapter *adapter)
1118{
005b5717 1119 static const struct intr_info ulprx_intr_info[] = {
91e9a1ec 1120 { 0x1800000, "ULPRX context error", -1, 1 },
56d36be4
DM
1121 { 0x7fffff, "ULPRX parity error", -1, 1 },
1122 { 0 }
1123 };
1124
1125 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1126 t4_fatal_err(adapter);
1127}
1128
1129/*
1130 * ULP TX interrupt handler.
1131 */
1132static void ulptx_intr_handler(struct adapter *adapter)
1133{
005b5717 1134 static const struct intr_info ulptx_intr_info[] = {
56d36be4
DM
1135 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1136 0 },
1137 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1138 0 },
1139 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1140 0 },
1141 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1142 0 },
1143 { 0xfffffff, "ULPTX parity error", -1, 1 },
1144 { 0 }
1145 };
1146
1147 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1148 t4_fatal_err(adapter);
1149}
1150
1151/*
1152 * PM TX interrupt handler.
1153 */
1154static void pmtx_intr_handler(struct adapter *adapter)
1155{
005b5717 1156 static const struct intr_info pmtx_intr_info[] = {
56d36be4
DM
1157 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1158 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1159 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1160 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1161 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1162 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1163 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1164 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1165 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1166 { 0 }
1167 };
1168
1169 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1170 t4_fatal_err(adapter);
1171}
1172
1173/*
1174 * PM RX interrupt handler.
1175 */
1176static void pmrx_intr_handler(struct adapter *adapter)
1177{
005b5717 1178 static const struct intr_info pmrx_intr_info[] = {
56d36be4
DM
1179 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1180 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1181 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1182 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1183 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1184 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1185 { 0 }
1186 };
1187
1188 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1189 t4_fatal_err(adapter);
1190}
1191
1192/*
1193 * CPL switch interrupt handler.
1194 */
1195static void cplsw_intr_handler(struct adapter *adapter)
1196{
005b5717 1197 static const struct intr_info cplsw_intr_info[] = {
56d36be4
DM
1198 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1199 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1200 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1201 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1202 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1203 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1204 { 0 }
1205 };
1206
1207 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1208 t4_fatal_err(adapter);
1209}
1210
1211/*
1212 * LE interrupt handler.
1213 */
1214static void le_intr_handler(struct adapter *adap)
1215{
005b5717 1216 static const struct intr_info le_intr_info[] = {
56d36be4
DM
1217 { LIPMISS, "LE LIP miss", -1, 0 },
1218 { LIP0, "LE 0 LIP error", -1, 0 },
1219 { PARITYERR, "LE parity error", -1, 1 },
1220 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1221 { REQQPARERR, "LE request queue parity error", -1, 1 },
1222 { 0 }
1223 };
1224
1225 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1226 t4_fatal_err(adap);
1227}
1228
1229/*
1230 * MPS interrupt handler.
1231 */
1232static void mps_intr_handler(struct adapter *adapter)
1233{
005b5717 1234 static const struct intr_info mps_rx_intr_info[] = {
56d36be4
DM
1235 { 0xffffff, "MPS Rx parity error", -1, 1 },
1236 { 0 }
1237 };
005b5717 1238 static const struct intr_info mps_tx_intr_info[] = {
56d36be4
DM
1239 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1240 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1241 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1242 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1243 { BUBBLE, "MPS Tx underflow", -1, 1 },
1244 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1245 { FRMERR, "MPS Tx framing error", -1, 1 },
1246 { 0 }
1247 };
005b5717 1248 static const struct intr_info mps_trc_intr_info[] = {
56d36be4
DM
1249 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1250 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1251 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1252 { 0 }
1253 };
005b5717 1254 static const struct intr_info mps_stat_sram_intr_info[] = {
56d36be4
DM
1255 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1256 { 0 }
1257 };
005b5717 1258 static const struct intr_info mps_stat_tx_intr_info[] = {
56d36be4
DM
1259 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1260 { 0 }
1261 };
005b5717 1262 static const struct intr_info mps_stat_rx_intr_info[] = {
56d36be4
DM
1263 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1264 { 0 }
1265 };
005b5717 1266 static const struct intr_info mps_cls_intr_info[] = {
56d36be4
DM
1267 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1268 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1269 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1270 { 0 }
1271 };
1272
1273 int fat;
1274
1275 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1276 mps_rx_intr_info) +
1277 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1278 mps_tx_intr_info) +
1279 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1280 mps_trc_intr_info) +
1281 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1282 mps_stat_sram_intr_info) +
1283 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1284 mps_stat_tx_intr_info) +
1285 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1286 mps_stat_rx_intr_info) +
1287 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1288 mps_cls_intr_info);
1289
1290 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1291 RXINT | TXINT | STATINT);
1292 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1293 if (fat)
1294 t4_fatal_err(adapter);
1295}
1296
1297#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1298
1299/*
1300 * EDC/MC interrupt handler.
1301 */
1302static void mem_intr_handler(struct adapter *adapter, int idx)
1303{
1304 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1305
1306 unsigned int addr, cnt_addr, v;
1307
1308 if (idx <= MEM_EDC1) {
1309 addr = EDC_REG(EDC_INT_CAUSE, idx);
1310 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1311 } else {
1312 addr = MC_INT_CAUSE;
1313 cnt_addr = MC_ECC_STATUS;
1314 }
1315
1316 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1317 if (v & PERR_INT_CAUSE)
1318 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1319 name[idx]);
1320 if (v & ECC_CE_INT_CAUSE) {
1321 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1322
1323 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1324 if (printk_ratelimit())
1325 dev_warn(adapter->pdev_dev,
1326 "%u %s correctable ECC data error%s\n",
1327 cnt, name[idx], cnt > 1 ? "s" : "");
1328 }
1329 if (v & ECC_UE_INT_CAUSE)
1330 dev_alert(adapter->pdev_dev,
1331 "%s uncorrectable ECC data error\n", name[idx]);
1332
1333 t4_write_reg(adapter, addr, v);
1334 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1335 t4_fatal_err(adapter);
1336}
1337
1338/*
1339 * MA interrupt handler.
1340 */
1341static void ma_intr_handler(struct adapter *adap)
1342{
1343 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1344
1345 if (status & MEM_PERR_INT_CAUSE)
1346 dev_alert(adap->pdev_dev,
1347 "MA parity error, parity status %#x\n",
1348 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1349 if (status & MEM_WRAP_INT_CAUSE) {
1350 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1351 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1352 "client %u to address %#x\n",
1353 MEM_WRAP_CLIENT_NUM_GET(v),
1354 MEM_WRAP_ADDRESS_GET(v) << 4);
1355 }
1356 t4_write_reg(adap, MA_INT_CAUSE, status);
1357 t4_fatal_err(adap);
1358}
1359
1360/*
1361 * SMB interrupt handler.
1362 */
1363static void smb_intr_handler(struct adapter *adap)
1364{
005b5717 1365 static const struct intr_info smb_intr_info[] = {
56d36be4
DM
1366 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1367 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1368 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1369 { 0 }
1370 };
1371
1372 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1373 t4_fatal_err(adap);
1374}
1375
1376/*
1377 * NC-SI interrupt handler.
1378 */
1379static void ncsi_intr_handler(struct adapter *adap)
1380{
005b5717 1381 static const struct intr_info ncsi_intr_info[] = {
56d36be4
DM
1382 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1383 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1384 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1385 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1386 { 0 }
1387 };
1388
1389 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1390 t4_fatal_err(adap);
1391}
1392
1393/*
1394 * XGMAC interrupt handler.
1395 */
1396static void xgmac_intr_handler(struct adapter *adap, int port)
1397{
1398 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1399
1400 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1401 if (!v)
1402 return;
1403
1404 if (v & TXFIFO_PRTY_ERR)
1405 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1406 port);
1407 if (v & RXFIFO_PRTY_ERR)
1408 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1409 port);
1410 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1411 t4_fatal_err(adap);
1412}
1413
1414/*
1415 * PL interrupt handler.
1416 */
1417static void pl_intr_handler(struct adapter *adap)
1418{
005b5717 1419 static const struct intr_info pl_intr_info[] = {
56d36be4
DM
1420 { FATALPERR, "T4 fatal parity error", -1, 1 },
1421 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1422 { 0 }
1423 };
1424
1425 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1426 t4_fatal_err(adap);
1427}
1428
63bcceec 1429#define PF_INTR_MASK (PFSW)
56d36be4
DM
1430#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1431 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1432 CPL_SWITCH | SGE | ULP_TX)
1433
1434/**
1435 * t4_slow_intr_handler - control path interrupt handler
1436 * @adapter: the adapter
1437 *
1438 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1439 * The designation 'slow' is because it involves register reads, while
1440 * data interrupts typically don't involve any MMIOs.
1441 */
1442int t4_slow_intr_handler(struct adapter *adapter)
1443{
1444 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1445
1446 if (!(cause & GLBL_INTR_MASK))
1447 return 0;
1448 if (cause & CIM)
1449 cim_intr_handler(adapter);
1450 if (cause & MPS)
1451 mps_intr_handler(adapter);
1452 if (cause & NCSI)
1453 ncsi_intr_handler(adapter);
1454 if (cause & PL)
1455 pl_intr_handler(adapter);
1456 if (cause & SMB)
1457 smb_intr_handler(adapter);
1458 if (cause & XGMAC0)
1459 xgmac_intr_handler(adapter, 0);
1460 if (cause & XGMAC1)
1461 xgmac_intr_handler(adapter, 1);
1462 if (cause & XGMAC_KR0)
1463 xgmac_intr_handler(adapter, 2);
1464 if (cause & XGMAC_KR1)
1465 xgmac_intr_handler(adapter, 3);
1466 if (cause & PCIE)
1467 pcie_intr_handler(adapter);
1468 if (cause & MC)
1469 mem_intr_handler(adapter, MEM_MC);
1470 if (cause & EDC0)
1471 mem_intr_handler(adapter, MEM_EDC0);
1472 if (cause & EDC1)
1473 mem_intr_handler(adapter, MEM_EDC1);
1474 if (cause & LE)
1475 le_intr_handler(adapter);
1476 if (cause & TP)
1477 tp_intr_handler(adapter);
1478 if (cause & MA)
1479 ma_intr_handler(adapter);
1480 if (cause & PM_TX)
1481 pmtx_intr_handler(adapter);
1482 if (cause & PM_RX)
1483 pmrx_intr_handler(adapter);
1484 if (cause & ULP_RX)
1485 ulprx_intr_handler(adapter);
1486 if (cause & CPL_SWITCH)
1487 cplsw_intr_handler(adapter);
1488 if (cause & SGE)
1489 sge_intr_handler(adapter);
1490 if (cause & ULP_TX)
1491 ulptx_intr_handler(adapter);
1492
1493 /* Clear the interrupts just processed for which we are the master. */
1494 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1495 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1496 return 1;
1497}
1498
1499/**
1500 * t4_intr_enable - enable interrupts
1501 * @adapter: the adapter whose interrupts should be enabled
1502 *
1503 * Enable PF-specific interrupts for the calling function and the top-level
1504 * interrupt concentrator for global interrupts. Interrupts are already
1505 * enabled at each module, here we just enable the roots of the interrupt
1506 * hierarchies.
1507 *
1508 * Note: this function should be called only when the driver manages
1509 * non PF-specific interrupts from the various HW modules. Only one PCI
1510 * function at a time should be doing this.
1511 */
1512void t4_intr_enable(struct adapter *adapter)
1513{
1514 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1515
1516 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1517 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1518 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1519 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1520 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1521 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1522 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
881806bc 1523 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT |
56d36be4
DM
1524 EGRESS_SIZE_ERR);
1525 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1526 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1527}
1528
1529/**
1530 * t4_intr_disable - disable interrupts
1531 * @adapter: the adapter whose interrupts should be disabled
1532 *
1533 * Disable interrupts. We only disable the top-level interrupt
1534 * concentrators. The caller must be a PCI function managing global
1535 * interrupts.
1536 */
1537void t4_intr_disable(struct adapter *adapter)
1538{
1539 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1540
1541 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1542 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1543}
1544
56d36be4
DM
1545/**
1546 * hash_mac_addr - return the hash value of a MAC address
1547 * @addr: the 48-bit Ethernet MAC address
1548 *
1549 * Hashes a MAC address according to the hash function used by HW inexact
1550 * (hash) address matching.
1551 */
1552static int hash_mac_addr(const u8 *addr)
1553{
1554 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1555 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1556 a ^= b;
1557 a ^= (a >> 12);
1558 a ^= (a >> 6);
1559 return a & 0x3f;
1560}
1561
1562/**
1563 * t4_config_rss_range - configure a portion of the RSS mapping table
1564 * @adapter: the adapter
1565 * @mbox: mbox to use for the FW command
1566 * @viid: virtual interface whose RSS subtable is to be written
1567 * @start: start entry in the table to write
1568 * @n: how many table entries to write
1569 * @rspq: values for the response queue lookup table
1570 * @nrspq: number of values in @rspq
1571 *
1572 * Programs the selected part of the VI's RSS mapping table with the
1573 * provided values. If @nrspq < @n the supplied values are used repeatedly
1574 * until the full table range is populated.
1575 *
1576 * The caller must ensure the values in @rspq are in the range allowed for
1577 * @viid.
1578 */
1579int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1580 int start, int n, const u16 *rspq, unsigned int nrspq)
1581{
1582 int ret;
1583 const u16 *rsp = rspq;
1584 const u16 *rsp_end = rspq + nrspq;
1585 struct fw_rss_ind_tbl_cmd cmd;
1586
1587 memset(&cmd, 0, sizeof(cmd));
1588 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1589 FW_CMD_REQUEST | FW_CMD_WRITE |
1590 FW_RSS_IND_TBL_CMD_VIID(viid));
1591 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1592
1593 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1594 while (n > 0) {
1595 int nq = min(n, 32);
1596 __be32 *qp = &cmd.iq0_to_iq2;
1597
1598 cmd.niqid = htons(nq);
1599 cmd.startidx = htons(start);
1600
1601 start += nq;
1602 n -= nq;
1603
1604 while (nq > 0) {
1605 unsigned int v;
1606
1607 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1608 if (++rsp >= rsp_end)
1609 rsp = rspq;
1610 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1611 if (++rsp >= rsp_end)
1612 rsp = rspq;
1613 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1614 if (++rsp >= rsp_end)
1615 rsp = rspq;
1616
1617 *qp++ = htonl(v);
1618 nq -= 3;
1619 }
1620
1621 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1622 if (ret)
1623 return ret;
1624 }
1625 return 0;
1626}
1627
1628/**
1629 * t4_config_glbl_rss - configure the global RSS mode
1630 * @adapter: the adapter
1631 * @mbox: mbox to use for the FW command
1632 * @mode: global RSS mode
1633 * @flags: mode-specific flags
1634 *
1635 * Sets the global RSS mode.
1636 */
1637int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1638 unsigned int flags)
1639{
1640 struct fw_rss_glb_config_cmd c;
1641
1642 memset(&c, 0, sizeof(c));
1643 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1644 FW_CMD_REQUEST | FW_CMD_WRITE);
1645 c.retval_len16 = htonl(FW_LEN16(c));
1646 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1647 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1648 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1649 c.u.basicvirtual.mode_pkd =
1650 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1651 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1652 } else
1653 return -EINVAL;
1654 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1655}
1656
56d36be4
DM
1657/**
1658 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1659 * @adap: the adapter
1660 * @v4: holds the TCP/IP counter values
1661 * @v6: holds the TCP/IPv6 counter values
1662 *
1663 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1664 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1665 */
1666void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1667 struct tp_tcp_stats *v6)
1668{
1669 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1670
1671#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1672#define STAT(x) val[STAT_IDX(x)]
1673#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1674
1675 if (v4) {
1676 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1677 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1678 v4->tcpOutRsts = STAT(OUT_RST);
1679 v4->tcpInSegs = STAT64(IN_SEG);
1680 v4->tcpOutSegs = STAT64(OUT_SEG);
1681 v4->tcpRetransSegs = STAT64(RXT_SEG);
1682 }
1683 if (v6) {
1684 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1685 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1686 v6->tcpOutRsts = STAT(OUT_RST);
1687 v6->tcpInSegs = STAT64(IN_SEG);
1688 v6->tcpOutSegs = STAT64(OUT_SEG);
1689 v6->tcpRetransSegs = STAT64(RXT_SEG);
1690 }
1691#undef STAT64
1692#undef STAT
1693#undef STAT_IDX
1694}
1695
56d36be4
DM
1696/**
1697 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1698 * @adap: the adapter
1699 * @mtus: where to store the MTU values
1700 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1701 *
1702 * Reads the HW path MTU table.
1703 */
1704void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1705{
1706 u32 v;
1707 int i;
1708
1709 for (i = 0; i < NMTUS; ++i) {
1710 t4_write_reg(adap, TP_MTU_TABLE,
1711 MTUINDEX(0xff) | MTUVALUE(i));
1712 v = t4_read_reg(adap, TP_MTU_TABLE);
1713 mtus[i] = MTUVALUE_GET(v);
1714 if (mtu_log)
1715 mtu_log[i] = MTUWIDTH_GET(v);
1716 }
1717}
1718
1719/**
1720 * init_cong_ctrl - initialize congestion control parameters
1721 * @a: the alpha values for congestion control
1722 * @b: the beta values for congestion control
1723 *
1724 * Initialize the congestion control parameters.
1725 */
1726static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1727{
1728 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1729 a[9] = 2;
1730 a[10] = 3;
1731 a[11] = 4;
1732 a[12] = 5;
1733 a[13] = 6;
1734 a[14] = 7;
1735 a[15] = 8;
1736 a[16] = 9;
1737 a[17] = 10;
1738 a[18] = 14;
1739 a[19] = 17;
1740 a[20] = 21;
1741 a[21] = 25;
1742 a[22] = 30;
1743 a[23] = 35;
1744 a[24] = 45;
1745 a[25] = 60;
1746 a[26] = 80;
1747 a[27] = 100;
1748 a[28] = 200;
1749 a[29] = 300;
1750 a[30] = 400;
1751 a[31] = 500;
1752
1753 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1754 b[9] = b[10] = 1;
1755 b[11] = b[12] = 2;
1756 b[13] = b[14] = b[15] = b[16] = 3;
1757 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1758 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1759 b[28] = b[29] = 6;
1760 b[30] = b[31] = 7;
1761}
1762
1763/* The minimum additive increment value for the congestion control table */
1764#define CC_MIN_INCR 2U
1765
1766/**
1767 * t4_load_mtus - write the MTU and congestion control HW tables
1768 * @adap: the adapter
1769 * @mtus: the values for the MTU table
1770 * @alpha: the values for the congestion control alpha parameter
1771 * @beta: the values for the congestion control beta parameter
1772 *
1773 * Write the HW MTU table with the supplied MTUs and the high-speed
1774 * congestion control table with the supplied alpha, beta, and MTUs.
1775 * We write the two tables together because the additive increments
1776 * depend on the MTUs.
1777 */
1778void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1779 const unsigned short *alpha, const unsigned short *beta)
1780{
1781 static const unsigned int avg_pkts[NCCTRL_WIN] = {
1782 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1783 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1784 28672, 40960, 57344, 81920, 114688, 163840, 229376
1785 };
1786
1787 unsigned int i, w;
1788
1789 for (i = 0; i < NMTUS; ++i) {
1790 unsigned int mtu = mtus[i];
1791 unsigned int log2 = fls(mtu);
1792
1793 if (!(mtu & ((1 << log2) >> 2))) /* round */
1794 log2--;
1795 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1796 MTUWIDTH(log2) | MTUVALUE(mtu));
1797
1798 for (w = 0; w < NCCTRL_WIN; ++w) {
1799 unsigned int inc;
1800
1801 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1802 CC_MIN_INCR);
1803
1804 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1805 (w << 16) | (beta[w] << 13) | inc);
1806 }
1807 }
1808}
1809
56d36be4
DM
1810/**
1811 * get_mps_bg_map - return the buffer groups associated with a port
1812 * @adap: the adapter
1813 * @idx: the port index
1814 *
1815 * Returns a bitmap indicating which MPS buffer groups are associated
1816 * with the given port. Bit i is set if buffer group i is used by the
1817 * port.
1818 */
1819static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
1820{
1821 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
1822
1823 if (n == 0)
1824 return idx == 0 ? 0xf : 0;
1825 if (n == 1)
1826 return idx < 2 ? (3 << (2 * idx)) : 0;
1827 return 1 << idx;
1828}
1829
1830/**
1831 * t4_get_port_stats - collect port statistics
1832 * @adap: the adapter
1833 * @idx: the port index
1834 * @p: the stats structure to fill
1835 *
1836 * Collect statistics related to the given port from HW.
1837 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	/* Only buffer groups owned by this port report meaningful
	 * overflow/truncate counters; others are forced to zero below.
	 */
	u32 bgmap = get_mps_bg_map(adap, idx);

	/* Each statistic is a 64-bit counter read via its low/high register
	 * pair: GET_STAT addresses the per-port MPS block, GET_STAT_COM the
	 * common MPS statistics block.
	 */
#define GET_STAT(name) \
	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	/* Tx counters */
	p->tx_octets = GET_STAT(TX_PORT_BYTES);
	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
	/* Tx size histogram */
	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop = GET_STAT(TX_PORT_DROP);
	/* Tx pause and per-priority (PPP) pause frames */
	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);

	/* Rx counters */
	p->rx_octets = GET_STAT(RX_PORT_BYTES);
	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
	/* Rx error counters */
	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
	/* Rx size histogram */
	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
	/* Rx pause and per-priority (PPP) pause frames */
	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);

	/* per-buffer-group drop/truncate counters, masked by ownership */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
1910
56d36be4
DM
1911/**
1912 * t4_wol_magic_enable - enable/disable magic packet WoL
1913 * @adap: the adapter
1914 * @port: the physical port index
1915 * @addr: MAC address expected in magic packets, %NULL to disable
1916 *
1917 * Enables/disables magic packet wake-on-LAN for the selected port.
1918 */
1919void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
1920 const u8 *addr)
1921{
1922 if (addr) {
1923 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
1924 (addr[2] << 24) | (addr[3] << 16) |
1925 (addr[4] << 8) | addr[5]);
1926 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
1927 (addr[0] << 8) | addr[1]);
1928 }
1929 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
1930 addr ? MAGICEN : 0);
1931}
1932
1933/**
1934 * t4_wol_pat_enable - enable/disable pattern-based WoL
1935 * @adap: the adapter
1936 * @port: the physical port index
1937 * @map: bitmap of which HW pattern filters to set
1938 * @mask0: byte mask for bytes 0-63 of a packet
1939 * @mask1: byte mask for bytes 64-127 of a packet
1940 * @crc: Ethernet CRC for selected bytes
1941 * @enable: enable/disable switch
1942 *
1943 * Sets the pattern filters indicated in @map to mask out the bytes
1944 * specified in @mask0/@mask1 in received packets and compare the CRC of
1945 * the resulting packet against @crc. If @enable is %true pattern-based
1946 * WoL is enabled, otherwise disabled.
1947 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;

	/* Disabling only clears the pattern-enable bit; previously
	 * programmed filters are left in place.
	 */
	if (!enable) {
		t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
				 PATEN, 0);
		return 0;
	}
	/* only 8 pattern filters are selectable via @map */
	if (map > 0xff)
		return -EINVAL;

#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)

	/* DATA1..DATA3 carry the upper 96 bits of the 128-bit byte mask;
	 * they are shared by every pattern written in the loop below.
	 */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
		/* a still-set BUSY right after the flush means the EPIO
		 * engine did not accept the write
		 */
		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
			return -ETIMEDOUT;

		/* write CRC; CRC slots live at offset 32 past the masks */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
	return 0;
}
1990
/* Fill in the common header of FW command @var: opcode FW_<cmd>_CMD, the
 * request flag, the READ/WRITE flag @rd_wr, and the command length in
 * 16-byte units.  Note: this does NOT zero the rest of the structure;
 * callers must initialize any remaining fields themselves.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
1996
8caa1e84
VP
/**
 *	t4_fwaddrspace_write - write to an address in the FW address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues a FW LDST command to write @val at @addr within the
 *	firmware's address space.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_WRITE |
			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.addrval.addr = htonl(addr);
	c.u.addrval.val = htonl(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2012
49ce9c2c 2013/**
8caa1e84
VP
2014 * t4_mem_win_read_len - read memory through PCIE memory window
2015 * @adap: the adapter
2016 * @addr: address of first byte requested aligned on 32b.
2017 * @data: len bytes to hold the data read
2018 * @len: amount of data to read from window. Must be <=
 2019 * MEMWIN0_APERTURE after adjusting for 16B alignment
 2020 * requirements of the memory window.
2021 *
2022 * Read len bytes of data from MC starting at @addr.
2023 */
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
{
	int i;
	int off;

	/*
	 * Align on a 16B boundary.
	 */
	off = addr & 15;
	/* the address must be 32-bit aligned and the aligned request must
	 * fit inside the window aperture
	 */
	if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
		return -EINVAL;

	/* position the window, then read back to flush the posted write */
	t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15);
	t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET);

	/* copy out one 32-bit word at a time */
	for (i = 0; i < len; i += 4)
		*data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));

	return 0;
}
2044
56d36be4
DM
2045/**
2046 * t4_mdio_rd - read a PHY register through MDIO
2047 * @adap: the adapter
2048 * @mbox: mailbox to use for the FW command
2049 * @phy_addr: the PHY address
2050 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2051 * @reg: the register to read
2052 * @valp: where to store the value
2053 *
2054 * Issues a FW command through the given mailbox to read a PHY register.
2055 */
2056int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2057 unsigned int mmd, unsigned int reg, u16 *valp)
2058{
2059 int ret;
2060 struct fw_ldst_cmd c;
2061
2062 memset(&c, 0, sizeof(c));
2063 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2064 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2065 c.cycles_to_len16 = htonl(FW_LEN16(c));
2066 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2067 FW_LDST_CMD_MMD(mmd));
2068 c.u.mdio.raddr = htons(reg);
2069
2070 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2071 if (ret == 0)
2072 *valp = ntohs(c.u.mdio.rval);
2073 return ret;
2074}
2075
2076/**
2077 * t4_mdio_wr - write a PHY register through MDIO
2078 * @adap: the adapter
2079 * @mbox: mailbox to use for the FW command
2080 * @phy_addr: the PHY address
2081 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2082 * @reg: the register to write
2083 * @valp: value to write
2084 *
2085 * Issues a FW command through the given mailbox to write a PHY register.
2086 */
2087int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2088 unsigned int mmd, unsigned int reg, u16 val)
2089{
2090 struct fw_ldst_cmd c;
2091
2092 memset(&c, 0, sizeof(c));
2093 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2094 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2095 c.cycles_to_len16 = htonl(FW_LEN16(c));
2096 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2097 FW_LDST_CMD_MMD(mmd));
2098 c.u.mdio.raddr = htons(reg);
2099 c.u.mdio.rval = htons(val);
2100
2101 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2102}
2103
2104/**
2105 * t4_fw_hello - establish communication with FW
2106 * @adap: the adapter
2107 * @mbox: mailbox to use for the FW command
2108 * @evt_mbox: mailbox to receive async FW events
2109 * @master: specifies the caller's willingness to be the device master
2110 * @state: returns the current device state
2111 *
2112 * Issues a command to establish communication with FW.
2113 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;

	/* NOTE(review): INIT_CMD fills only the common header; remaining
	 * fields of @c are not zeroed here — confirm FW ignores them for
	 * HELLO.
	 */
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_mbasyncnot = htonl(
		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
		FW_HELLO_CMD_MBASYNCNOT(evt_mbox));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0 && state) {
		/* the reply reuses the same word to report device state */
		u32 v = ntohl(c.err_to_mbasyncnot);
		if (v & FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else if (v & FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else
			*state = DEV_STATE_UNINIT;
	}
	return ret;
}
2139
2140/**
2141 * t4_fw_bye - end communication with FW
2142 * @adap: the adapter
2143 * @mbox: mailbox to use for the FW command
2144 *
2145 * Issues a command to terminate communication with FW.
2146 */
2147int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2148{
2149 struct fw_bye_cmd c;
2150
2151 INIT_CMD(c, BYE, WRITE);
2152 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2153}
2154
2155/**
2156 * t4_init_cmd - ask FW to initialize the device
2157 * @adap: the adapter
2158 * @mbox: mailbox to use for the FW command
2159 *
2160 * Issues a command to FW to partially initialize the device. This
2161 * performs initialization that generally doesn't depend on user input.
2162 */
2163int t4_early_init(struct adapter *adap, unsigned int mbox)
2164{
2165 struct fw_initialize_cmd c;
2166
2167 INIT_CMD(c, INITIALIZE, WRITE);
2168 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2169}
2170
2171/**
2172 * t4_fw_reset - issue a reset to FW
2173 * @adap: the adapter
2174 * @mbox: mailbox to use for the FW command
2175 * @reset: specifies the type of reset to perform
2176 *
2177 * Issues a reset command of the specified type to FW.
2178 */
2179int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2180{
2181 struct fw_reset_cmd c;
2182
2183 INIT_CMD(c, RESET, WRITE);
2184 c.val = htonl(reset);
2185 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2186}
2187
2188/**
2189 * t4_query_params - query FW or device parameters
2190 * @adap: the adapter
2191 * @mbox: mailbox to use for the FW command
2192 * @pf: the PF
2193 * @vf: the VF
2194 * @nparams: the number of parameters
2195 * @params: the parameter names
2196 * @val: the parameter values
2197 *
2198 * Reads the value of FW or device parameters. Up to 7 parameters can be
2199 * queried at once.
2200 */
2201int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2202 unsigned int vf, unsigned int nparams, const u32 *params,
2203 u32 *val)
2204{
2205 int i, ret;
2206 struct fw_params_cmd c;
2207 __be32 *p = &c.param[0].mnem;
2208
2209 if (nparams > 7)
2210 return -EINVAL;
2211
2212 memset(&c, 0, sizeof(c));
2213 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2214 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2215 FW_PARAMS_CMD_VFN(vf));
2216 c.retval_len16 = htonl(FW_LEN16(c));
2217 for (i = 0; i < nparams; i++, p += 2)
2218 *p = htonl(*params++);
2219
2220 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2221 if (ret == 0)
2222 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2223 *val++ = ntohl(*p);
2224 return ret;
2225}
2226
2227/**
2228 * t4_set_params - sets FW or device parameters
2229 * @adap: the adapter
2230 * @mbox: mailbox to use for the FW command
2231 * @pf: the PF
2232 * @vf: the VF
2233 * @nparams: the number of parameters
2234 * @params: the parameter names
2235 * @val: the parameter values
2236 *
2237 * Sets the value of FW or device parameters. Up to 7 parameters can be
2238 * specified at once.
2239 */
2240int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2241 unsigned int vf, unsigned int nparams, const u32 *params,
2242 const u32 *val)
2243{
2244 struct fw_params_cmd c;
2245 __be32 *p = &c.param[0].mnem;
2246
2247 if (nparams > 7)
2248 return -EINVAL;
2249
2250 memset(&c, 0, sizeof(c));
2251 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2252 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2253 FW_PARAMS_CMD_VFN(vf));
2254 c.retval_len16 = htonl(FW_LEN16(c));
2255 while (nparams--) {
2256 *p++ = htonl(*params++);
2257 *p++ = htonl(*val++);
2258 }
2259
2260 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2261}
2262
2263/**
2264 * t4_cfg_pfvf - configure PF/VF resource limits
2265 * @adap: the adapter
2266 * @mbox: mailbox to use for the FW command
2267 * @pf: the PF being configured
2268 * @vf: the VF being configured
2269 * @txq: the max number of egress queues
2270 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2271 * @rxqi: the max number of interrupt-capable ingress queues
2272 * @rxq: the max number of interruptless ingress queues
2273 * @tc: the PCI traffic class
2274 * @vi: the max number of virtual interfaces
2275 * @cmask: the channel access rights mask for the PF/VF
2276 * @pmask: the port access rights mask for the PF/VF
2277 * @nexact: the maximum number of exact MPS filters
2278 * @rcaps: read capabilities
2279 * @wxcaps: write/execute capabilities
2280 *
2281 * Configures resource limits and capabilities for a physical or virtual
2282 * function.
2283 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
			    FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	/* ingress queue limits: interrupt-capable vs interruptless */
	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
			       FW_PFVF_CMD_NIQ(rxq));
	/* channel/port access rights and egress queue limit */
	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
			      FW_PFVF_CMD_PMASK(pmask) |
			      FW_PFVF_CMD_NEQ(txq));
	/* traffic class, VI count, exact-match MPS filter count */
	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
				FW_PFVF_CMD_NEXACTF(nexact));
	/* capability masks and Ethernet/control egress queue limit */
	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2309
2310/**
2311 * t4_alloc_vi - allocate a virtual interface
2312 * @adap: the adapter
2313 * @mbox: mailbox to use for the FW command
2314 * @port: physical port associated with the VI
2315 * @pf: the PF owning the VI
2316 * @vf: the VF owning the VI
2317 * @nmac: number of MAC addresses needed (1 to 5)
2318 * @mac: the MAC addresses of the VI
2319 * @rss_size: size of RSS table slice associated with this VI
2320 *
2321 * Allocates a virtual interface for the given physical port. If @mac is
2322 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2323 * @mac should be large enough to hold @nmac Ethernet addresses, they are
2324 * stored consecutively so the space needed is @nmac * 6 bytes.
2325 * Returns a negative error number or the non-negative VI id.
2326 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	/* FW encodes the count as (number of MAC addresses - 1) */
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/* copy out the extra addresses; each case deliberately
		 * falls through to also pick up the lower-numbered ones
		 */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}
2363
56d36be4
DM
2364/**
2365 * t4_set_rxmode - set Rx properties of a virtual interface
2366 * @adap: the adapter
2367 * @mbox: mailbox to use for the FW command
2368 * @viid: the VI id
2369 * @mtu: the new MTU or -1
2370 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2371 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2372 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
f8f5aafa 2373 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
56d36be4
DM
2374 * @sleep_ok: if true we may sleep while awaiting command completion
2375 *
2376 * Sets Rx properties of a virtual interface.
2377 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values: a negative argument means "no change" and
	 * maps to each field's all-ones sentinel
	 */
	if (mtu < 0)
		mtu = FW_RXMODE_MTU_NO_CHG;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = htonl(FW_LEN16(c));
	/* pack all five settings into a single command word */
	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
				  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
				  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
				  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
				  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
2407
2408/**
2409 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2410 * @adap: the adapter
2411 * @mbox: mailbox to use for the FW command
2412 * @viid: the VI id
2413 * @free: if true any existing filters for this VI id are first removed
2414 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2415 * @addr: the MAC address(es)
2416 * @idx: where to store the index of each allocated filter
2417 * @hash: pointer to hash address filter bitmap
2418 * @sleep_ok: call is allowed to sleep
2419 *
2420 * Allocates an exact-match filter for each of the supplied addresses and
2421 * sets it to the corresponding address. If @idx is not %NULL it should
2422 * have at least @naddr entries, each of which will be set to the index of
2423 * the filter allocated for the corresponding MAC address. If a filter
2424 * could not be allocated for an address its index is set to 0xffff.
2425 * If @hash is not %NULL addresses that fail to allocate an exact filter
2426 * are hashed and update the hash filter bitmap pointed at by @hash.
2427 *
2428 * Returns a negative error number or the number of filters allocated.
2429 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int i, ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p;

	/* the command carries at most 7 exact-match entries */
	if (naddr > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
			     FW_VI_MAC_CMD_VIID(viid));
	/* two MAC entries fit per 16-byte length unit */
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
				    FW_CMD_LEN16((naddr + 2) / 2));

	/* request an allocation for each supplied address */
	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
				       FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret)
		return ret;

	/* walk the reply: count successful exact allocations and divert
	 * failures into the inexact hash bitmap if one was supplied
	 */
	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));

		if (idx)
			idx[i] = index >= NEXACT_MAC ? 0xffff : index;
		if (index < NEXACT_MAC)
			ret++;
		else if (hash)
			*hash |= (1ULL << hash_mac_addr(addr[i]));
	}
	return ret;
}
2470
2471/**
2472 * t4_change_mac - modifies the exact-match filter for a MAC address
2473 * @adap: the adapter
2474 * @mbox: mailbox to use for the FW command
2475 * @viid: the VI id
2476 * @idx: index of existing filter for old value of MAC address, or -1
2477 * @addr: the new MAC address value
2478 * @persist: whether a new MAC allocation should be persistent
2479 * @add_smt: if true also add the address to the HW SMT
2480 *
2481 * Modifies an exact-match filter and sets it to the new MAC address.
2482 * Note that in general it is not possible to modify the value of a given
2483 * filter so the generic way to modify an address filter is to free the one
2484 * being used by the old address value and allocate a new filter for the
2485 * new address value. @idx can be -1 if the address is a new addition.
2486 *
2487 * Returns a negative error number or the index of the filter with the new
2488 * MAC value.
2489 */
2490int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2491 int idx, const u8 *addr, bool persist, bool add_smt)
2492{
2493 int ret, mode;
2494 struct fw_vi_mac_cmd c;
2495 struct fw_vi_mac_exact *p = c.u.exact;
2496
2497 if (idx < 0) /* new allocation */
2498 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2499 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2500
2501 memset(&c, 0, sizeof(c));
2502 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2503 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2504 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2505 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2506 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2507 FW_VI_MAC_CMD_IDX(idx));
2508 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2509
2510 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2511 if (ret == 0) {
2512 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2513 if (ret >= NEXACT_MAC)
2514 ret = -ENOMEM;
2515 }
2516 return ret;
2517}
2518
2519/**
2520 * t4_set_addr_hash - program the MAC inexact-match hash filter
2521 * @adap: the adapter
2522 * @mbox: mailbox to use for the FW command
2523 * @viid: the VI id
2524 * @ucast: whether the hash filter should also match unicast addresses
2525 * @vec: the value to be written to the hash filter
2526 * @sleep_ok: call is allowed to sleep
2527 *
2528 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2529 */
2530int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2531 bool ucast, u64 vec, bool sleep_ok)
2532{
2533 struct fw_vi_mac_cmd c;
2534
2535 memset(&c, 0, sizeof(c));
2536 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2537 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
2538 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2539 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
2540 FW_CMD_LEN16(1));
2541 c.u.hash.hashvec = cpu_to_be64(vec);
2542 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2543}
2544
2545/**
2546 * t4_enable_vi - enable/disable a virtual interface
2547 * @adap: the adapter
2548 * @mbox: mailbox to use for the FW command
2549 * @viid: the VI id
2550 * @rx_en: 1=enable Rx, 0=disable Rx
2551 * @tx_en: 1=enable Tx, 0=disable Tx
2552 *
2553 * Enables/disables a virtual interface.
2554 */
2555int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2556 bool rx_en, bool tx_en)
2557{
2558 struct fw_vi_enable_cmd c;
2559
2560 memset(&c, 0, sizeof(c));
2561 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2562 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2563 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2564 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2565 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2566}
2567
2568/**
2569 * t4_identify_port - identify a VI's port by blinking its LED
2570 * @adap: the adapter
2571 * @mbox: mailbox to use for the FW command
2572 * @viid: the VI id
2573 * @nblinks: how many times to blink LED at 2.5 Hz
2574 *
2575 * Identifies a VI's port by blinking its LED.
2576 */
2577int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2578 unsigned int nblinks)
2579{
2580 struct fw_vi_enable_cmd c;
2581
2582 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2583 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2584 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2585 c.blinkdur = htons(nblinks);
2586 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
56d36be4
DM
2587}
2588
2589/**
2590 * t4_iq_free - free an ingress queue and its FLs
2591 * @adap: the adapter
2592 * @mbox: mailbox to use for the FW command
2593 * @pf: the PF owning the queues
2594 * @vf: the VF owning the queues
2595 * @iqtype: the ingress queue type
2596 * @iqid: ingress queue id
2597 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2598 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2599 *
2600 * Frees an ingress queue and its associated FLs, if any.
2601 */
2602int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2603 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2604 unsigned int fl0id, unsigned int fl1id)
2605{
2606 struct fw_iq_cmd c;
2607
2608 memset(&c, 0, sizeof(c));
2609 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2610 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2611 FW_IQ_CMD_VFN(vf));
2612 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2613 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2614 c.iqid = htons(iqid);
2615 c.fl0id = htons(fl0id);
2616 c.fl1id = htons(fl1id);
2617 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2618}
2619
2620/**
2621 * t4_eth_eq_free - free an Ethernet egress queue
2622 * @adap: the adapter
2623 * @mbox: mailbox to use for the FW command
2624 * @pf: the PF owning the queue
2625 * @vf: the VF owning the queue
2626 * @eqid: egress queue id
2627 *
2628 * Frees an Ethernet egress queue.
2629 */
2630int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2631 unsigned int vf, unsigned int eqid)
2632{
2633 struct fw_eq_eth_cmd c;
2634
2635 memset(&c, 0, sizeof(c));
2636 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2637 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2638 FW_EQ_ETH_CMD_VFN(vf));
2639 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2640 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2641 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2642}
2643
2644/**
2645 * t4_ctrl_eq_free - free a control egress queue
2646 * @adap: the adapter
2647 * @mbox: mailbox to use for the FW command
2648 * @pf: the PF owning the queue
2649 * @vf: the VF owning the queue
2650 * @eqid: egress queue id
2651 *
2652 * Frees a control egress queue.
2653 */
2654int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2655 unsigned int vf, unsigned int eqid)
2656{
2657 struct fw_eq_ctrl_cmd c;
2658
2659 memset(&c, 0, sizeof(c));
2660 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2661 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2662 FW_EQ_CTRL_CMD_VFN(vf));
2663 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2664 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2665 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2666}
2667
2668/**
2669 * t4_ofld_eq_free - free an offload egress queue
2670 * @adap: the adapter
2671 * @mbox: mailbox to use for the FW command
2672 * @pf: the PF owning the queue
2673 * @vf: the VF owning the queue
2674 * @eqid: egress queue id
2675 *
 2676 * Frees an offload egress queue.
2677 */
2678int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2679 unsigned int vf, unsigned int eqid)
2680{
2681 struct fw_eq_ofld_cmd c;
2682
2683 memset(&c, 0, sizeof(c));
2684 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2685 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2686 FW_EQ_OFLD_CMD_VFN(vf));
2687 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2688 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2689 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2690}
2691
2692/**
2693 * t4_handle_fw_rpl - process a FW reply message
2694 * @adap: the adapter
2695 * @rpl: start of the FW message
2696 *
2697 * Processes a FW message, such as link state change messages.
2698 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	/* the opcode is always the first byte of a FW message */
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		/* map the FW channel id to the driver's port index */
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

		/* decode pause and speed from the status word */
		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			/* propagate the link change to the OS layer */
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			/* propagate the module change to the OS layer */
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}
2739
2740static void __devinit get_pci_mode(struct adapter *adapter,
2741 struct pci_params *p)
2742{
2743 u16 val;
2744 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
2745
2746 if (pcie_cap) {
2747 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
2748 &val);
2749 p->speed = val & PCI_EXP_LNKSTA_CLS;
2750 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
2751 }
2752}
2753
2754/**
2755 * init_link_config - initialize a link's SW state
2756 * @lc: structure holding the link state
2757 * @caps: link capabilities
2758 *
2759 * Initializes the SW state maintained for each link, including the link's
2760 * capabilities and default speed/flow-control/autonegotiation settings.
2761 */
2762static void __devinit init_link_config(struct link_config *lc,
2763 unsigned int caps)
2764{
2765 lc->supported = caps;
2766 lc->requested_speed = 0;
2767 lc->speed = 0;
2768 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
2769 if (lc->supported & FW_PORT_CAP_ANEG) {
2770 lc->advertising = lc->supported & ADVERT_MASK;
2771 lc->autoneg = AUTONEG_ENABLE;
2772 lc->requested_fc |= PAUSE_AUTONEG;
2773 } else {
2774 lc->advertising = 0;
2775 lc->autoneg = AUTONEG_DISABLE;
2776 }
2777}
2778
204dc3c0 2779int t4_wait_dev_ready(struct adapter *adap)
56d36be4
DM
2780{
2781 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
2782 return 0;
2783 msleep(500);
2784 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
2785}
2786
900a6596
DM
2787static int __devinit get_flash_params(struct adapter *adap)
2788{
2789 int ret;
2790 u32 info;
2791
2792 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
2793 if (!ret)
2794 ret = sf1_read(adap, 3, 0, 1, &info);
2795 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
2796 if (ret)
2797 return ret;
2798
2799 if ((info & 0xff) != 0x20) /* not a Numonix flash */
2800 return -EINVAL;
2801 info >>= 16; /* log2 of size */
2802 if (info >= 0x14 && info < 0x18)
2803 adap->params.sf_nsec = 1 << (info - 16);
2804 else if (info == 0x18)
2805 adap->params.sf_nsec = 64;
2806 else
2807 return -EINVAL;
2808 adap->params.sf_size = 1 << info;
2809 adap->params.sf_fw_start =
2810 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
2811 return 0;
2812}
2813
56d36be4
DM
2814/**
2815 * t4_prep_adapter - prepare SW and HW for operation
2816 * @adapter: the adapter
2817 * @reset: if true perform a HW reset
2818 *
2819 * Initialize adapter SW state for the various HW modules, set initial
2820 * values for some adapter tunables, take PHYs out of reset, and
2821 * initialize the MDIO interface.
2822 */
2823int __devinit t4_prep_adapter(struct adapter *adapter)
2824{
2825 int ret;
2826
204dc3c0 2827 ret = t4_wait_dev_ready(adapter);
56d36be4
DM
2828 if (ret < 0)
2829 return ret;
2830
2831 get_pci_mode(adapter, &adapter->params.pci);
2832 adapter->params.rev = t4_read_reg(adapter, PL_REV);
2833
900a6596
DM
2834 ret = get_flash_params(adapter);
2835 if (ret < 0) {
2836 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
2837 return ret;
2838 }
2839
56d36be4
DM
2840 ret = get_vpd_params(adapter, &adapter->params.vpd);
2841 if (ret < 0)
2842 return ret;
2843
2844 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
2845
2846 /*
2847 * Default port for debugging in case we can't reach FW.
2848 */
2849 adapter->params.nports = 1;
2850 adapter->params.portvec = 1;
2851 return 0;
2852}
2853
2854int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
2855{
2856 u8 addr[6];
2857 int ret, i, j = 0;
2858 struct fw_port_cmd c;
f796564a 2859 struct fw_rss_vi_config_cmd rvc;
56d36be4
DM
2860
2861 memset(&c, 0, sizeof(c));
f796564a 2862 memset(&rvc, 0, sizeof(rvc));
56d36be4
DM
2863
2864 for_each_port(adap, i) {
2865 unsigned int rss_size;
2866 struct port_info *p = adap2pinfo(adap, i);
2867
2868 while ((adap->params.portvec & (1 << j)) == 0)
2869 j++;
2870
2871 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
2872 FW_CMD_REQUEST | FW_CMD_READ |
2873 FW_PORT_CMD_PORTID(j));
2874 c.action_to_len16 = htonl(
2875 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
2876 FW_LEN16(c));
2877 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2878 if (ret)
2879 return ret;
2880
2881 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
2882 if (ret < 0)
2883 return ret;
2884
2885 p->viid = ret;
2886 p->tx_chan = j;
2887 p->lport = j;
2888 p->rss_size = rss_size;
2889 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
2890 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
f21ce1c3 2891 adap->port[i]->dev_id = j;
56d36be4
DM
2892
2893 ret = ntohl(c.u.info.lstatus_to_modtype);
2894 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
2895 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
2896 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
a0881cab 2897 p->mod_type = FW_PORT_MOD_TYPE_NA;
56d36be4 2898
f796564a
DM
2899 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2900 FW_CMD_REQUEST | FW_CMD_READ |
2901 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
2902 rvc.retval_len16 = htonl(FW_LEN16(rvc));
2903 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
2904 if (ret)
2905 return ret;
2906 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
2907
56d36be4
DM
2908 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
2909 j++;
2910 }
2911 return 0;
2912}