/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
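
/*
 * Usage sketch (illustrative only, not part of the driver): poll the serial
 * flash controller's busy bit for up to 10 iterations, 10us apart, and
 * capture the register value observed at completion:
 *
 *	u32 val;
 *	int ret = t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 10, 10,
 *				      &val);
 *
 * A return of -EAGAIN means the bit never reached the requested polarity.
 */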

/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register. Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}
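
/*
 * Usage sketch (illustrative only): programming a small table of registers
 * in one call; t3_intr_enable() later in this file uses exactly this
 * pattern with its intr_en_avp[] table:
 *
 *	static const struct addr_val_pair avp[] = {
 *		{A_SG_INT_ENABLE, SGE_INTR_MASK},
 *		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
 *	};
 *	t3_write_regs(adapter, avp, ARRAY_SIZE(avp), 0);
 */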

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
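
/*
 * Usage sketch (illustrative only): t3_set_vlan_accel() later in this file
 * relies on this helper to update only the VLANEXTRACTIONENABLE bits of
 * A_TP_OUT_CONFIG while leaving the rest of the register untouched:
 *
 *	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
 *			 ports << S_VLANEXTRACTIONENABLE,
 *			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
 */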

/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
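
/*
 * Usage sketch (illustrative only): dumping the first four 64-bit words of
 * the PMRX MC7 through the backdoor interface:
 *
 *	u64 data[4];
 *	int ret = t3_mc7_bd_read(&adapter->pmrx, 0, ARRAY_SIZE(data), data);
 */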

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
	    V_CLKDIV(clkdiv);

	if (!(ai->caps & SUPPORTED_10000baseT_Full))
		val |= V_ST(1);
	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 10

/*
 * MI1 read/write operations for direct-addressed PHYs.
 */
static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};

/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
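
/*
 * Usage sketch (illustrative only): t3_phy_reset() below uses this helper
 * to clear BMCR_PDOWN and set BMCR_RESET in one read-modify-write cycle:
 *
 *	t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
 */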

/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}
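
/*
 * Usage sketch (illustrative only): advertising gigabit full duplex plus
 * symmetric pause before (re)starting auto-negotiation:
 *
 *	t3_phy_advertise(phy, ADVERTISED_1000baseT_Full | ADVERTISED_Pause);
 */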

/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex. This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}
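
/*
 * Usage sketch (illustrative only): forcing 100 Mb/s full duplex; a
 * negative speed or duplex argument leaves that setting unchanged:
 *
 *	t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
 *	t3_set_phy_speed_duplex(phy, -1, DUPLEX_FULL);	(duplex only)
 */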

static const struct adapter_info t3_adap_info[] = {
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};

/*
 * Return the adapter_info structure with a given index. Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

static const struct port_type_info port_types[] = {
	{NULL},
	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-XR"},
	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{NULL, CAPS_10G, "10GBASE-KX4"},
	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-SR"},
	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};

#undef CAPS_1G
#undef CAPS_10G

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);	/* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL   4
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability. A zero is written to the flag bit when the
 * address is written to the control register. The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
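
/*
 * Usage sketch (illustrative only): fetching the first 32-bit word of the
 * VPD structure; the address must be 4-byte aligned and below EEPROMSIZE:
 *
 *	u32 word;
 *	int ret = t3_seeprom_read(adapter, VPD_BASE, &word);
 */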

/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       cpu_to_le32(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
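
/*
 * Usage sketch (illustrative only): t3_get_fw_version() later in this file
 * reads the single word at FW_VERS_ADDR in host byte order:
 *
 *	t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
 */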

/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}

/**
 * t3_get_tp_version - read the TP SRAM version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol SRAM version from SRAM.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 * t3_check_tpsram_version - check the TP SRAM version
 * @adapter: the adapter
 * @must_load: set to 1 if loading a new microcode image is required
 *
 * Reads the protocol SRAM version and compares it with the version the
 * driver was compiled for.
 */
int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	*must_load = 1;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	if (major != TP_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver needs version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	else {
		*must_load = 0;
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 * t3_check_tpsram - check if provided protocol SRAM is compatible with
 * this driver
 * @adapter: the adapter
 * @tp_sram: the firmware image to write
 * @size: image size
 *
 * Checks if an adapter's TP SRAM image is compatible with the driver.
 * Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 * @must_load: set to 1 if loading a new FW image is required
 *
 * Checks if an adapter's FW is compatible with the driver. Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	*must_load = 1;
	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	if (major != FW_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong FW version(%u.%u), "
		       "driver needs version %u.%u\n", major, minor,
		       FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else if (minor < FW_VERSION_MINOR) {
		*must_load = 0;
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	} else {
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/**
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
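
/*
 * Note on the image checksum (sketch of the arithmetic): summing all 32-bit
 * big-endian words of a valid image, including the trailing 1's complement
 * checksum word, yields 0xffffffff modulo 2^32. An image builder therefore
 * stores ~sum(preceding words) in the last word, since x + ~x == 0xffffffff
 * for any 32-bit x:
 *
 *	for (csum = 0, i = 0; i < (size - 4) / 4; i++)
 *		csum += ntohl(p[i]);
 *	last_word = ~csum;
 */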

#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC. After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}

/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			phy->ops->reset(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}
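
/*
 * Usage sketch (illustrative only): enabling HW VLAN tag extraction on
 * ports 0 and 1:
 *
 *	t3_set_vlan_accel(adapter, (1 << 0) | (1 << 1), 1);
 */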
1218struct intr_info {
1219 unsigned int mask; /* bits to check in interrupt status */
1220 const char *msg; /* message to print or NULL */
1221 short stat_idx; /* stat counter to increment or -1 */
1222 unsigned short fatal:1; /* whether the condition reported is fatal */
1223};
1224
1225/**
1226 * t3_handle_intr_status - table driven interrupt handler
1227 * @adapter: the adapter that generated the interrupt
1228 * @reg: the interrupt status register to process
1229 * @mask: a mask to apply to the interrupt status
1230 * @acts: table of interrupt actions
1231 * @stats: statistics counters tracking interrupt occurences
1232 *
1233 * A table driven interrupt handler that applies a set of masks to an
1234 * interrupt status word and performs the corresponding actions if the
1235 * interrupts described by the mask have occured. The actions include
1236 * optionally printing a warning or alert message, and optionally
1237 * incrementing a stat counter. The table is terminated by an entry
1238 * specifying mask 0. Returns the number of fatal interrupt conditions.
1239 */
1240static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1241 unsigned int mask,
1242 const struct intr_info *acts,
1243 unsigned long *stats)
1244{
1245 int fatal = 0;
1246 unsigned int status = t3_read_reg(adapter, reg) & mask;
1247
1248 for (; acts->mask; ++acts) {
1249 if (!(status & acts->mask))
1250 continue;
1251 if (acts->fatal) {
1252 fatal++;
1253 CH_ALERT(adapter, "%s (0x%x)\n",
1254 acts->msg, status & acts->mask);
1255 } else if (acts->msg)
1256 CH_WARN(adapter, "%s (0x%x)\n",
1257 acts->msg, status & acts->mask);
1258 if (acts->stat_idx >= 0)
1259 stats[acts->stat_idx]++;
1260 }
1261 if (status) /* clear processed interrupts */
1262 t3_write_reg(adapter, reg, status);
1263 return fatal;
1264}
1265
1266#define SGE_INTR_MASK (F_RSPQDISABLED)
1267#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1268 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1269 F_NFASRCHFAIL)
1270#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1271#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1272 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1273 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1274#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1275 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1276 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1277 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1278 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1279 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1280#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1281 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1282 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
a2604be5 1283 V_BISTERR(M_BISTERR))
4d22de3e
DLR
1284#define ULPRX_INTR_MASK F_PARERR
1285#define ULPTX_INTR_MASK 0
1286#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
1287 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1288 F_ZERO_SWITCH_ERROR)
1289#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1290 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1291 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1292 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
1293#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1294 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1295 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1296#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1297 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1298 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1299#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1300 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1301 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1302 V_MCAPARERRENB(M_MCAPARERRENB))
1303#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1304 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1305 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1306 F_MPS0 | F_CPL_SWITCH)
1307
1308/*
1309 * Interrupt handler for the PCIX1 module.
1310 */
1311static void pci_intr_handler(struct adapter *adapter)
1312{
1313 static const struct intr_info pcix1_intr_info[] = {
4d22de3e
DLR
1314 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1315 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1316 {F_RCVTARABT, "PCI received target abort", -1, 1},
1317 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1318 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1319 {F_DETPARERR, "PCI detected parity error", -1, 1},
1320 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1321 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1322 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1323 1},
1324 {F_DETCORECCERR, "PCI correctable ECC error",
1325 STAT_PCI_CORR_ECC, 0},
1326 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1327 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1328 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1329 1},
1330 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1331 1},
1332 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1333 1},
1334 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1335 "error", -1, 1},
1336 {0}
1337 };
1338
1339 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1340 pcix1_intr_info, adapter->irq_stats))
1341 t3_fatal_err(adapter);
1342}
1343
1344/*
1345 * Interrupt handler for the PCIE module.
1346 */
1347static void pcie_intr_handler(struct adapter *adapter)
1348{
1349 static const struct intr_info pcie_intr_info[] = {
b5a44bcb 1350 {F_PEXERR, "PCI PEX error", -1, 1},
4d22de3e
DLR
1351 {F_UNXSPLCPLERRR,
1352 "PCI unexpected split completion DMA read error", -1, 1},
1353 {F_UNXSPLCPLERRC,
1354 "PCI unexpected split completion DMA command error", -1, 1},
1355 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1356 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1357 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1358 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1359 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1360 "PCI MSI-X table/PBA parity error", -1, 1},
1361 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1362 {0}
1363 };
1364
3eea3337
DLR
1365 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1366 CH_ALERT(adapter, "PEX error code 0x%x\n",
1367 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1368
4d22de3e
DLR
1369 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1370 pcie_intr_info, adapter->irq_stats))
1371 t3_fatal_err(adapter);
1372}
1373
1374/*
1375 * TP interrupt handler.
1376 */
1377static void tp_intr_handler(struct adapter *adapter)
1378{
1379 static const struct intr_info tp_intr_info[] = {
1380 {0xffffff, "TP parity error", -1, 1},
1381 {0x1000000, "TP out of Rx pages", -1, 1},
1382 {0x2000000, "TP out of Tx pages", -1, 1},
1383 {0}
1384 };
1385
a2604be5
DLR
1386 static struct intr_info tp_intr_info_t3c[] = {
1387 { 0x1ffffff, "TP parity error", -1, 1 },
1388 { F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1389 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1390 { 0 }
1391 };
1392
4d22de3e 1393 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
a2604be5
DLR
1394 adapter->params.rev < T3_REV_C ?
1395 tp_intr_info : tp_intr_info_t3c, NULL))
4d22de3e
DLR
1396 t3_fatal_err(adapter);
1397}
1398
1399/*
1400 * CIM interrupt handler.
1401 */
1402static void cim_intr_handler(struct adapter *adapter)
1403{
1404 static const struct intr_info cim_intr_info[] = {
1405 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1406 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1407 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1408 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1409 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1410 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1411 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1412 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1413 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1414 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1415 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1416 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1417 {0}
1418 };
1419
1420 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1421 cim_intr_info, NULL))
1422 t3_fatal_err(adapter);
1423}
1424
1425/*
1426 * ULP RX interrupt handler.
1427 */
1428static void ulprx_intr_handler(struct adapter *adapter)
1429{
1430 static const struct intr_info ulprx_intr_info[] = {
1431 {F_PARERR, "ULP RX parity error", -1, 1},
1432 {0}
1433 };
1434
1435 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1436 ulprx_intr_info, NULL))
1437 t3_fatal_err(adapter);
1438}
1439
1440/*
1441 * ULP TX interrupt handler.
1442 */
1443static void ulptx_intr_handler(struct adapter *adapter)
1444{
1445 static const struct intr_info ulptx_intr_info[] = {
1446 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1447 STAT_ULP_CH0_PBL_OOB, 0},
1448 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1449 STAT_ULP_CH1_PBL_OOB, 0},
1450 {0}
1451 };
1452
1453 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1454 ulptx_intr_info, adapter->irq_stats))
1455 t3_fatal_err(adapter);
1456}
1457
1458#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1459 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1460 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1461 F_ICSPI1_TX_FRAMING_ERROR)
1462#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1463 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1464 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1465 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1466
1467/*
1468 * PM TX interrupt handler.
1469 */
1470static void pmtx_intr_handler(struct adapter *adapter)
1471{
1472 static const struct intr_info pmtx_intr_info[] = {
1473 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1474 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1475 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1476 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1477 "PMTX ispi parity error", -1, 1},
1478 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1479 "PMTX ospi parity error", -1, 1},
1480 {0}
1481 };
1482
1483 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1484 pmtx_intr_info, NULL))
1485 t3_fatal_err(adapter);
1486}
1487
1488#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1489 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1490 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1491 F_IESPI1_TX_FRAMING_ERROR)
1492#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1493 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1494 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1495 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1496
1497/*
1498 * PM RX interrupt handler.
1499 */
1500static void pmrx_intr_handler(struct adapter *adapter)
1501{
1502 static const struct intr_info pmrx_intr_info[] = {
1503 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1504 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1505 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1506 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1507 "PMRX ispi parity error", -1, 1},
1508 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1509 "PMRX ospi parity error", -1, 1},
1510 {0}
1511 };
1512
1513 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1514 pmrx_intr_info, NULL))
1515 t3_fatal_err(adapter);
1516}
1517
1518/*
1519 * CPL switch interrupt handler.
1520 */
1521static void cplsw_intr_handler(struct adapter *adapter)
1522{
1523 static const struct intr_info cplsw_intr_info[] = {
1524/* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
1525 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1526 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1527 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1528 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1529 {0}
1530 };
1531
1532 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1533 cplsw_intr_info, NULL))
1534 t3_fatal_err(adapter);
1535}
1536
1537/*
1538 * MPS interrupt handler.
1539 */
1540static void mps_intr_handler(struct adapter *adapter)
1541{
1542 static const struct intr_info mps_intr_info[] = {
1543 {0x1ff, "MPS parity error", -1, 1},
1544 {0}
1545 };
1546
1547 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1548 mps_intr_info, NULL))
1549 t3_fatal_err(adapter);
1550}
1551
1552#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1553
1554/*
1555 * MC7 interrupt handler.
1556 */
1557static void mc7_intr_handler(struct mc7 *mc7)
1558{
1559 struct adapter *adapter = mc7->adapter;
1560 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1561
1562 if (cause & F_CE) {
1563 mc7->stats.corr_err++;
1564 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1565 "data 0x%x 0x%x 0x%x\n", mc7->name,
1566 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1567 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1568 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1569 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1570 }
1571
1572 if (cause & F_UE) {
1573 mc7->stats.uncorr_err++;
1574 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1575 "data 0x%x 0x%x 0x%x\n", mc7->name,
1576 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1577 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1578 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1579 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1580 }
1581
1582 if (G_PE(cause)) {
1583 mc7->stats.parity_err++;
1584 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1585 mc7->name, G_PE(cause));
1586 }
1587
1588 if (cause & F_AE) {
1589 u32 addr = 0;
1590
1591 if (adapter->params.rev > 0)
1592 addr = t3_read_reg(adapter,
1593 mc7->offset + A_MC7_ERR_ADDR);
1594 mc7->stats.addr_err++;
1595 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1596 mc7->name, addr);
1597 }
1598
1599 if (cause & MC7_INTR_FATAL)
1600 t3_fatal_err(adapter);
1601
1602 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1603}
1604
1605#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1606 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1607/*
1608 * XGMAC interrupt handler.
1609 */
1610static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1611{
1612 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1613 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1614
1615 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1616 mac->stats.tx_fifo_parity_err++;
1617 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1618 }
1619 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1620 mac->stats.rx_fifo_parity_err++;
1621 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1622 }
1623 if (cause & F_TXFIFO_UNDERRUN)
1624 mac->stats.tx_fifo_urun++;
1625 if (cause & F_RXFIFO_OVERFLOW)
1626 mac->stats.rx_fifo_ovfl++;
1627 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1628 mac->stats.serdes_signal_loss++;
1629 if (cause & F_XAUIPCSCTCERR)
1630 mac->stats.xaui_pcs_ctc_err++;
1631 if (cause & F_XAUIPCSALIGNCHANGE)
1632 mac->stats.xaui_pcs_align_change++;
1633
1634 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1635 if (cause & XGM_INTR_FATAL)
1636 t3_fatal_err(adap);
1637 return cause != 0;
1638}
1639
1640/*
1641 * Interrupt handler for PHY events.
1642 */
1643int t3_phy_intr_handler(struct adapter *adapter)
1644{
1ca03cbc 1645 u32 mask, gpi = adapter_info(adapter)->gpio_intr;
4d22de3e
DLR
1646 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1647
1648 for_each_port(adapter, i) {
1ca03cbc
DLR
1649 struct port_info *p = adap2pinfo(adapter, i);
1650
1651 mask = gpi - (gpi & (gpi - 1));
1652 gpi -= mask;
1653
1654 if (!(p->port_type->caps & SUPPORTED_IRQ))
1655 continue;
1656
1657 if (cause & mask) {
1658 int phy_cause = p->phy.ops->intr_handler(&p->phy);
4d22de3e
DLR
1659
1660 if (phy_cause & cphy_cause_link_change)
1661 t3_link_changed(adapter, i);
1662 if (phy_cause & cphy_cause_fifo_error)
1ca03cbc 1663 p->phy.fifo_errors++;
4d22de3e
DLR
1664 }
1665 }
1666
1667 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1668 return 0;
1669}
1670
1671/*
1672 * T3 slow path (non-data) interrupt handler.
1673 */
1674int t3_slow_intr_handler(struct adapter *adapter)
1675{
1676 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1677
1678 cause &= adapter->slow_intr_mask;
1679 if (!cause)
1680 return 0;
1681 if (cause & F_PCIM0) {
1682 if (is_pcie(adapter))
1683 pcie_intr_handler(adapter);
1684 else
1685 pci_intr_handler(adapter);
1686 }
1687 if (cause & F_SGE3)
1688 t3_sge_err_intr_handler(adapter);
1689 if (cause & F_MC7_PMRX)
1690 mc7_intr_handler(&adapter->pmrx);
1691 if (cause & F_MC7_PMTX)
1692 mc7_intr_handler(&adapter->pmtx);
1693 if (cause & F_MC7_CM)
1694 mc7_intr_handler(&adapter->cm);
1695 if (cause & F_CIM)
1696 cim_intr_handler(adapter);
1697 if (cause & F_TP1)
1698 tp_intr_handler(adapter);
1699 if (cause & F_ULP2_RX)
1700 ulprx_intr_handler(adapter);
1701 if (cause & F_ULP2_TX)
1702 ulptx_intr_handler(adapter);
1703 if (cause & F_PM1_RX)
1704 pmrx_intr_handler(adapter);
1705 if (cause & F_PM1_TX)
1706 pmtx_intr_handler(adapter);
1707 if (cause & F_CPL_SWITCH)
1708 cplsw_intr_handler(adapter);
1709 if (cause & F_MPS0)
1710 mps_intr_handler(adapter);
1711 if (cause & F_MC5A)
1712 t3_mc5_intr_handler(&adapter->mc5);
1713 if (cause & F_XGMAC0_0)
1714 mac_intr_handler(adapter, 0);
1715 if (cause & F_XGMAC0_1)
1716 mac_intr_handler(adapter, 1);
1717 if (cause & F_T3DBG)
1718 t3_os_ext_intr_handler(adapter);
1719
1720 /* Clear the interrupts just processed. */
1721 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1722 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1723 return 1;
1724}
1725
1726/**
1727 * t3_intr_enable - enable interrupts
1728 * @adapter: the adapter whose interrupts should be enabled
1729 *
1730 * Enable interrupts by setting the interrupt enable registers of the
1731 * various HW modules and then enabling the top-level interrupt
1732 * concentrator.
1733 */
1734void t3_intr_enable(struct adapter *adapter)
1735{
1736 static const struct addr_val_pair intr_en_avp[] = {
1737 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1738 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1739 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1740 MC7_INTR_MASK},
1741 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1742 MC7_INTR_MASK},
1743 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1744 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1745 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1746 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1747 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1748 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1749 };
1750
1751 adapter->slow_intr_mask = PL_INTR_MASK;
1752
1753 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1754 t3_write_reg(adapter, A_TP_INT_ENABLE,
1755 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1756
1757 if (adapter->params.rev > 0) {
1758 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1759 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1760 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1761 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1762 F_PBL_BOUND_ERR_CH1);
1763 } else {
1764 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1765 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1766 }
1767
1768 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1769 adapter_info(adapter)->gpio_intr);
1770 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1771 adapter_info(adapter)->gpio_intr);
1772 if (is_pcie(adapter))
1773 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1774 else
1775 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1776 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1777 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1778}
1779
1780/**
1781 * t3_intr_disable - disable a card's interrupts
1782 * @adapter: the adapter whose interrupts should be disabled
1783 *
1784 * Disable interrupts. We only disable the top-level interrupt
1785 * concentrator and the SGE data interrupts.
1786 */
1787void t3_intr_disable(struct adapter *adapter)
1788{
1789 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1790 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1791 adapter->slow_intr_mask = 0;
1792}
1793
1794/**
1795 * t3_intr_clear - clear all interrupts
1796 * @adapter: the adapter whose interrupts should be cleared
1797 *
1798 * Clears all interrupts.
1799 */
1800void t3_intr_clear(struct adapter *adapter)
1801{
1802 static const unsigned int cause_reg_addr[] = {
1803 A_SG_INT_CAUSE,
1804 A_SG_RSPQ_FL_STATUS,
1805 A_PCIX_INT_CAUSE,
1806 A_MC7_INT_CAUSE,
1807 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1808 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1809 A_CIM_HOST_INT_CAUSE,
1810 A_TP_INT_CAUSE,
1811 A_MC5_DB_INT_CAUSE,
1812 A_ULPRX_INT_CAUSE,
1813 A_ULPTX_INT_CAUSE,
1814 A_CPL_INTR_CAUSE,
1815 A_PM1_TX_INT_CAUSE,
1816 A_PM1_RX_INT_CAUSE,
1817 A_MPS_INT_CAUSE,
1818 A_T3DBG_INT_CAUSE,
1819 };
1820 unsigned int i;
1821
1822 /* Clear PHY and MAC interrupts for each port. */
1823 for_each_port(adapter, i)
1824 t3_port_intr_clear(adapter, i);
1825
1826 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1827 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1828
1829 if (is_pcie(adapter))
1830 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
1831 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1832 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1833}
1834
1835/**
1836 * t3_port_intr_enable - enable port-specific interrupts
1837 * @adapter: associated adapter
1838 * @idx: index of port whose interrupts should be enabled
1839 *
1840 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1841 * adapter port.
1842 */
1843void t3_port_intr_enable(struct adapter *adapter, int idx)
1844{
1845 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1846
1847 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1848 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1849 phy->ops->intr_enable(phy);
1850}
1851
1852/**
1853 * t3_port_intr_disable - disable port-specific interrupts
1854 * @adapter: associated adapter
1855 * @idx: index of port whose interrupts should be disabled
1856 *
1857 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1858 * adapter port.
1859 */
1860void t3_port_intr_disable(struct adapter *adapter, int idx)
1861{
1862 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1863
1864 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1865 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1866 phy->ops->intr_disable(phy);
1867}
1868
1869/**
1870 * t3_port_intr_clear - clear port-specific interrupts
1871 * @adapter: associated adapter
1872 * @idx: index of port whose interrupts to clear
1873 *
1874 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1875 * adapter port.
1876 */
1877void t3_port_intr_clear(struct adapter *adapter, int idx)
1878{
1879 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1880
1881 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1882 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1883 phy->ops->intr_clear(phy);
1884}
1885
1886#define SG_CONTEXT_CMD_ATTEMPTS 100
1887
1888/**
1889 * t3_sge_write_context - write an SGE context
1890 * @adapter: the adapter
1891 * @id: the context id
1892 * @type: the context type
1893 *
1894 * Program an SGE context with the values already loaded in the
1895 * CONTEXT_DATA0..3 registers.
1896 */
1897static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1898 unsigned int type)
1899{
1900 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1901 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1902 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1903 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1904 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1905 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1906 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 1907 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
1908}
1909
1910/**
1911 * t3_sge_init_ecntxt - initialize an SGE egress context
1912 * @adapter: the adapter to configure
1913 * @id: the context id
1914 * @gts_enable: whether to enable GTS for the context
1915 * @type: the egress context type
1916 * @respq: associated response queue
1917 * @base_addr: base address of queue
1918 * @size: number of queue entries
1919 * @token: uP token
1920 * @gen: initial generation value for the context
1921 * @cidx: consumer pointer
1922 *
1923 * Initialize an SGE egress context and make it ready for use. If the
1924 * platform allows concurrent context operations, the caller is
1925 * responsible for appropriate locking.
1926 */
1927int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1928 enum sge_context_type type, int respq, u64 base_addr,
1929 unsigned int size, unsigned int token, int gen,
1930 unsigned int cidx)
1931{
1932 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1933
1934 if (base_addr & 0xfff) /* must be 4K aligned */
1935 return -EINVAL;
1936 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1937 return -EBUSY;
1938
1939 base_addr >>= 12;
1940 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1941 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1942 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1943 V_EC_BASE_LO(base_addr & 0xffff));
1944 base_addr >>= 16;
1945 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1946 base_addr >>= 32;
1947 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1948 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1949 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1950 F_EC_VALID);
1951 return t3_sge_write_context(adapter, id, F_EGRESS);
1952}
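
/*
 * A minimal caller sketch, not from this file: the SG_CONTEXT_* registers
 * are shared, so a caller would typically serialize context setup with a
 * lock of its own.  The lock and the queue parameters below are
 * assumptions for illustration only.
 */
#if 0	/* example only */
static int example_init_eth_txq(struct adapter *adap, unsigned int id,
				dma_addr_t phys, unsigned int nentries)
{
	int ret;

	spin_lock_irq(&adap->sge.reg_lock);	/* hypothetical lock */
	ret = t3_sge_init_ecntxt(adap, id, 1 /* GTS */, SGE_CNTXT_ETH,
				 0 /* respq */, phys, nentries,
				 id /* uP token */, 1 /* gen */, 0 /* cidx */);
	spin_unlock_irq(&adap->sge.reg_lock);
	return ret;
}
#endif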
1953
1954/**
1955 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1956 * @adapter: the adapter to configure
1957 * @id: the context id
1958 * @gts_enable: whether to enable GTS for the context
1959 * @base_addr: base address of queue
1960 * @size: number of queue entries
1961 * @bsize: size of each buffer for this queue
1962 * @cong_thres: threshold to signal congestion to upstream producers
1963 * @gen: initial generation value for the context
1964 * @cidx: consumer pointer
1965 *
1966 * Initialize an SGE free list context and make it ready for use. The
1967 * caller is responsible for ensuring only one context operation occurs
1968 * at a time.
1969 */
1970int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1971 int gts_enable, u64 base_addr, unsigned int size,
1972 unsigned int bsize, unsigned int cong_thres, int gen,
1973 unsigned int cidx)
1974{
1975 if (base_addr & 0xfff) /* must be 4K aligned */
1976 return -EINVAL;
1977 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1978 return -EBUSY;
1979
1980 base_addr >>= 12;
1981 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1982 base_addr >>= 32;
1983 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1984 V_FL_BASE_HI((u32) base_addr) |
1985 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1986 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1987 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1988 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1989 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1990 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1991 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1992 return t3_sge_write_context(adapter, id, F_FREELIST);
1993}
1994
1995/**
1996 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1997 * @adapter: the adapter to configure
1998 * @id: the context id
1999 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2000 * @base_addr: base address of queue
2001 * @size: number of queue entries
2002 * @fl_thres: threshold for selecting the normal or jumbo free list
2003 * @gen: initial generation value for the context
2004 * @cidx: consumer pointer
2005 *
2006 * Initialize an SGE response queue context and make it ready for use.
2007 * The caller is responsible for ensuring only one context operation
2008 * occurs at a time.
2009 */
2010int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2011 int irq_vec_idx, u64 base_addr, unsigned int size,
2012 unsigned int fl_thres, int gen, unsigned int cidx)
2013{
2014 unsigned int intr = 0;
2015
2016 if (base_addr & 0xfff) /* must be 4K aligned */
2017 return -EINVAL;
2018 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2019 return -EBUSY;
2020
2021 base_addr >>= 12;
2022 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2023 V_CQ_INDEX(cidx));
2024 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2025 base_addr >>= 32;
2026 if (irq_vec_idx >= 0)
2027 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2028 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2029 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2030 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2031 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2032}
2033
2034/**
2035 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2036 * @adapter: the adapter to configure
2037 * @id: the context id
2038 * @base_addr: base address of queue
2039 * @size: number of queue entries
2040 * @rspq: response queue for async notifications
2041 * @ovfl_mode: CQ overflow mode
2042 * @credits: completion queue credits
2043 * @credit_thres: the credit threshold
2044 *
2045 * Initialize an SGE completion queue context and make it ready for use.
2046 * The caller is responsible for ensuring only one context operation
2047 * occurs at a time.
2048 */
2049int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2050 unsigned int size, int rspq, int ovfl_mode,
2051 unsigned int credits, unsigned int credit_thres)
2052{
2053 if (base_addr & 0xfff) /* must be 4K aligned */
2054 return -EINVAL;
2055 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2056 return -EBUSY;
2057
2058 base_addr >>= 12;
2059 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2060 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2061 base_addr >>= 32;
2062 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2063 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2064 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2065 V_CQ_ERR(ovfl_mode));
2066 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2067 V_CQ_CREDIT_THRES(credit_thres));
2068 return t3_sge_write_context(adapter, id, F_CQ);
2069}
2070
2071/**
2072 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2073 * @adapter: the adapter
2074 * @id: the egress context id
2075 * @enable: enable (1) or disable (0) the context
2076 *
2077 * Enable or disable an SGE egress context. The caller is responsible for
2078 * ensuring only one context operation occurs at a time.
2079 */
2080int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2081{
2082 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2083 return -EBUSY;
2084
2085 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2086 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2087 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2088 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2089 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2090 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2091 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2092 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2093 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2094}
2095
2096/**
2097 * t3_sge_disable_fl - disable an SGE free-buffer list
2098 * @adapter: the adapter
2099 * @id: the free list context id
2100 *
2101 * Disable an SGE free-buffer list. The caller is responsible for
2102 * ensuring only one context operation occurs at a time.
2103 */
2104int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2105{
2106 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2107 return -EBUSY;
2108
2109 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2110 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2111 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2112 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2113 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2114 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2115 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2116 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2117 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2118}
2119
2120/**
2121 * t3_sge_disable_rspcntxt - disable an SGE response queue
2122 * @adapter: the adapter
2123 * @id: the response queue context id
2124 *
2125 * Disable an SGE response queue. The caller is responsible for
2126 * ensuring only one context operation occurs at a time.
2127 */
2128int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2129{
2130 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2131 return -EBUSY;
2132
2133 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2134 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2135 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2136 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2137 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2138 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2139 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2140 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2141 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2142}
2143
2144/**
2145 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2146 * @adapter: the adapter
2147 * @id: the completion queue context id
2148 *
2149 * Disable an SGE completion queue. The caller is responsible for
2150 * ensuring only one context operation occurs at a time.
2151 */
2152int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2153{
2154 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2155 return -EBUSY;
2156
2157 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2158 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2159 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2160 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2161 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2162 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2163 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2164 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2165 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2166}
2167
2168/**
2169 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2170 * @adapter: the adapter
2171 * @id: the context id
2172 * @op: the operation to perform
2173 *
2174 * Perform the selected operation on an SGE completion queue context.
2175 * The caller is responsible for ensuring only one context operation
2176 * occurs at a time.
2177 */
2178int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2179 unsigned int credits)
2180{
2181 u32 val;
2182
2183 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2184 return -EBUSY;
2185
2186 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2187 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2188 V_CONTEXT(id) | F_CQ);
2189 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2190 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2191 return -EIO;
2192
2193 if (op >= 2 && op < 7) {
2194 if (adapter->params.rev > 0)
2195 return G_CQ_INDEX(val);
2196
2197 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2198 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2199 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2200 F_CONTEXT_CMD_BUSY, 0,
2201 SG_CONTEXT_CMD_ATTEMPTS, 1))
2202 return -EIO;
2203 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2204 }
2205 return 0;
2206}
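
/*
 * Note on the opcode range above: opcodes 2-6 modify the CQ state and
 * report back its consumer index.  Rev > 0 silicon latches the index into
 * the command register itself; rev 0 parts need the extra read-back of
 * SG_CONTEXT_DATA0 performed above.
 */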
2207
2208/**
2209 * t3_sge_read_context - read an SGE context
2210 * @type: the context type
2211 * @adapter: the adapter
2212 * @id: the context id
2213 * @data: holds the retrieved context
2214 *
2215 * Read an SGE context of the given type. The caller is responsible for
2216 * only one context operation occurs at a time.
2217 */
2218static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2219 unsigned int id, u32 data[4])
2220{
2221 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2222 return -EBUSY;
2223
2224 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2225 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2226 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
bb9366af 2227 SG_CONTEXT_CMD_ATTEMPTS, 1))
2228 return -EIO;
2229 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2230 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2231 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2232 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2233 return 0;
2234}
2235
2236/**
2237 * t3_sge_read_ecntxt - read an SGE egress context
2238 * @adapter: the adapter
2239 * @id: the context id
2240 * @data: holds the retrieved context
2241 *
2242 * Read an SGE egress context. The caller is responsible for ensuring
2243 * only one context operation occurs at a time.
2244 */
2245int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2246{
2247 if (id >= 65536)
2248 return -EINVAL;
2249 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2250}
2251
2252/**
2253 * t3_sge_read_cq - read an SGE CQ context
2254 * @adapter: the adapter
2255 * @id: the context id
2256 * @data: holds the retrieved context
2257 *
2258 * Read an SGE CQ context. The caller is responsible for ensuring
2259 * only one context operation occurs at a time.
2260 */
2261int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2262{
2263 if (id >= 65536)
2264 return -EINVAL;
2265 return t3_sge_read_context(F_CQ, adapter, id, data);
2266}
2267
2268/**
2269 * t3_sge_read_fl - read an SGE free-list context
2270 * @adapter: the adapter
2271 * @id: the context id
2272 * @data: holds the retrieved context
2273 *
2274 * Read an SGE free-list context. The caller is responsible for ensuring
2275 * only one context operation occurs at a time.
2276 */
2277int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2278{
2279 if (id >= SGE_QSETS * 2)
2280 return -EINVAL;
2281 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2282}
2283
2284/**
2285 * t3_sge_read_rspq - read an SGE response queue context
2286 * @adapter: the adapter
2287 * @id: the context id
2288 * @data: holds the retrieved context
2289 *
2290 * Read an SGE response queue context. The caller is responsible for
2291 * ensuring only one context operation occurs at a time.
2292 */
2293int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2294{
2295 if (id >= SGE_QSETS)
2296 return -EINVAL;
2297 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2298}
2299
2300/**
2301 * t3_config_rss - configure Rx packet steering
2302 * @adapter: the adapter
2303 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2304 * @cpus: values for the CPU lookup table (0xff terminated)
2305 * @rspq: values for the response queue lookup table (0xffff terminated)
2306 *
2307 * Programs the receive packet steering logic. @cpus and @rspq provide
2308 * the values for the CPU and response queue lookup tables. If they
2309 * provide fewer values than the size of the tables, the supplied values
2310 * are used repeatedly until the tables are fully populated.
2311 */
2312void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2313 const u8 * cpus, const u16 *rspq)
2314{
2315 int i, j, cpu_idx = 0, q_idx = 0;
2316
2317 if (cpus)
2318 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2319 u32 val = i << 16;
2320
2321 for (j = 0; j < 2; ++j) {
2322 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2323 if (cpus[cpu_idx] == 0xff)
2324 cpu_idx = 0;
2325 }
2326 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2327 }
2328
2329 if (rspq)
2330 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2331 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2332 (i << 16) | rspq[q_idx++]);
2333 if (rspq[q_idx] == 0xffff)
2334 q_idx = 0;
2335 }
2336
2337 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2338}
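
/*
 * Caller sketch, illustrative only: steer all packets to CPU 0 and spread
 * flows over four response queues.  Both tables are terminated as the
 * comment above requires; rss_config is left to the caller since the
 * appropriate TP_RSS_CONFIG flags depend on the deployment.
 */
#if 0	/* example only */
static void example_setup_rss(struct adapter *adap, unsigned int rss_config)
{
	static const u8 cpus[] = { 0, 0xff };		  /* 0xff terminator */
	static const u16 rspq[] = { 0, 1, 2, 3, 0xffff }; /* 0xffff terminator */

	t3_config_rss(adap, rss_config, cpus, rspq);
}
#endif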
2339
2340/**
2341 * t3_read_rss - read the contents of the RSS tables
2342 * @adapter: the adapter
2343 * @lkup: holds the contents of the RSS lookup table
2344 * @map: holds the contents of the RSS map table
2345 *
2346 * Reads the contents of the receive packet steering tables.
2347 */
2348int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2349{
2350 int i;
2351 u32 val;
2352
2353 if (lkup)
2354 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2355 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2356 0xffff0000 | i);
2357 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2358 if (!(val & 0x80000000))
2359 return -EAGAIN;
2360 *lkup++ = val;
2361 *lkup++ = (val >> 8);
2362 }
2363
2364 if (map)
2365 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2366 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2367 0xffff0000 | i);
2368 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2369 if (!(val & 0x80000000))
2370 return -EAGAIN;
2371 *map++ = val;
2372 }
2373 return 0;
2374}
2375
2376/**
2377 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2378 * @adap: the adapter
2379 * @enable: 1 to select offload mode, 0 for regular NIC
2380 *
2381 * Switches TP to NIC/offload mode.
2382 */
2383void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2384{
2385 if (is_offload(adap) || !enable)
2386 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2387 V_NICMODE(!enable));
2388}
2389
2390/**
2391 * pm_num_pages - calculate the number of pages of the payload memory
2392 * @mem_size: the size of the payload memory
2393 * @pg_size: the size of each payload memory page
2394 *
2395 * Calculate the number of pages, each of the given size, that fit in a
2396 * memory of the specified size, respecting the HW requirement that the
2397 * number of pages must be a multiple of 24.
2398 */
2399static inline unsigned int pm_num_pages(unsigned int mem_size,
2400 unsigned int pg_size)
2401{
2402 unsigned int n = mem_size / pg_size;
2403
2404 return n - n % 24;
2405}
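
/*
 * Worked example: a 64 MB payload memory with 16 KB pages gives
 * n = 4096; 4096 % 24 = 16, so pm_num_pages() returns 4080, the largest
 * multiple of 24 that fits.
 */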
2406
2407#define mem_region(adap, start, size, reg) \
2408 t3_write_reg((adap), A_ ## reg, (start)); \
2409 start += size
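
/*
 * Expansion example: mem_region(adap, m, 64 * pstructs, TP_CMM_MM_BASE)
 * writes the current value of m to A_TP_CMM_MM_BASE and then advances m
 * by the region size, so consecutive invocations lay out back-to-back
 * regions.
 */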
2410
2411/*
2412 * partition_mem - partition memory and configure TP memory settings
2413 * @adap: the adapter
2414 * @p: the TP parameters
2415 *
2416 * Partitions context and payload memory and configures TP's memory
2417 * registers.
2418 */
2419static void partition_mem(struct adapter *adap, const struct tp_params *p)
2420{
2421 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2422 unsigned int timers = 0, timers_shift = 22;
2423
2424 if (adap->params.rev > 0) {
2425 if (tids <= 16 * 1024) {
2426 timers = 1;
2427 timers_shift = 16;
2428 } else if (tids <= 64 * 1024) {
2429 timers = 2;
2430 timers_shift = 18;
2431 } else if (tids <= 256 * 1024) {
2432 timers = 3;
2433 timers_shift = 20;
2434 }
2435 }
2436
2437 t3_write_reg(adap, A_TP_PMM_SIZE,
2438 p->chan_rx_size | (p->chan_tx_size >> 16));
2439
2440 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2441 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2442 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2443 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2444 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2445
2446 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2447 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2448 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2449
2450 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2451 /* Add a bit of headroom and make multiple of 24 */
2452 pstructs += 48;
2453 pstructs -= pstructs % 24;
2454 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2455
2456 m = tids * TCB_SIZE;
2457 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2458 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2459 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2460 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2461 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2462 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2463 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2464 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2465
2466 m = (m + 4095) & ~0xfff;
2467 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2468 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2469
2470 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2471 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2472 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2473 if (tids < m)
2474 adap->params.mc5.nservers += m - tids;
2475}
2476
2477static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2478 u32 val)
2479{
2480 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2481 t3_write_reg(adap, A_TP_PIO_DATA, val);
2482}
2483
2484static void tp_config(struct adapter *adap, const struct tp_params *p)
2485{
2486 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2487 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2488 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2489 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2490 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
8a9fab22 2491 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2492 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2493 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2494 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2495 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2496 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2497 F_IPV6ENABLE | F_NICMODE);
2498 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2499 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2500 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2501 adap->params.rev > 0 ? F_ENABLEESND :
2502 F_T3A_ENABLEESND);
4d22de3e 2503
3b1d307b 2504 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2505 F_ENABLEEPCMDAFULL,
2506 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2507 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
3b1d307b 2508 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
2509 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2510 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2eab17ab 2511
2512 if (adap->params.rev > 0) {
2513 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2514 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2515 F_TXPACEAUTO);
2516 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2517 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2518 } else
2519 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2520
2521 if (adap->params.rev == T3_REV_C)
2522 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2523 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2524 V_TABLELATENCYDELTA(4));
2525
2526 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2527 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2528 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2529 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2530}
2531
2532/* Desired TP timer resolution in usec */
2533#define TP_TMR_RES 50
2534
2535/* TCP timer values in ms */
2536#define TP_DACK_TIMER 50
2537#define TP_RTO_MIN 250
2538
2539/**
2540 * tp_set_timers - set TP timing parameters
2541 * @adap: the adapter to set
2542 * @core_clk: the core clock frequency in Hz
2543 *
2544 * Set TP's timing parameters, such as the various timer resolutions and
2545 * the TCP timer values.
2546 */
2547static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2548{
2549 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2550 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2551 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2552 unsigned int tps = core_clk >> tre;
2553
2554 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2555 V_DELAYEDACKRESOLUTION(dack_re) |
2556 V_TIMESTAMPRESOLUTION(tstamp_re));
2557 t3_write_reg(adap, A_TP_DACK_TIMER,
2558 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2559 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2560 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2561 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2562 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2563 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2564 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2565 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2566 V_KEEPALIVEMAX(9));
2567
2568#define SECONDS * tps
2569
2570 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2571 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2572 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2573 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2574 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2575 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2576 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2577 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2578 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2579
2580#undef SECONDS
2581}
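
/*
 * Worked example, assuming a 200 MHz core clock: core_clk / (1000000 /
 * TP_TMR_RES) = 10000 and fls(10000) - 1 = 13, so tre = 13.  One timer
 * tick is then 2^13 / 200 MHz ~= 41 us, the largest power-of-two tick
 * that does not exceed the desired 50 us resolution, and tps =
 * 200 MHz >> 13 ~= 24414 ticks per second.
 */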
2582
2583/**
2584 * t3_tp_set_coalescing_size - set receive coalescing size
2585 * @adap: the adapter
2586 * @size: the receive coalescing size
2587 * @psh: whether a set PSH bit should deliver coalesced data
2588 *
2589 * Set the receive coalescing size and PSH bit handling.
2590 */
2591int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2592{
2593 u32 val;
2594
2595 if (size > MAX_RX_COALESCING_LEN)
2596 return -EINVAL;
2597
2598 val = t3_read_reg(adap, A_TP_PARA_REG3);
2599 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2600
2601 if (size) {
2602 val |= F_RXCOALESCEENABLE;
2603 if (psh)
2604 val |= F_RXCOALESCEPSHEN;
8a9fab22 2605 size = min(MAX_RX_COALESCING_LEN, size);
2606 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2607 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2608 }
2609 t3_write_reg(adap, A_TP_PARA_REG3, val);
2610 return 0;
2611}
2612
2613/**
2614 * t3_tp_set_max_rxsize - set the max receive size
2615 * @adap: the adapter
2616 * @size: the max receive size
2617 *
2618 * Set TP's max receive size. This is the limit that applies when
2619 * receive coalescing is disabled.
2620 */
2621void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2622{
2623 t3_write_reg(adap, A_TP_PARA_REG7,
2624 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2625}
2626
2627static void __devinit init_mtus(unsigned short mtus[])
2628{
2629 /*
2630 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2631 * it can accommodate max size TCP/IP headers when SACK and timestamps
2632 * are enabled and still have at least 8 bytes of payload.
2633 */
75758e8a 2634 mtus[0] = 88;
2635 mtus[1] = 88;
2636 mtus[2] = 256;
2637 mtus[3] = 512;
2638 mtus[4] = 576;
2639 mtus[5] = 1024;
2640 mtus[6] = 1280;
2641 mtus[7] = 1492;
2642 mtus[8] = 1500;
2643 mtus[9] = 2002;
2644 mtus[10] = 2048;
2645 mtus[11] = 4096;
2646 mtus[12] = 4352;
2647 mtus[13] = 8192;
2648 mtus[14] = 9000;
2649 mtus[15] = 9600;
2650}
2651
2652/*
2653 * Initial congestion control parameters.
2654 */
2655static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2656{
2657 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2658 a[9] = 2;
2659 a[10] = 3;
2660 a[11] = 4;
2661 a[12] = 5;
2662 a[13] = 6;
2663 a[14] = 7;
2664 a[15] = 8;
2665 a[16] = 9;
2666 a[17] = 10;
2667 a[18] = 14;
2668 a[19] = 17;
2669 a[20] = 21;
2670 a[21] = 25;
2671 a[22] = 30;
2672 a[23] = 35;
2673 a[24] = 45;
2674 a[25] = 60;
2675 a[26] = 80;
2676 a[27] = 100;
2677 a[28] = 200;
2678 a[29] = 300;
2679 a[30] = 400;
2680 a[31] = 500;
2681
2682 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2683 b[9] = b[10] = 1;
2684 b[11] = b[12] = 2;
2685 b[13] = b[14] = b[15] = b[16] = 3;
2686 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2687 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2688 b[28] = b[29] = 6;
2689 b[30] = b[31] = 7;
2690}
2691
2692/* The minimum additive increment value for the congestion control table */
2693#define CC_MIN_INCR 2U
2694
2695/**
2696 * t3_load_mtus - write the MTU and congestion control HW tables
2697 * @adap: the adapter
2698 * @mtus: the unrestricted values for the MTU table
2699 * @alpha: the values for the congestion control alpha parameter
2700 * @beta: the values for the congestion control beta parameter
2701 * @mtu_cap: the maximum permitted effective MTU
2702 *
2703 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2704 * Update the high-speed congestion control table with the supplied alpha,
2705 * beta, and MTUs.
2706 */
2707void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2708 unsigned short alpha[NCCTRL_WIN],
2709 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2710{
2711 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2712 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2713 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2714 28672, 40960, 57344, 81920, 114688, 163840, 229376
2715 };
2716
2717 unsigned int i, w;
2718
2719 for (i = 0; i < NMTUS; ++i) {
2720 unsigned int mtu = min(mtus[i], mtu_cap);
2721 unsigned int log2 = fls(mtu);
2722
2723 if (!(mtu & ((1 << log2) >> 2))) /* round */
2724 log2--;
2725 t3_write_reg(adap, A_TP_MTU_TABLE,
2726 (i << 24) | (log2 << 16) | mtu);
2727
2728 for (w = 0; w < NCCTRL_WIN; ++w) {
2729 unsigned int inc;
2730
2731 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2732 CC_MIN_INCR);
2733
2734 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2735 (w << 16) | (beta[w] << 13) | inc);
2736 }
2737 }
2738}
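
/*
 * Worked example: for mtus[i] = 1500 (and mtu_cap >= 1500), fls(1500) = 11
 * and bit 9 (512) of 1500 is clear, so log2 is rounded down to 10.  With
 * alpha[w] = 2 and avg_pkts[w] = 2, the additive increment is
 * (1500 - 40) * 2 / 2 = 1460, well above CC_MIN_INCR.
 */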
2739
2740/**
2741 * t3_read_hw_mtus - returns the values in the HW MTU table
2742 * @adap: the adapter
2743 * @mtus: where to store the HW MTU values
2744 *
2745 * Reads the HW MTU table.
2746 */
2747void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2748{
2749 int i;
2750
2751 for (i = 0; i < NMTUS; ++i) {
2752 unsigned int val;
2753
2754 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2755 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2756 mtus[i] = val & 0x3fff;
2757 }
2758}
2759
2760/**
2761 * t3_get_cong_cntl_tab - reads the congestion control table
2762 * @adap: the adapter
2763 * @incr: where to store the alpha values
2764 *
2765 * Reads the additive increments programmed into the HW congestion
2766 * control table.
2767 */
2768void t3_get_cong_cntl_tab(struct adapter *adap,
2769 unsigned short incr[NMTUS][NCCTRL_WIN])
2770{
2771 unsigned int mtu, w;
2772
2773 for (mtu = 0; mtu < NMTUS; ++mtu)
2774 for (w = 0; w < NCCTRL_WIN; ++w) {
2775 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2776 0xffff0000 | (mtu << 5) | w);
2777 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2778 0x1fff;
2779 }
2780}
2781
2782/**
2783 * t3_tp_get_mib_stats - read TP's MIB counters
2784 * @adap: the adapter
2785 * @tps: holds the returned counter values
2786 *
2787 * Returns the values of TP's MIB counters.
2788 */
2789void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2790{
2791 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2792 sizeof(*tps) / sizeof(u32), 0);
2793}
2794
2795#define ulp_region(adap, name, start, len) \
2796 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2797 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2798 (start) + (len) - 1); \
2799 start += len
2800
2801#define ulptx_region(adap, name, start, len) \
2802 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2803 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2804 (start) + (len) - 1)
2805
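/*
 * Expansion example: ulp_region(adap, TDDP, m, len) writes m to
 * A_ULPRX_TDDP_LLIMIT and m + len - 1 to A_ULPRX_TDDP_ULIMIT, then
 * advances m past the region.  ulptx_region() programs the corresponding
 * A_ULPTX_* limit registers but leaves m unchanged, which is how a ULP RX
 * and a ULP TX region are overlaid on the same memory below.
 */
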
2806static void ulp_config(struct adapter *adap, const struct tp_params *p)
2807{
2808 unsigned int m = p->chan_rx_size;
2809
2810 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2811 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2812 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2813 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2814 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2815 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2816 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2817 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2818}
2819
2820/**
2821 * t3_set_proto_sram - set the contents of the protocol sram
2822 * @adapter: the adapter
2823 * @data: the protocol image
2824 *
2825 * Write the contents of the protocol SRAM.
2826 */
2827int t3_set_proto_sram(struct adapter *adap, u8 *data)
2828{
2829 int i;
2830 u32 *buf = (u32 *)data;
2831
2832 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2833 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
2834 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
2835 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
2836 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
2837 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
2eab17ab 2838
2839 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2840 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2841 return -EIO;
2842 }
2843 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2844
2845 return 0;
2846}
2847
2848void t3_config_trace_filter(struct adapter *adapter,
2849 const struct trace_params *tp, int filter_index,
2850 int invert, int enable)
2851{
2852 u32 addr, key[4], mask[4];
2853
2854 key[0] = tp->sport | (tp->sip << 16);
2855 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2856 key[2] = tp->dip;
2857 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2858
2859 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2860 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2861 mask[2] = tp->dip_mask;
2862 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2863
2864 if (invert)
2865 key[3] |= (1 << 29);
2866 if (enable)
2867 key[3] |= (1 << 28);
2868
2869 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2870 tp_wr_indirect(adapter, addr++, key[0]);
2871 tp_wr_indirect(adapter, addr++, mask[0]);
2872 tp_wr_indirect(adapter, addr++, key[1]);
2873 tp_wr_indirect(adapter, addr++, mask[1]);
2874 tp_wr_indirect(adapter, addr++, key[2]);
2875 tp_wr_indirect(adapter, addr++, mask[2]);
2876 tp_wr_indirect(adapter, addr++, key[3]);
2877 tp_wr_indirect(adapter, addr, mask[3]);
2878 t3_read_reg(adapter, A_TP_PIO_DATA);
2879}
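
/*
 * Caller sketch, illustrative only: program the TX trace filter to capture
 * TCP traffic to destination port 80.  The exact mask semantics are a
 * hardware property; the assumption here is that all-ones mask fields
 * select the corresponding key fields for comparison.
 */
#if 0	/* example only */
static void example_trace_http(struct adapter *adap)
{
	struct trace_params tp = { 0 };

	tp.proto = IPPROTO_TCP;
	tp.proto_mask = 0xff;
	tp.dport = 80;
	tp.dport_mask = 0xffff;

	t3_config_trace_filter(adap, &tp, 0 /* TX filter */, 0 /* invert */,
			       1 /* enable */);
}
#endif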
2880
2881/**
2882 * t3_config_sched - configure a HW traffic scheduler
2883 * @adap: the adapter
2884 * @kbps: target rate in Kbps
2885 * @sched: the scheduler index
2886 *
2887 * Configure a HW scheduler for the target rate
2888 */
2889int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2890{
2891 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2892 unsigned int clk = adap->params.vpd.cclk * 1000;
2893 unsigned int selected_cpt = 0, selected_bpt = 0;
2894
2895 if (kbps > 0) {
2896 kbps *= 125; /* -> bytes */
2897 for (cpt = 1; cpt <= 255; cpt++) {
2898 tps = clk / cpt;
2899 bpt = (kbps + tps / 2) / tps;
2900 if (bpt > 0 && bpt <= 255) {
2901 v = bpt * tps;
2902 delta = v >= kbps ? v - kbps : kbps - v;
2903 if (delta <= mindelta) {
2904 mindelta = delta;
2905 selected_cpt = cpt;
2906 selected_bpt = bpt;
2907 }
2908 } else if (selected_cpt)
2909 break;
2910 }
2911 if (!selected_cpt)
2912 return -EINVAL;
2913 }
2914 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2915 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2916 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2917 if (sched & 1)
2918 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2919 else
2920 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2921 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2922 return 0;
2923}
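
/*
 * Worked example, assuming a 200 MHz clock: a request for 10000 kbps is
 * first scaled to 1250000 bytes/sec.  The loop then searches (cpt, bpt)
 * pairs for the one minimizing |bpt * (clk / cpt) - rate|; here cpt = 160
 * gives tps = 1250000 and bpt = 1, an exact match, so that pair is
 * programmed into the scheduler's rate-limit register.
 */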
2924
2925static int tp_init(struct adapter *adap, const struct tp_params *p)
2926{
2927 int busy = 0;
2928
2929 tp_config(adap, p);
2930 t3_set_vlan_accel(adap, 3, 0);
2931
2932 if (is_offload(adap)) {
2933 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2934 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2935 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2936 0, 1000, 5);
2937 if (busy)
2938 CH_ERR(adap, "TP initialization timed out\n");
2939 }
2940
2941 if (!busy)
2942 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2943 return busy;
2944}
2945
2946int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2947{
2948 if (port_mask & ~((1 << adap->params.nports) - 1))
2949 return -EINVAL;
2950 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2951 port_mask << S_PORT0ACTIVE);
2952 return 0;
2953}
2954
2955/*
2956 * Perform the bits of HW initialization that are dependent on the number
2957 * of available ports.
2958 */
2959static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2960{
2961 int i;
2962
2963 if (nports == 1) {
2964 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2965 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2966 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2967 F_PORT0ACTIVE | F_ENFORCEPKT);
8a9fab22 2968 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
2969 } else {
2970 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2971 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2972 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2973 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2974 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2975 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2976 F_ENFORCEPKT);
2977 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2978 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2979 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2980 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2981 for (i = 0; i < 16; i++)
2982 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2983 (i << 16) | 0x1010);
2984 }
2985}
2986
2987static int calibrate_xgm(struct adapter *adapter)
2988{
2989 if (uses_xaui(adapter)) {
2990 unsigned int v, i;
2991
2992 for (i = 0; i < 5; ++i) {
2993 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2994 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2995 msleep(1);
2996 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2997 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2998 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2999 V_XAUIIMP(G_CALIMP(v) >> 2));
3000 return 0;
3001 }
3002 }
3003 CH_ERR(adapter, "MAC calibration failed\n");
3004 return -1;
3005 } else {
3006 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3007 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3008 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3009 F_XGM_IMPSETUPDATE);
3010 }
3011 return 0;
3012}
3013
3014static void calibrate_xgm_t3b(struct adapter *adapter)
3015{
3016 if (!uses_xaui(adapter)) {
3017 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3018 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3019 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3020 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3021 F_XGM_IMPSETUPDATE);
3022 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3023 0);
3024 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3025 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3026 }
3027}
3028
3029struct mc7_timing_params {
3030 unsigned char ActToPreDly;
3031 unsigned char ActToRdWrDly;
3032 unsigned char PreCyc;
3033 unsigned char RefCyc[5];
3034 unsigned char BkCyc;
3035 unsigned char WrToRdDly;
3036 unsigned char RdToWrDly;
3037};
3038
3039/*
3040 * Write a value to a register and check that the write completed. These
3041 * writes normally complete in a cycle or two, so one read should suffice.
3042 * The very first read exists to flush the posted write to the device.
3043 */
3044static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3045{
3046 t3_write_reg(adapter, addr, val);
3047 t3_read_reg(adapter, addr); /* flush */
3048 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3049 return 0;
3050 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3051 return -EIO;
3052}
3053
3054static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3055{
3056 static const unsigned int mc7_mode[] = {
3057 0x632, 0x642, 0x652, 0x432, 0x442
3058 };
3059 static const struct mc7_timing_params mc7_timings[] = {
3060 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3061 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3062 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3063 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3064 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3065 };
3066
3067 u32 val;
3068 unsigned int width, density, slow, attempts;
3069 struct adapter *adapter = mc7->adapter;
3070 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3071
3072 if (!mc7->size)
3073 return 0;
3074
3075 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3076 slow = val & F_SLOW;
3077 width = G_WIDTH(val);
3078 density = G_DEN(val);
3079
3080 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3081 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3082 msleep(1);
3083
3084 if (!slow) {
3085 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3086 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3087 msleep(1);
3088 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3089 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3090 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3091 mc7->name);
3092 goto out_fail;
3093 }
3094 }
3095
3096 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3097 V_ACTTOPREDLY(p->ActToPreDly) |
3098 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3099 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3100 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3101
3102 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3103 val | F_CLKEN | F_TERM150);
3104 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3105
3106 if (!slow)
3107 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3108 F_DLLENB);
3109 udelay(1);
3110
3111 val = slow ? 3 : 6;
3112 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3113 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3114 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3115 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3116 goto out_fail;
3117
3118 if (!slow) {
3119 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3120 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3121 udelay(5);
3122 }
3123
3124 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3125 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3126 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3127 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3128 mc7_mode[mem_type]) ||
3129 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3130 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3131 goto out_fail;
3132
3133 /* clock value is in KHz */
3134 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3135 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3136
3137 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3138 F_PERREFEN | V_PREREFDIV(mc7_clock));
3139 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3140
3141 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3142 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3143 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3144 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3145 (mc7->size << width) - 1);
3146 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3147 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3148
3149 attempts = 50;
3150 do {
3151 msleep(250);
3152 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3153 } while ((val & F_BUSY) && --attempts);
3154 if (val & F_BUSY) {
3155 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3156 goto out_fail;
3157 }
3158
3159 /* Enable normal memory accesses. */
3160 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3161 return 0;
3162
3163out_fail:
3164 return -1;
3165}
3166
3167static void config_pcie(struct adapter *adap)
3168{
3169 static const u16 ack_lat[4][6] = {
3170 {237, 416, 559, 1071, 2095, 4143},
3171 {128, 217, 289, 545, 1057, 2081},
3172 {73, 118, 154, 282, 538, 1050},
3173 {67, 107, 86, 150, 278, 534}
3174 };
3175 static const u16 rpl_tmr[4][6] = {
3176 {711, 1248, 1677, 3213, 6285, 12429},
3177 {384, 651, 867, 1635, 3171, 6243},
3178 {219, 354, 462, 846, 1614, 3150},
3179 {201, 321, 258, 450, 834, 1602}
3180 };
3181
3182 u16 val;
3183 unsigned int log2_width, pldsize;
3184 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3185
3186 pci_read_config_word(adap->pdev,
3187 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3188 &val);
3189 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3190 pci_read_config_word(adap->pdev,
3191 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3192 &val);
3193
3194 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3195 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3196 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3197 log2_width = fls(adap->params.pci.width) - 1;
3198 acklat = ack_lat[log2_width][pldsize];
3199 if (val & 1) /* check LOsEnable */
3200 acklat += fst_trn_tx * 4;
3201 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3202
3203 if (adap->params.rev == 0)
3204 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3205 V_T3A_ACKLAT(M_T3A_ACKLAT),
3206 V_T3A_ACKLAT(acklat));
3207 else
3208 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3209 V_ACKLAT(acklat));
3210
3211 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3212 V_REPLAYLMT(rpllmt));
3213
3214 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3215 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3216}
3217
3218/*
3219 * Initialize and configure T3 HW modules. This performs the
3220 * initialization steps that need to be done once after a card is reset.
3221 * MAC and PHY initialization is handled separately whenever a port is enabled.
3222 *
3223 * fw_params are passed to FW and their value is platform dependent. Only the
3224 * top 8 bits are available for use; the rest must be 0.
3225 */
3226int t3_init_hw(struct adapter *adapter, u32 fw_params)
3227{
3228 int err = -EIO, attempts = 100;
3229 const struct vpd_params *vpd = &adapter->params.vpd;
3230
3231 if (adapter->params.rev > 0)
3232 calibrate_xgm_t3b(adapter);
3233 else if (calibrate_xgm(adapter))
3234 goto out_err;
3235
3236 if (vpd->mclk) {
3237 partition_mem(adapter, &adapter->params.tp);
3238
3239 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3240 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3241 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3242 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3243 adapter->params.mc5.nfilters,
3244 adapter->params.mc5.nroutes))
3245 goto out_err;
3246 }
3247
3248 if (tp_init(adapter, &adapter->params.tp))
3249 goto out_err;
3250
3251 t3_tp_set_coalescing_size(adapter,
3252 min(adapter->params.sge.max_pkt_size,
3253 MAX_RX_COALESCING_LEN), 1);
3254 t3_tp_set_max_rxsize(adapter,
3255 min(adapter->params.sge.max_pkt_size, 16384U));
3256 ulp_config(adapter, &adapter->params.tp);
3257
3258 if (is_pcie(adapter))
3259 config_pcie(adapter);
3260 else
3261 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3262
3263 if (adapter->params.rev == T3_REV_C)
3264 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3265 F_CFG_CQE_SOP_MASK);
3266
8a9fab22 3267 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3268 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3269 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3270 init_hw_for_avail_ports(adapter, adapter->params.nports);
3271 t3_sge_init(adapter, &adapter->params.sge);
3272
3273 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3274 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3275 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3276 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3277
3278 do { /* wait for uP to initialize */
3279 msleep(20);
3280 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3281 if (!attempts) {
3282 CH_ERR(adapter, "uP initialization timed out\n");
4d22de3e 3283 goto out_err;
8ac3ba68 3284 }
3285
3286 err = 0;
3287out_err:
3288 return err;
3289}
3290
3291/**
3292 * get_pci_mode - determine a card's PCI mode
3293 * @adapter: the adapter
3294 * @p: where to store the PCI settings
3295 *
3296 * Determines a card's PCI mode and associated parameters, such as speed
3297 * and width.
3298 */
3299static void __devinit get_pci_mode(struct adapter *adapter,
3300 struct pci_params *p)
3301{
3302 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3303 u32 pci_mode, pcie_cap;
3304
3305 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3306 if (pcie_cap) {
3307 u16 val;
3308
3309 p->variant = PCI_VARIANT_PCIE;
3310 p->pcie_cap_addr = pcie_cap;
3311 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3312 &val);
3313 p->width = (val >> 4) & 0x3f;
3314 return;
3315 }
3316
3317 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3318 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3319 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3320 pci_mode = G_PCIXINITPAT(pci_mode);
3321 if (pci_mode == 0)
3322 p->variant = PCI_VARIANT_PCI;
3323 else if (pci_mode < 4)
3324 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3325 else if (pci_mode < 8)
3326 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3327 else
3328 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3329}
3330
3331/**
3332 * init_link_config - initialize a link's SW state
3333 * @lc: structure holding the link state
3334 * @ai: information about the current card
3335 *
3336 * Initializes the SW state maintained for each link, including the link's
3337 * capabilities and default speed/duplex/flow-control/autonegotiation
3338 * settings.
3339 */
3340static void __devinit init_link_config(struct link_config *lc,
3341 unsigned int caps)
3342{
3343 lc->supported = caps;
3344 lc->requested_speed = lc->speed = SPEED_INVALID;
3345 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3346 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3347 if (lc->supported & SUPPORTED_Autoneg) {
3348 lc->advertising = lc->supported;
3349 lc->autoneg = AUTONEG_ENABLE;
3350 lc->requested_fc |= PAUSE_AUTONEG;
3351 } else {
3352 lc->advertising = 0;
3353 lc->autoneg = AUTONEG_DISABLE;
3354 }
3355}
3356
3357/**
3358 * mc7_calc_size - calculate MC7 memory size
3359 * @cfg: the MC7 configuration
3360 *
3361 * Calculates the size of an MC7 memory in bytes from the value of its
3362 * configuration register.
3363 */
3364static unsigned int __devinit mc7_calc_size(u32 cfg)
3365{
3366 unsigned int width = G_WIDTH(cfg);
3367 unsigned int banks = !!(cfg & F_BKS) + 1;
3368 unsigned int org = !!(cfg & F_ORG) + 1;
3369 unsigned int density = G_DEN(cfg);
3370 unsigned int MBs = ((256 << density) * banks) / (org << width);
3371
3372 return MBs << 20;
3373}
3374
3375static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3376 unsigned int base_addr, const char *name)
3377{
3378 u32 cfg;
3379
3380 mc7->adapter = adapter;
3381 mc7->name = name;
3382 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3383 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
8ac3ba68 3384	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
4d22de3e 3385	mc7->width = G_WIDTH(cfg);
3386}
3387
3388void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3389{
3390 mac->adapter = adapter;
3391 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3392 mac->nucast = 1;
3393
3394 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3395 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3396 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3397 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3398 F_ENRGMII, 0);
3399 }
3400}
3401
3402void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3403{
3404 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3405
3406 mi1_init(adapter, ai);
3407 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3408 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3409 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3410 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
8ac3ba68 3411 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
4d22de3e 3412
3413 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3414 val |= F_ENRGMII;
3415
3416 /* Enable MAC clocks so we can access the registers */
3417 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3418	t3_read_reg(adapter, A_XGM_PORT_CFG); /* flush */
3419
3420 val |= F_CLKDIVRESET_;
3421 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3422	t3_read_reg(adapter, A_XGM_PORT_CFG); /* flush */
3423 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3424	t3_read_reg(adapter, A_XGM_PORT_CFG); /* flush */
3425}
3426
3427/*
2eab17ab 3428 * Reset the adapter.
e4d08359 3429 * Older PCIe cards lose their config space during reset; PCI-X
4d22de3e 3430 * ones don't.
3431 */
9265fabf 3432static int t3_reset_adapter(struct adapter *adapter)
4d22de3e 3433{
2eab17ab 3434 int i, save_and_restore_pcie =
e4d08359 3435 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
4d22de3e 3436	uint16_t devid = 0;
3437
e4d08359 3438 if (save_and_restore_pcie)
4d22de3e 3439		pci_save_state(adapter->pdev);
3440 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3441
3442	/*
3443	 * Delay to give the device time to reset fully, then poll the PCI
3444	 * vendor ID until it reads back Chelsio's 0x1425. XXX tune the delay.
3445	 */
3446 for (i = 0; i < 10; i++) {
3447 msleep(50);
3448 pci_read_config_word(adapter->pdev, 0x00, &devid);
3449 if (devid == 0x1425)
3450 break;
3451 }
3452
3453 if (devid != 0x1425)
3454 return -1;
3455
e4d08359 3456 if (save_and_restore_pcie)
4d22de3e 3457		pci_restore_state(adapter->pdev);
3458 return 0;
3459}
3460
3461/*
3462 * Initialize adapter SW state for the various HW modules, set initial values
3463 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3464 * interface.
3465 */
3466int __devinit t3_prep_adapter(struct adapter *adapter,
3467 const struct adapter_info *ai, int reset)
3468{
3469 int ret;
3470 unsigned int i, j = 0;
3471
3472 get_pci_mode(adapter, &adapter->params.pci);
3473
3474 adapter->params.info = ai;
3475 adapter->params.nports = ai->nports;
3476 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3477 adapter->params.linkpoll_period = 0;
3478 adapter->params.stats_update_period = is_10G(adapter) ?
3479 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3480 adapter->params.pci.vpd_cap_addr =
3481 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3482 ret = get_vpd_params(adapter, &adapter->params.vpd);
3483 if (ret < 0)
3484 return ret;
3485
3486 if (reset && t3_reset_adapter(adapter))
3487 return -1;
3488
3489 t3_sge_prep(adapter, &adapter->params.sge);
3490
3491 if (adapter->params.vpd.mclk) {
3492 struct tp_params *p = &adapter->params.tp;
3493
3494 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3495 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3496 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3497
3498 p->nchan = ai->nports;
3499 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3500 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3501 p->cm_size = t3_mc7_size(&adapter->cm);
3502 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3503 p->chan_tx_size = p->pmtx_size / p->nchan;
3504 p->rx_pg_size = 64 * 1024;
3505 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3506 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3507 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3508 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3509 adapter->params.rev > 0 ? 12 : 6;
8ac3ba68 3510	}
3511
3512 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3513 t3_mc7_size(&adapter->pmtx) &&
3514 t3_mc7_size(&adapter->cm);
4d22de3e 3515
8ac3ba68 3516 if (is_offload(adapter)) {
4d22de3e 3517		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3518 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3519 DEFAULT_NFILTERS : 0;
3520 adapter->params.mc5.nroutes = 0;
3521 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3522
3523 init_mtus(adapter->params.mtus);
3524 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3525 }
3526
3527 early_hw_init(adapter, ai);
3528
3529 for_each_port(adapter, i) {
3530 u8 hw_addr[6];
3531 struct port_info *p = adap2pinfo(adapter, i);
3532
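		/* skip VPD port-type entries that are unused on this card */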
3533 while (!adapter->params.vpd.port_type[j])
3534 ++j;
3535
3536 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3537 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3538 ai->mdio_ops);
3539 mac_prep(&p->mac, adapter, j);
3540 ++j;
3541
3542 /*
3543 * The VPD EEPROM stores the base Ethernet address for the
3544 * card. A port's address is derived from the base by adding
3545 * the port's index to the base's low octet.
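		 * For example (illustrative value): a base ending in 0x10
		 * gives port 0 a final octet of 0x10 and port 1 one of 0x11.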
3546 */
3547 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3548 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3549
3550 memcpy(adapter->port[i]->dev_addr, hw_addr,
3551 ETH_ALEN);
3552 memcpy(adapter->port[i]->perm_addr, hw_addr,
3553 ETH_ALEN);
3554 init_link_config(&p->link_config, p->port_type->caps);
3555 p->phy.ops->power_down(&p->phy, 1);
3556 if (!(p->port_type->caps & SUPPORTED_IRQ))
3557 adapter->params.linkpoll_period = 10;
3558 }
3559
3560 return 0;
3561}
3562
3563void t3_led_ready(struct adapter *adapter)
3564{
3565 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3566 F_GPIO0_OUT_VAL);
3567}