]> git.proxmox.com Git - mirror_ubuntu-kernels.git/blame - drivers/net/cxgb3/t3_hw.c
cxgb3: Support for Aeluros 2005 PHY
[mirror_ubuntu-kernels.git] / drivers / net / cxgb3 / t3_hw.c
CommitLineData
4d22de3e 1/*
1d68e93d 2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4d22de3e 3 *
1d68e93d
DLR
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
4d22de3e 9 *
1d68e93d
DLR
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
4d22de3e 31 */
4d22de3e
DLR
32#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
f2c6879e
DLR
37/**
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
46 *
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
51 */
4d22de3e
DLR
52
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
55{
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
58
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
63 }
64 if (--attempts == 0)
b881955b 65 return -EAGAIN;
4d22de3e
DLR
66 if (delay)
67 udelay(delay);
68 }
69}
70
71/**
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
77 *
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
81 */
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
84{
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
88 }
89}
90
91/**
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
97 *
98 * Sets a register field specified by the supplied mask to the
99 * given value.
100 */
101void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102 u32 val)
103{
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
105
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
108}
109
110/**
111 * t3_read_indirect - read indirectly addressed registers
112 * @adap: the adapter
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
118 *
119 * Reads registers that are accessed indirectly through an address/data
120 * register pair.
121 */
9265fabf
SH
122static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
4d22de3e
DLR
125{
126 while (nregs--) {
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
129 start_idx++;
130 }
131}
132
/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.  Each 64-bit word is assembled from one or more backdoor
 *	reads depending on the memory width (mc7->width).
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	/* Per-width right-shift applied to DATA1 and per-width bit offset of
	 * each partial read within the assembled 64-bit word; indexed by
	 * mc7->width (0..3). */
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	/* Reject reads that start or run past the end of the memory. */
	if (start >= size64 || start + n > size64)
		return -EINVAL;

	/* Convert the 64-bit word index to a backdoor byte address; narrower
	 * memories need proportionally more (8-byte-spaced) accesses. */
	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		/* Issue (1 << width) backdoor reads, highest piece first. */
		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			/* Poll until the backdoor operation completes. */
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* Full-width memory: DATA0/DATA1 hold the
				 * complete 64-bit word in one access. */
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
190
191/*
192 * Initialize MI1.
193 */
194static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
195{
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
04497982 197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
4d22de3e 198
4d22de3e
DLR
199 t3_write_reg(adap, A_MI1_CFG, val);
200}
201
04497982 202#define MDIO_ATTEMPTS 20
4d22de3e
DLR
203
204/*
04497982 205 * MI1 read/write operations for clause 22 PHYs.
4d22de3e 206 */
04497982
DLR
207static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
208 int reg_addr, unsigned int *valp)
4d22de3e
DLR
209{
210 int ret;
211 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
212
213 if (mmd_addr)
214 return -EINVAL;
215
216 mutex_lock(&adapter->mdio_lock);
04497982 217 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
4d22de3e
DLR
218 t3_write_reg(adapter, A_MI1_ADDR, addr);
219 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
04497982 220 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
4d22de3e
DLR
221 if (!ret)
222 *valp = t3_read_reg(adapter, A_MI1_DATA);
223 mutex_unlock(&adapter->mdio_lock);
224 return ret;
225}
226
04497982 227static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
4d22de3e
DLR
228 int reg_addr, unsigned int val)
229{
230 int ret;
231 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
232
233 if (mmd_addr)
234 return -EINVAL;
235
236 mutex_lock(&adapter->mdio_lock);
04497982 237 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
4d22de3e
DLR
238 t3_write_reg(adapter, A_MI1_ADDR, addr);
239 t3_write_reg(adapter, A_MI1_DATA, val);
240 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
04497982 241 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
4d22de3e
DLR
242 mutex_unlock(&adapter->mdio_lock);
243 return ret;
244}
245
/* MDIO operations used for clause 22 (directly-addressed) PHYs. */
static const struct mdio_ops mi1_mdio_ops = {
	t3_mi1_read,
	t3_mi1_write
};
250
04497982
DLR
251/*
252 * Performs the address cycle for clause 45 PHYs.
253 * Must be called with the MDIO_LOCK held.
254 */
255static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
256 int reg_addr)
257{
258 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
259
260 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
261 t3_write_reg(adapter, A_MI1_ADDR, addr);
262 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
263 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
264 return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
265 MDIO_ATTEMPTS, 10);
266}
267
4d22de3e
DLR
268/*
269 * MI1 read/write operations for indirect-addressed PHYs.
270 */
271static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
272 int reg_addr, unsigned int *valp)
273{
274 int ret;
4d22de3e
DLR
275
276 mutex_lock(&adapter->mdio_lock);
04497982 277 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
4d22de3e
DLR
278 if (!ret) {
279 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
280 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
04497982 281 MDIO_ATTEMPTS, 10);
4d22de3e
DLR
282 if (!ret)
283 *valp = t3_read_reg(adapter, A_MI1_DATA);
284 }
285 mutex_unlock(&adapter->mdio_lock);
286 return ret;
287}
288
289static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
290 int reg_addr, unsigned int val)
291{
292 int ret;
4d22de3e
DLR
293
294 mutex_lock(&adapter->mdio_lock);
04497982 295 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
4d22de3e
DLR
296 if (!ret) {
297 t3_write_reg(adapter, A_MI1_DATA, val);
298 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
299 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
04497982 300 MDIO_ATTEMPTS, 10);
4d22de3e
DLR
301 }
302 mutex_unlock(&adapter->mdio_lock);
303 return ret;
304}
305
/* MDIO operations used for clause 45 (indirect-addressed) PHYs. */
static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};
310
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Read-modify-writes a PHY register: bits in @clear are masked off
 *	the current value and the result is ORed with @set before being
 *	written back.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int val;
	int ret = mdio_read(phy, mmd, reg, &val);

	if (ret)
		return ret;

	return mdio_write(phy, mmd, reg, (val & ~clear) | set);
}
335
336/**
337 * t3_phy_reset - reset a PHY block
338 * @phy: the PHY to operate on
339 * @mmd: the device address of the PHY block to reset
340 * @wait: how long to wait for the reset to complete in 1ms increments
341 *
342 * Resets a PHY block and optionally waits for the reset to complete.
343 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
344 * for 10G PHYs.
345 */
346int t3_phy_reset(struct cphy *phy, int mmd, int wait)
347{
348 int err;
349 unsigned int ctl;
350
351 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
352 if (err || !wait)
353 return err;
354
355 do {
356 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
357 if (err)
358 return err;
359 ctl &= BMCR_RESET;
360 if (ctl)
361 msleep(1);
362 } while (ctl && --wait);
363
364 return ctl ? -1 : 0;
365}
366
367/**
368 * t3_phy_advertise - set the PHY advertisement registers for autoneg
369 * @phy: the PHY to operate on
370 * @advert: bitmap of capabilities the PHY should advertise
371 *
372 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
373 * requested capabilities.
374 */
375int t3_phy_advertise(struct cphy *phy, unsigned int advert)
376{
377 int err;
378 unsigned int val = 0;
379
380 err = mdio_read(phy, 0, MII_CTRL1000, &val);
381 if (err)
382 return err;
383
384 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
385 if (advert & ADVERTISED_1000baseT_Half)
386 val |= ADVERTISE_1000HALF;
387 if (advert & ADVERTISED_1000baseT_Full)
388 val |= ADVERTISE_1000FULL;
389
390 err = mdio_write(phy, 0, MII_CTRL1000, val);
391 if (err)
392 return err;
393
394 val = 1;
395 if (advert & ADVERTISED_10baseT_Half)
396 val |= ADVERTISE_10HALF;
397 if (advert & ADVERTISED_10baseT_Full)
398 val |= ADVERTISE_10FULL;
399 if (advert & ADVERTISED_100baseT_Half)
400 val |= ADVERTISE_100HALF;
401 if (advert & ADVERTISED_100baseT_Full)
402 val |= ADVERTISE_100FULL;
403 if (advert & ADVERTISED_Pause)
404 val |= ADVERTISE_PAUSE_CAP;
405 if (advert & ADVERTISED_Asym_Pause)
406 val |= ADVERTISE_PAUSE_ASYM;
407 return mdio_write(phy, 0, MII_ADVERTISE, val);
408}
409
410/**
411 * t3_set_phy_speed_duplex - force PHY speed and duplex
412 * @phy: the PHY to operate on
413 * @speed: requested PHY speed
414 * @duplex: requested PHY duplex
415 *
416 * Force a 10/100/1000 PHY's speed and duplex. This also disables
417 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
418 */
419int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
420{
421 int err;
422 unsigned int ctl;
423
424 err = mdio_read(phy, 0, MII_BMCR, &ctl);
425 if (err)
426 return err;
427
428 if (speed >= 0) {
429 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
430 if (speed == SPEED_100)
431 ctl |= BMCR_SPEED100;
432 else if (speed == SPEED_1000)
433 ctl |= BMCR_SPEED1000;
434 }
435 if (duplex >= 0) {
436 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
437 if (duplex == DUPLEX_FULL)
438 ctl |= BMCR_FULLDPLX;
439 }
440 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
441 ctl |= BMCR_ANENABLE;
442 return mdio_write(phy, 0, MII_BMCR, ctl);
443}
444
9b1e3656
DLR
445int t3_phy_lasi_intr_enable(struct cphy *phy)
446{
447 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
448}
449
450int t3_phy_lasi_intr_disable(struct cphy *phy)
451{
452 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
453}
454
455int t3_phy_lasi_intr_clear(struct cphy *phy)
456{
457 u32 val;
458
459 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
460}
461
462int t3_phy_lasi_intr_handler(struct cphy *phy)
463{
464 unsigned int status;
465 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
466
467 if (err)
468 return err;
469 return (status & 1) ? cphy_cause_link_change : 0;
470}
471
/*
 * Static description of each supported adapter model: port count, GPIO
 * setup, interrupt pin assignments, extra link capabilities, MDIO ops and
 * a human-readable name.  NOTE(review): field meanings inferred from
 * struct adapter_info in common.h — confirm against that header.
 */
static const struct adapter_info t3_adap_info[] = {
	{2, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};
493
494/*
495 * Return the adapter_info structure with a given index. Out-of-range indices
496 * return NULL.
497 */
498const struct adapter_info *t3_get_adapter_info(unsigned int id)
499{
500 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
501}
502
04497982
DLR
/* Per-port-type PHY initialization hook. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

/*
 * PHY preparation routines, presumably indexed by the VPD port_type value
 * (see get_vpd_params) — NULL entries are unsupported/absent types.
 * TODO(review): confirm indexing against the callers of port_types.
 */
static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL},
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
};
519
4d22de3e
DLR
/*
 * VPD_ENTRY expands to the three parts of a VPD-R keyword entry:
 * the 2-byte keyword, a 1-byte length, and the data bytes.
 */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);	/* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
551
552#define EEPROM_MAX_POLL 4
553#define EEPROM_STAT_ADDR 0x4000
554#define VPD_BASE 0xc00
555
556/**
557 * t3_seeprom_read - read a VPD EEPROM location
558 * @adapter: adapter to read
559 * @addr: EEPROM address
560 * @data: where to store the read data
561 *
562 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
563 * VPD ROM capability. A zero is written to the flag bit when the
564 * addres is written to the control register. The hardware device will
565 * set the flag to 1 when 4 bytes have been read into the data register.
566 */
05e5c116 567int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
4d22de3e
DLR
568{
569 u16 val;
570 int attempts = EEPROM_MAX_POLL;
05e5c116 571 u32 v;
4d22de3e
DLR
572 unsigned int base = adapter->params.pci.vpd_cap_addr;
573
574 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
575 return -EINVAL;
576
577 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
578 do {
579 udelay(10);
580 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
581 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
582
583 if (!(val & PCI_VPD_ADDR_F)) {
584 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
585 return -EIO;
586 }
05e5c116
AV
587 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
588 *data = cpu_to_le32(v);
4d22de3e
DLR
589 return 0;
590}
591
592/**
593 * t3_seeprom_write - write a VPD EEPROM location
594 * @adapter: adapter to write
595 * @addr: EEPROM address
596 * @data: value to write
597 *
598 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
599 * VPD ROM capability.
600 */
05e5c116 601int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
4d22de3e
DLR
602{
603 u16 val;
604 int attempts = EEPROM_MAX_POLL;
605 unsigned int base = adapter->params.pci.vpd_cap_addr;
606
607 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
608 return -EINVAL;
609
610 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
05e5c116 611 le32_to_cpu(data));
4d22de3e
DLR
612 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
613 addr | PCI_VPD_ADDR_F);
614 do {
615 msleep(1);
616 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
617 } while ((val & PCI_VPD_ADDR_F) && --attempts);
618
619 if (val & PCI_VPD_ADDR_F) {
620 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
621 return -EIO;
622 }
623 return 0;
624}
625
626/**
627 * t3_seeprom_wp - enable/disable EEPROM write protection
628 * @adapter: the adapter
629 * @enable: 1 to enable write protection, 0 to disable it
630 *
631 * Enables or disables write protection on the serial EEPROM.
632 */
633int t3_seeprom_wp(struct adapter *adapter, int enable)
634{
635 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
636}
637
/*
 * Convert a character holding a hex digit ('0'-'9', 'a'-'f', 'A'-'F')
 * to its numeric value.
 */
static unsigned int hex2int(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	return toupper(c) - 'A' + 10;
}
645
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters (clocks, serial number, port types, MAC
 *	address base) stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.  Probe VPD_BASE first and check for the 0x82 ID tag.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	/* Read the whole VPD structure 4 bytes at a time. */
	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	/* Clock values are stored as decimal ASCII strings. */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		/* Fall back to defaults based on the MAC interface type. */
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	/* The MAC address is stored as 12 hex ASCII digits. */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
697
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
716
717/**
718 * sf1_read - read data from the serial flash
719 * @adapter: the adapter
720 * @byte_cnt: number of bytes to read
721 * @cont: whether another operation will be chained
722 * @valp: where to store the read data
723 *
724 * Reads up to 4 bytes of data from the serial flash. The location of
725 * the read needs to be specified prior to calling this by issuing the
726 * appropriate commands to the serial flash.
727 */
728static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
729 u32 *valp)
730{
731 int ret;
732
733 if (!byte_cnt || byte_cnt > 4)
734 return -EINVAL;
735 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
736 return -EBUSY;
737 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
738 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
739 if (!ret)
740 *valp = t3_read_reg(adapter, A_SF_DATA);
741 return ret;
742}
743
744/**
745 * sf1_write - write data to the serial flash
746 * @adapter: the adapter
747 * @byte_cnt: number of bytes to write
748 * @cont: whether another operation will be chained
749 * @val: value to write
750 *
751 * Writes up to 4 bytes of data to the serial flash. The location of
752 * the write needs to be specified prior to calling this by issuing the
753 * appropriate commands to the serial flash.
754 */
755static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
756 u32 val)
757{
758 if (!byte_cnt || byte_cnt > 4)
759 return -EINVAL;
760 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
761 return -EBUSY;
762 t3_write_reg(adapter, A_SF_DATA, val);
763 t3_write_reg(adapter, A_SF_OP,
764 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
765 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
766}
767
768/**
769 * flash_wait_op - wait for a flash operation to complete
770 * @adapter: the adapter
771 * @attempts: max number of polls of the status register
772 * @delay: delay between polls in ms
773 *
774 * Wait for a flash operation to complete by polling the status register.
775 */
776static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
777{
778 int ret;
779 u32 status;
780
781 while (1) {
782 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
783 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
784 return ret;
785 if (!(status & 1))
786 return 0;
787 if (--attempts == 0)
788 return -EAGAIN;
789 if (delay)
790 msleep(delay);
791 }
792}
793
794/**
795 * t3_read_flash - read words from serial flash
796 * @adapter: the adapter
797 * @addr: the start address for the read
798 * @nwords: how many 32-bit words to read
799 * @data: where to store the read data
800 * @byte_oriented: whether to store data as bytes or as words
801 *
802 * Read the specified number of 32-bit words from the serial flash.
803 * If @byte_oriented is set the read data is stored as a byte array
804 * (i.e., big-endian), otherwise as 32-bit words in the platform's
805 * natural endianess.
806 */
807int t3_read_flash(struct adapter *adapter, unsigned int addr,
808 unsigned int nwords, u32 *data, int byte_oriented)
809{
810 int ret;
811
812 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
813 return -EINVAL;
814
815 addr = swab32(addr) | SF_RD_DATA_FAST;
816
817 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
818 (ret = sf1_read(adapter, 1, 1, data)) != 0)
819 return ret;
820
821 for (; nwords; nwords--, data++) {
822 ret = sf1_read(adapter, 4, nwords > 1, data);
823 if (ret)
824 return ret;
825 if (byte_oriented)
826 *data = htonl(*data);
827 }
828 return 0;
829}
830
/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address, then reads the page back to verify the write.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must fit in the flash and must not cross a page. */
	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	/* Program-page command byte plus byte-swapped address. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	/* Stream the data out 4 bytes at a time, big-endian within a word;
	 * the last transfer drops CONT to end the command. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* data was advanced past the payload above; back up to compare. */
	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
878
480fe1a3 879/**
47330077 880 * t3_get_tp_version - read the tp sram version
480fe1a3 881 * @adapter: the adapter
47330077 882 * @vers: where to place the version
480fe1a3 883 *
47330077 884 * Reads the protocol sram version from sram.
480fe1a3 885 */
47330077 886int t3_get_tp_version(struct adapter *adapter, u32 *vers)
480fe1a3
DLR
887{
888 int ret;
480fe1a3
DLR
889
890 /* Get version loaded in SRAM */
891 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
892 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
893 1, 1, 5, 1);
894 if (ret)
895 return ret;
2eab17ab 896
47330077
DLR
897 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
898
899 return 0;
900}
901
902/**
903 * t3_check_tpsram_version - read the tp sram version
904 * @adapter: the adapter
905 * @must_load: set to 1 if loading a new microcode image is required
906 *
907 * Reads the protocol sram version from flash.
908 */
909int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
910{
911 int ret;
912 u32 vers;
913 unsigned int major, minor;
914
915 if (adapter->params.rev == T3_REV_A)
916 return 0;
917
918 *must_load = 1;
919
920 ret = t3_get_tp_version(adapter, &vers);
921 if (ret)
922 return ret;
480fe1a3
DLR
923
924 major = G_TP_VERSION_MAJOR(vers);
925 minor = G_TP_VERSION_MINOR(vers);
926
2eab17ab 927 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
480fe1a3
DLR
928 return 0;
929
47330077
DLR
930 if (major != TP_VERSION_MAJOR)
931 CH_ERR(adapter, "found wrong TP version (%u.%u), "
932 "driver needs version %d.%d\n", major, minor,
933 TP_VERSION_MAJOR, TP_VERSION_MINOR);
934 else {
935 *must_load = 0;
936 CH_ERR(adapter, "found wrong TP version (%u.%u), "
937 "driver compiled for version %d.%d\n", major, minor,
938 TP_VERSION_MAJOR, TP_VERSION_MINOR);
939 }
480fe1a3
DLR
940 return -EINVAL;
941}
942
943/**
2eab17ab 944 * t3_check_tpsram - check if provided protocol SRAM
480fe1a3
DLR
945 * is compatible with this driver
946 * @adapter: the adapter
947 * @tp_sram: the firmware image to write
948 * @size: image size
949 *
950 * Checks if an adapter's tp sram is compatible with the driver.
951 * Returns 0 if the versions are compatible, a negative error otherwise.
952 */
2c733a16
DW
953int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
954 unsigned int size)
480fe1a3
DLR
955{
956 u32 csum;
957 unsigned int i;
05e5c116 958 const __be32 *p = (const __be32 *)tp_sram;
480fe1a3
DLR
959
960 /* Verify checksum */
961 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
962 csum += ntohl(p[i]);
963 if (csum != 0xffffffff) {
964 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
965 csum);
966 return -EINVAL;
967 }
968
969 return 0;
970}
971
4aac3899
DLR
/* Firmware build type encoded in the version word. */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
976
4d22de3e
DLR
977/**
978 * t3_get_fw_version - read the firmware version
979 * @adapter: the adapter
980 * @vers: where to place the version
981 *
982 * Reads the FW version from flash.
983 */
984int t3_get_fw_version(struct adapter *adapter, u32 *vers)
985{
986 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
987}
988
/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *	@must_load: set to 1 if loading a new FW image is required
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	/* Be pessimistic: assume a reload is needed until proven otherwise. */
	*must_load = 1;
	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	/* Exact match with the version the driver was built against: OK. */
	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	if (major != FW_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong FW version(%u.%u), "
		       "driver needs version %u.%u\n", major, minor,
		       FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else if (minor < FW_VERSION_MINOR) {
		/* Older minor version: usable, so an upgrade is optional. */
		*must_load = 0;
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	} else {
		/* Newer minor version: warn but accept it as compatible. */
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}
1033
/**
 *	t3_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given range, one at a time.  Each sector
 *	requires a write-enable command, the erase command itself, and a
 *	poll for completion, in that order.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		/* write-enable, then erase (sector number in bits 8+),
		 * then wait for the flash operation to finish */
		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}
1056
/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	/* Image must be word-aligned and at least the minimum FW size. */
	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	/* ... and must fit below the version/checksum trailer in flash. */
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	/* A valid 1's complement checksummed image sums to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	/* Program the code/data section in 256-byte chunks. */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	/* Write the version word last so a partial download isn't
	 * mistaken for a valid image. */
	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
1111
/* Offset of the CIM control region within the CIM host access window. */
#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.  Each
 *	word is fetched by writing its address to the access-control
 *	register and waiting for the HOSTBUSY handshake to clear.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	/* Another access is still in flight; caller must retry. */
	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
1141
1142
/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	/* Honor the configured pause setting: with autoneg it limits what
	 * the PHY negotiated, without autoneg it overrides it entirely. */
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;                            /* nothing changed */

	/* On rev > 0 XAUI adapters the PCS needs a reset when the link
	 * comes up, and the XAUI activity signals track link state. */
	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
1190
/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		/* Translate the requested pause bits into the standard
		 * Pause/Asym_Pause advertisement encoding. */
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		/* PHY cannot autonegotiate: program the MAC and reset. */
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}
1235
1236/**
1237 * t3_set_vlan_accel - control HW VLAN extraction
1238 * @adapter: the adapter
1239 * @ports: bitmap of adapter ports to operate on
1240 * @on: enable (1) or disable (0) HW VLAN extraction
1241 *
1242 * Enables or disables HW extraction of VLAN tags for the given port.
1243 */
1244void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1245{
1246 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1247 ports << S_VLANEXTRACTIONENABLE,
1248 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1249}
1250
/* One entry of a table-driven interrupt dispatch table, terminated by
 * an entry with mask == 0; see t3_handle_intr_status(). */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};
1257
/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurrences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
1298
/*
 * Per-module interrupt enable masks.  Each *_INTR_MASK selects which
 * conditions of the corresponding hardware block are enabled and checked
 * by its interrupt handler below; PL_INTR_MASK is the top-level
 * concentrator mask dispatched by t3_slow_intr_handler().
 */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
/* MSI-X parity errors are deliberately left disabled (commented out). */
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.  All conditions except
 * correctable ECC errors are treated as fatal.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1389
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	/* On a PEX error, also report the detailed error code register
	 * before the generic table-driven handling below. */
	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1423
1424/*
1425 * TP interrupt handler.
1426 */
1427static void tp_intr_handler(struct adapter *adapter)
1428{
1429 static const struct intr_info tp_intr_info[] = {
1430 {0xffffff, "TP parity error", -1, 1},
1431 {0x1000000, "TP out of Rx pages", -1, 1},
1432 {0x2000000, "TP out of Tx pages", -1, 1},
1433 {0}
1434 };
1435
a2604be5 1436 static struct intr_info tp_intr_info_t3c[] = {
b881955b
DLR
1437 {0x1fffffff, "TP parity error", -1, 1},
1438 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1439 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1440 {0}
a2604be5
DLR
1441 };
1442
4d22de3e 1443 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
a2604be5 1444 adapter->params.rev < T3_REV_C ?
b881955b 1445 tp_intr_info : tp_intr_info_t3c, NULL))
4d22de3e
DLR
1446 t3_fatal_err(adapter);
1447}
1448
/*
 * CIM interrupt handler.  Every reported condition is fatal.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
1486
/*
 * ULP RX interrupt handler.  Every reported condition is fatal.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1508
/*
 * ULP TX interrupt handler.  PBL out-of-bounds conditions are counted
 * but non-fatal; parity errors are fatal.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1527
/* Aggregate framing-error bits for the PM TX SPI interfaces. */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1536
/*
 * PM TX interrupt handler.  Every reported condition is fatal.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1557
/* Aggregate framing-error bits for the PM RX SPI interfaces. */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1566
/*
 * PM RX interrupt handler.  Every reported condition is fatal.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1587
/*
 * CPL switch interrupt handler.  Every reported condition is fatal.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
1607
/*
 * MPS interrupt handler.  Any parity error is fatal.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
1622
/* MC7 conditions that take the adapter down: uncorrectable, parity,
 * and address errors (correctable errors are only counted). */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.  Logs and counts correctable, uncorrectable,
 * parity, and address errors for the given MC7 memory instance.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* The error address register only exists on rev > 0 parts. */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1675
/* XGMAC conditions that are fatal: TX/RX FIFO parity errors. */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.  Counts the various MAC error conditions and
 * returns whether any interrupt was pending.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	/* Acknowledge everything we saw before deciding on fatality. */
	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}
1710
/*
 * Interrupt handler for PHY events.  Walks the ports whose PHYs can raise
 * interrupts, dispatches to the per-PHY handler, and reacts to link,
 * FIFO-error, and module-change causes.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		if (!(p->phy.caps & SUPPORTED_IRQ))
			continue;

		/* Each port's PHY interrupt arrives on its own GPIO line. */
		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
			if (phy_cause & cphy_cause_module_change)
				t3_os_phymod_changed(adapter, i);
		}
	}

	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}
1739
/*
 * T3 slow path (non-data) interrupt handler.  Reads the top-level cause
 * register and dispatches to the per-module handlers for every pending
 * enabled source.  Returns 0 if no enabled interrupt was pending.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}
1794
f231e0a5
DLR
1795static unsigned int calc_gpio_intr(struct adapter *adap)
1796{
1797 unsigned int i, gpi_intr = 0;
1798
1799 for_each_port(adap, i)
1800 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1801 adapter_info(adap)->gpio_intr[i])
1802 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1803 return gpi_intr;
1804}
1805
4d22de3e
DLR
/**
 *	t3_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable interrupts by setting the interrupt enable registers of the
 *	various HW modules and then enabling the top-level interrupt
 *	concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	/* TP enable mask differs between pre-T3C and T3C revisions. */
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);

	/* Rev > 0 parts support additional CPL/ULPTX error conditions. */
	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	/* Unmask the top-level concentrator last. */
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}
1857
/**
 *	t3_intr_disable - disable a card's interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}
1871
/**
 *	t3_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts by writing every module's cause register,
 *	then the top-level cause register.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	/* The PEX error detail register exists only on PCIe adapters. */
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}
1912
/**
 *	t3_port_intr_enable - enable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be enabled
 *
 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_enable(phy);	/* PHY-specific interrupt enable */
}
1929
/**
 *	t3_port_intr_disable - disable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be disabled
 *
 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);	/* PHY-specific interrupt disable */
}
1946
/**
 *	t3_port_intr_clear - clear port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts to clear
 *
 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	/* MAC interrupt causes are cleared by writing all-ones. */
	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);	/* PHY-specific interrupt clear */
}
1963
/* Maximum number of times to poll an SGE context command for completion */
#define SG_CONTEXT_CMD_ATTEMPTS 100

/**
 *	t3_sge_write_context - write an SGE context
 *	@adapter: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Program an SGE context with the values already loaded in the
 *	CONTEXT_DATA? registers.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	/* All-ones masks: every bit of CONTEXT_DATA0-3 is written. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	/* Opcode 1 = write context; then poll until BUSY clears. */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
1987
/*
 * clear_sge_ctxt - zero out an SGE context of the given type.
 * Loads zeros into CONTEXT_DATA0-3 and issues a full context write.
 */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
			  unsigned int type)
{
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	return t3_sge_write_context(adap, id, type);
}
1997
/**
 *	t3_sge_init_ecntxt - initialize an SGE egress context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@type: the egress context type
 *	@respq: associated response queue
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@token: uP token
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE egress context and make it ready for use.  If the
 *	platform allows concurrent context operations, the caller is
 *	responsible for appropriate locking.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	/* Offload queues start with no credits; others get FW_WR_NUM. */
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Base address is stored in 4K units, split across DATA1-3. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
2041
/**
 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@bsize: size of each buffer for this queue
 *	@cong_thres: threshold to signal congestion to upstream producers
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE free list context and make it ready for use.  The
 *	caller is responsible for ensuring only one context operation occurs
 *	at a time.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Base address in 4K units; cidx and bsize are split across words. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}
2082
/**
 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@fl_thres: threshold for selecting the normal or jumbo free list
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE response queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;	/* stored in 4K units */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	/* irq_vec_idx < 0 leaves the queue with interrupts disabled */
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
2121
/**
 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@rspq: response queue for async notifications
 *	@ovfl_mode: CQ overflow mode
 *	@credits: completion queue credits
 *	@credit_thres: the credit threshold
 *
 *	Initialize an SGE completion queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;	/* stored in 4K units */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	/* CQ_ERR mirrors the overflow mode setting */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}
2158
/**
 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
 *	@adapter: the adapter
 *	@id: the egress context id
 *	@enable: enable (1) or disable (0) the context
 *
 *	Enable or disable an SGE egress context.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Mask the write so only the EC_VALID bit is updated. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2183
/**
 *	t3_sge_disable_fl - disable an SGE free-buffer list
 *	@adapter: the adapter
 *	@id: the free list context id
 *
 *	Disable an SGE free-buffer list.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Mask the write to the FL size field only; writing size 0 disables
	 * the free list. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2207
/**
 *	t3_sge_disable_rspcntxt - disable an SGE response queue
 *	@adapter: the adapter
 *	@id: the response queue context id
 *
 *	Disable an SGE response queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Mask the write to the CQ size field only; size 0 disables the
	 * queue. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2231
/**
 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
 *	@adapter: the adapter
 *	@id: the completion queue context id
 *
 *	Disable an SGE completion queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Mask the write to the CQ size field only; size 0 disables the
	 * queue. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2255
/**
 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
 *	@adapter: the adapter
 *	@id: the context id
 *	@op: the operation to perform
 *	@credits: credit value loaded into CONTEXT_DATA0 for the operation
 *
 *	Perform the selected operation on an SGE completion queue context.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	/* Operations 2-6 return the current CQ index. */
	if (op >= 2 && op < 7) {
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		/* Rev 0 (T3A): read the context back to obtain the index. */
		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}
2295
/**
 *	t3_sge_read_context - read an SGE context
 *	@type: the context type
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE context of the given type.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
			       unsigned int id, u32 data[4])
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Opcode 0 = read context into CONTEXT_DATA0-3. */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
			    SG_CONTEXT_CMD_ATTEMPTS, 1))
		return -EIO;
	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
	return 0;
}
2323
2324/**
2325 * t3_sge_read_ecntxt - read an SGE egress context
2326 * @adapter: the adapter
2327 * @id: the context id
2328 * @data: holds the retrieved context
2329 *
2330 * Read an SGE egress context. The caller is responsible for ensuring
2331 * only one context operation occurs at a time.
2332 */
2333int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2334{
2335 if (id >= 65536)
2336 return -EINVAL;
2337 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2338}
2339
2340/**
2341 * t3_sge_read_cq - read an SGE CQ context
2342 * @adapter: the adapter
2343 * @id: the context id
2344 * @data: holds the retrieved context
2345 *
2346 * Read an SGE CQ context. The caller is responsible for ensuring
2347 * only one context operation occurs at a time.
2348 */
2349int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2350{
2351 if (id >= 65536)
2352 return -EINVAL;
2353 return t3_sge_read_context(F_CQ, adapter, id, data);
2354}
2355
2356/**
2357 * t3_sge_read_fl - read an SGE free-list context
2358 * @adapter: the adapter
2359 * @id: the context id
2360 * @data: holds the retrieved context
2361 *
2362 * Read an SGE free-list context. The caller is responsible for ensuring
2363 * only one context operation occurs at a time.
2364 */
2365int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2366{
2367 if (id >= SGE_QSETS * 2)
2368 return -EINVAL;
2369 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2370}
2371
2372/**
2373 * t3_sge_read_rspq - read an SGE response queue context
2374 * @adapter: the adapter
2375 * @id: the context id
2376 * @data: holds the retrieved context
2377 *
2378 * Read an SGE response queue context. The caller is responsible for
2379 * ensuring only one context operation occurs at a time.
2380 */
2381int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2382{
2383 if (id >= SGE_QSETS)
2384 return -EINVAL;
2385 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2386}
2387
/**
 *	t3_config_rss - configure Rx packet steering
 *	@adapter: the adapter
 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
 *	@cpus: values for the CPU lookup table (0xff terminated)
 *	@rspq: values for the response queue lookup table (0xffff terminated)
 *
 *	Programs the receive packet steering logic.  @cpus and @rspq provide
 *	the values for the CPU and response queue lookup tables.  If they
 *	provide fewer values than the size of the tables the supplied values
 *	are used repeatedly until the tables are fully populated.
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 * cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;	/* table index in upper half */

			/* Pack two 6-bit CPU values per entry; wrap to the
			 * start of @cpus when the 0xff terminator is next. */
			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			/* Wrap to the start of @rspq at the 0xffff
			 * terminator. */
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			if (rspq[q_idx] == 0xffff)
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
2427
/**
 *	t3_read_rss - read the contents of the RSS tables
 *	@adapter: the adapter
 *	@lkup: holds the contents of the RSS lookup table
 *	@map: holds the contents of the RSS map table
 *
 *	Reads the contents of the receive packet steering tables.
 *	Returns 0 on success or -EAGAIN if a table read did not complete.
 */
int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
{
	int i;
	u32 val;

	if (lkup)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			/* 0xffff in the upper half selects a read of entry i */
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
			/* bit 31 set indicates the read completed —
			 * NOTE(review): presumably a valid flag; verify
			 * against the register spec */
			if (!(val & 0x80000000))
				return -EAGAIN;
			/* each entry holds two CPU values */
			*lkup++ = val;
			*lkup++ = (val >> 8);
		}

	if (map)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*map++ = val;
		}
	return 0;
}
2463
2464/**
2465 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2466 * @adap: the adapter
2467 * @enable: 1 to select offload mode, 0 for regular NIC
2468 *
2469 * Switches TP to NIC/offload mode.
2470 */
2471void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2472{
2473 if (is_offload(adap) || !enable)
2474 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2475 V_NICMODE(!enable));
2476}
2477
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	/* Truncate the page count down to a multiple of 24. */
	return (mem_size / pg_size / 24) * 24;
}
2494
/*
 * mem_region - program the base of a CM memory region and advance the cursor.
 *
 * Writes @start to register A_<reg> and then advances @start past the region.
 * Wrapped in do { } while (0) so the macro expands as a single statement and
 * is safe inside unbraced if/else; arguments are parenthesized to avoid
 * precedence surprises.  @start is evaluated more than once, so it must not
 * have side effects.
 */
#define mem_region(adap, start, size, reg) \
	do { \
		t3_write_reg((adap), A_ ## reg, (start)); \
		(start) += (size); \
	} while (0)
2498
/**
 *	partition_mem - partition memory and configure TP memory settings
 *	@adap: the adapter
 *	@p: the TP parameters
 *
 *	Partitions context and payload memory and configures TP's memory
 *	registers.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	/* Size the timer region according to the number of connections. */
	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	/* Lay out CM regions back to back; m is the running byte offset. */
	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	/* CIM SDRAM gets the remainder, 4KB aligned. */
	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	/* Grow nservers if more connections fit than MC5 currently serves. */
	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}
2564
/* Write a TP register through the TP_PIO indirect access window:
 * the address goes into A_TP_PIO_ADDR, then the value into A_TP_PIO_DATA. */
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2571
/*
 * tp_config - program TP's global configuration registers: checksum
 * offload, TCP options, delayed ACK, congestion behavior, and
 * revision-specific settings.
 */
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
2621
/* Desired TP timer resolution in usec */
#define TP_TMR_RES 50

/* TCP timer values in ms */
#define TP_DACK_TIMER 50
#define TP_RTO_MIN 250

/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, such as the various timer resolutions and
 *	the TCP timer values.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	/* Each resolution is a power-of-2 divisor of the core clock. */
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;	/* timer ticks per second */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	/* Exponential-backoff multiplier table, 4 entries per register. */
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

/* Convert seconds to timer ticks for the writes below. */
#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
2672
/**
 *	t3_tp_set_coalescing_size - set receive coalescing size
 *	@adap: the adapter
 *	@size: the receive coalescing size, 0 to disable coalescing
 *	@psh: whether a set PSH bit should deliver coalesced data
 *
 *	Set the receive coalescing size and PSH bit handling.
 *	Returns 0 on success or -EINVAL if @size exceeds the HW limit.
 */
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}
2702
/**
 *	t3_tp_set_max_rxsize - set the max receive size
 *	@adap: the adapter
 *	@size: the max receive size
 *
 *	Set TP's max receive size.  This is the limit that applies when
 *	receive coalescing is disabled.
 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	/* Apply the same limit to both PM transfer-length fields. */
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
2716
/*
 * Fill in the default MTU table.  See draft-mathis-plpmtud-00.txt for the
 * values.  The minimum is 88 so it can accommodate max size TCP/IP headers
 * when SACK and timestamps are enabled and still have at least 8 bytes of
 * payload.
 */
static void init_mtus(unsigned short mtus[])
{
	static const unsigned short default_mtus[] = {
		88, 88, 256, 512, 576, 1024, 1280, 1492,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned int i;

	for (i = 0; i < sizeof(default_mtus) / sizeof(default_mtus[0]); i++)
		mtus[i] = default_mtus[i];
}
2741
/*
 * Initial congestion control parameters: per-window additive increments (a)
 * and multiplicative decrease factors (b) for the 32 congestion control
 * windows.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short incr[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short decr[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < 32; i++) {
		a[i] = incr[i];
		b[i] = decr[i];
	}
}
2781
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
 *	Update the high-speed congestion control table with the supplied alpha,
 *	beta, and MTUs.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		/* entry index in bits 31:24, rounded log2 in 23:16 */
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* additive increment per window, clamped below
			 * at CC_MIN_INCR; mtu - 40 subtracts the
			 * presumed TCP/IP header overhead */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
2829
/**
 *	t3_read_hw_mtus - returns the values in the HW MTU table
 *	@adap: the adapter
 *	@mtus: where to store the HW MTU values
 *
 *	Reads the HW MTU table.  Writing 0xff000000 | i selects entry i
 *	for read-back; the MTU value occupies the low 14 bits.
 */
void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
{
	int i;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int val;

		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
		val = t3_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = val & 0x3fff;
	}
}
2849
/**
 *	t3_get_cong_cntl_tab - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the alpha values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.  Writing 0xffff0000 | (mtu << 5) | w selects the
 *	(MTU, window) entry; the increment is in the low 13 bits.
 */
void t3_get_cong_cntl_tab(struct adapter *adap,
			  unsigned short incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			t3_write_reg(adap, A_TP_CCTRL_TABLE,
				     0xffff0000 | (mtu << 5) | w);
			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
				       0x1fff;
		}
}
2871
/**
 *	t3_tp_get_mib_stats - read TP's MIB counters
 *	@adap: the adapter
 *	@tps: holds the returned counter values
 *
 *	Returns the values of TP's MIB counters.  The structure is filled
 *	by one indirect block read, one u32 per counter, so its layout
 *	must match the hardware MIB index order exactly.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}
2884
/*
 * Program the limits of one ULP RX memory region and advance the running
 * offset.  NOTE: deliberately not wrapped in do { } while (0) — the
 * final "start += len" must update the caller's variable so regions can
 * be laid out back to back.
 */
#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	start += len

/* Program the limits of one ULP TX region; does NOT advance "start". */
#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)

/*
 * Carve the upper half of PMRX memory into the ULP regions.  Because
 * ulptx_region() leaves "m" unchanged, each ULP TX region (TPT, PBL)
 * shares its address range with the ULP RX region programmed right
 * after it — presumably intentional overlay; confirm against the T3
 * memory map before changing the ordering here.
 */
static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
2909
480fe1a3
DLR
2910/**
2911 * t3_set_proto_sram - set the contents of the protocol sram
2912 * @adapter: the adapter
2913 * @data: the protocol image
2914 *
2915 * Write the contents of the protocol SRAM.
2916 */
2c733a16 2917int t3_set_proto_sram(struct adapter *adap, const u8 *data)
480fe1a3
DLR
2918{
2919 int i;
2c733a16 2920 const __be32 *buf = (const __be32 *)data;
480fe1a3
DLR
2921
2922 for (i = 0; i < PROTO_SRAM_LINES; i++) {
05e5c116
AV
2923 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2924 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2925 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2926 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2927 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2eab17ab 2928
480fe1a3
DLR
2929 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2930 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2931 return -EIO;
2932 }
2933 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2934
2935 return 0;
2936}
2937
/*
 * Program one TP packet-trace filter.  The 4-tuple plus protocol, VLAN
 * and interface are packed into four key words with matching mask
 * words; @invert (bit 29) and @enable (bit 28) are folded into key[3].
 * @filter_index selects the Tx (0) or Rx (non-zero) filter bank.  The
 * trailing TP_PIO_DATA read flushes the posted indirect writes.
 */
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);	/* flush */
}
2970
/**
 *	t3_config_sched - configure a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps
 *	@sched: the scheduler index
 *
 *	Configure a HW scheduler for the target rate.  The limiter emits
 *	"bpt" bytes every "cpt" clock ticks; all cpt in [1, 255] are
 *	searched for the (cpt, bpt) pair whose effective rate is closest
 *	to the request.  A rate of 0 programs zero limiter fields.
 *	Returns -EINVAL if no feasible pair exists for a non-zero rate.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;	/* cclk presumably KHz -> Hz */
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;	/* limiter ticks per second */
			bpt = (kbps + tps / 2) / tps;	/* bytes per tick, rounded */
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				/* "<=" prefers the largest cpt at equal error */
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* each TM PIO word holds two schedulers: even in the low half,
	 * odd in the high half */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
3014
/*
 * Bring up the TP module: apply the static configuration, set up VLAN
 * acceleration, and for offload-capable adapters program the TP timers
 * and kick off free-list initialization, waiting for it to finish.
 * TP is released from reset only if free-list init did not time out.
 * Returns non-zero (the busy status) on timeout.
 */
static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}
3035
3036int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3037{
3038 if (port_mask & ~((1 << adap->params.nports) - 1))
3039 return -EINVAL;
3040 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3041 port_mask << S_PORT0ACTIVE);
3042 return 0;
3043}
3044
/*
 * Perform the bits of HW initialization that are dependent on the number
 * of available ports.  Single-port adapters run the ULP arbiters without
 * round-robin and enable only port 0 in MPS; dual-port adapters enable
 * round-robin arbitration with equal DMA weights, activate both ports,
 * split PM1 TX configuration between them, and map the TP TX modulation
 * queues across the two channels.
 */
static void init_hw_for_avail_ports(struct adapter *adap, int nports)
{
	int i;

	if (nports == 1) {
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
			     F_PORT0ACTIVE | F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
	} else {
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}
3076
/*
 * Run impedance calibration for the MAC pads.  XAUI adapters use the
 * hardware calibration engine, retried up to 5 times; RGMII adapters
 * get fixed pull-up/pull-down impedance settings.  Returns 0 on
 * success, -1 if XAUI calibration never completes without fault.
 */
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			/* writing 0 starts a calibration cycle */
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);	/* flush */
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				/* lock in the measured impedance value */
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
3103
/*
 * T3B-specific RGMII pad calibration: pulse CALRESET, latch the
 * impedance settings with IMPSETUPDATE, then pulse CALUPDATE.  XAUI
 * adapters need no action here.  The exact register-toggle sequence is
 * order-sensitive — do not reorder.
 */
static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
3118
/* DRAM timing parameters for one MC7 memory-type/speed combination */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* active-to-precharge delay */
	unsigned char ActToRdWrDly;	/* active-to-read/write delay */
	unsigned char PreCyc;		/* precharge cycles */
	unsigned char RefCyc[5];	/* refresh cycles, indexed by density */
	unsigned char BkCyc;		/* bank cycles */
	unsigned char WrToRdDly;	/* write-to-read delay */
	unsigned char RdToWrDly;	/* read-to-write delay */
};
3128
/*
 * Write a value to a register and check that the write completed.  These
 * writes normally complete in a cycle or two, so one read should suffice.
 * The very first read exists to flush the posted write to the device.
 * Completion is signalled by the register's F_BUSY bit clearing.
 * Returns 0 on completion, -EIO if the register is still busy.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}
3143
/*
 * Bring up one MC7 memory controller.  Runs pad calibration (skipped in
 * "slow" mode), programs timing parameters for the given memory type,
 * walks the DRAM initialization sequence (precharge, extended mode
 * registers, DLL reset, refreshes, mode register), enables periodic
 * refresh and ECC, then runs the BIST engine with a zero data pattern
 * over the whole array before marking the controller ready.
 *
 * @mc7_clock is in KHz; @mem_type indexes mc7_mode[]/mc7_timings[].
 * Returns 0 on success (or immediately if no memory is attached),
 * -1 on any failure.  The statement order below mirrors the hardware
 * init sequence — do not reorder.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)		/* no memory attached to this controller */
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		/* single calibration cycle for the memory pads */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	val = slow ? 3 : 6;	/* extended mode register 1 value */
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */
	/* NOTE(review): 7812(.5) scaling presumably derives the refresh
	 * divider from a 7.8125us refresh interval — confirm against the
	 * MC7 documentation before changing */

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	/* zero-fill the array via BIST so ECC state starts clean */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}
3256
/*
 * Configure PCIe-specific settings: pick the ACK latency and replay
 * timer limit from lookup tables indexed by negotiated link width
 * (log2) and max payload size, adjust for the fast-training sequence
 * counts, clear any stale PEX errors, and enable the link-down-reset
 * and DMA/completion control bits in PCIE_CFG.
 */
static void config_pcie(struct adapter *adap)
{
	/* ACK latency by [log2(link width)][max payload size code] */
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	/* replay timer limit, indexed the same way */
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	/* rev 0 has no separate RX count register */
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);	/* clear old errors */
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
3309
/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is
 * enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only the
 * top 8 bits are available for use, the rest must be 0.
 *
 * Returns 0 on success, -EIO on any failure (calibration, memory
 * controller/MC5 init, TP init, or firmware start timeout).
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {	/* adapter has memory: bring up MC7s + MC5 */
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	init_hw_for_avail_ports(adapter, adapter->params.nports);
	t3_sge_init(adapter, &adapter->params.sge);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	/* boot the uP from flash and pass it the platform parameters */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}
3390
/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.  For PCIe only the capability offset and negotiated link
 *	width are recorded (p->speed is left untouched); for PCI/PCI-X the
 *	speed, bus width and exact variant are derived from PCIX_MODE.
 */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	static unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode, pcie_cap;

	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		p->pcie_cap_addr = pcie_cap;
		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
				     &val);
		p->width = (val >> 4) & 0x3f;	/* negotiated link width */
		return;
	}

	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}
3429
3430/**
3431 * init_link_config - initialize a link's SW state
3432 * @lc: structure holding the link state
3433 * @ai: information about the current card
3434 *
3435 * Initializes the SW state maintained for each link, including the link's
3436 * capabilities and default speed/duplex/flow-control/autonegotiation
3437 * settings.
3438 */
7b9b0943 3439static void init_link_config(struct link_config *lc, unsigned int caps)
4d22de3e
DLR
3440{
3441 lc->supported = caps;
3442 lc->requested_speed = lc->speed = SPEED_INVALID;
3443 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3444 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3445 if (lc->supported & SUPPORTED_Autoneg) {
3446 lc->advertising = lc->supported;
3447 lc->autoneg = AUTONEG_ENABLE;
3448 lc->requested_fc |= PAUSE_AUTONEG;
3449 } else {
3450 lc->advertising = 0;
3451 lc->autoneg = AUTONEG_DISABLE;
3452 }
3453}
3454
3455/**
3456 * mc7_calc_size - calculate MC7 memory size
3457 * @cfg: the MC7 configuration
3458 *
3459 * Calculates the size of an MC7 memory in bytes from the value of its
3460 * configuration register.
3461 */
7b9b0943 3462static unsigned int mc7_calc_size(u32 cfg)
4d22de3e
DLR
3463{
3464 unsigned int width = G_WIDTH(cfg);
3465 unsigned int banks = !!(cfg & F_BKS) + 1;
3466 unsigned int org = !!(cfg & F_ORG) + 1;
3467 unsigned int density = G_DEN(cfg);
3468 unsigned int MBs = ((256 << density) * banks) / (org << width);
3469
3470 return MBs << 20;
3471}
3472
7b9b0943
RD
3473static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3474 unsigned int base_addr, const char *name)
4d22de3e
DLR
3475{
3476 u32 cfg;
3477
3478 mc7->adapter = adapter;
3479 mc7->name = name;
3480 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3481 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
8ac3ba68 3482 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
4d22de3e
DLR
3483 mc7->width = G_WIDTH(cfg);
3484}
3485
/*
 * Initialize the SW state for one MAC.  @index selects the per-port
 * XGMAC register block.  Rev-0 XAUI adapters additionally get their
 * serdes control programmed and RGMII mode disabled here.
 */
void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	mac->adapter = adapter;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;	/* unicast address count starts at 1 */

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		/* magic serdes values differ for 10G vs 1G links */
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}
3499
/*
 * One-time early hardware initialization: set up MDIO, program the I2C
 * clock divider, configure the GPIOs, reset the MC5 server index
 * register and SGE OCO base, then enable and reset the MAC clocks on
 * both port register blocks so the MAC registers become accessible.
 */
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */

	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */
}
3525
/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't, so PCI config state is saved/restored around the reset
 * for pre-B2 PCIe parts.  Success is detected by the Chelsio vendor ID
 * (0x1425) reappearing at config-space offset 0; returns -1 if the
 * device never comes back, 0 otherwise.
 */
int t3_reset_adapter(struct adapter *adapter)
{
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Delay. Give Some time to device to reset fully.
	 * XXX The delay time should be modified.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		/* offset 0x00 is the PCI vendor ID */
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}
3559
/*
 * Initialize memories that power up with bad parity: clear the SGE
 * egress contexts (the first 16 and the top 16) and all response-queue
 * contexts, then zero-fill every address of the four CIM ingress-bounce
 * queues through the IBQ debug interface.  Must be called while the SGE
 * context command interface is idle; returns -EBUSY if it is not, or
 * the first error from a context-clear/IBQ write.
 */
static int init_parity(struct adapter *adap)
{
	int i, err, addr;

	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	for (err = i = 0; !err && i < 16; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0xfff0; !err && i <= 0xffff; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0; !err && i < SGE_QSETS; i++)
		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
	if (err)
		return err;

	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
	for (i = 0; i < 4; i++)
		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
				     F_IBQDBGWR | V_IBQDBGQID(i) |
				     V_IBQDBGADDR(addr));
			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
					      F_IBQDBGBUSY, 0, 2, 1);
			if (err)
				return err;
		}
	return 0;
}
3589
/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 *
 * @ai describes the board; @reset non-zero performs a full adapter reset
 * after reading the VPD.  Returns 0 on success or a negative error /
 * -1 on reset failure.
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports;
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	adapter->params.linkpoll_period = 0;
	/* 10G MACs accumulate stats more often than 1G ones */
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		/* derive TP memory layout from the MC7 sizes */
		p->nchan = ai->nports;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	/* offload requires all three memories to be present */
	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		/* j walks the VPD port-type slots, skipping empty ones */
		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr,
		       ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr,
		       ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		p->phy.ops->power_down(&p->phy, 1);
		/* PHYs without interrupt support need link polling */
		if (!(p->phy.caps & SUPPORTED_IRQ))
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
3696
/*
 * Assert GPIO0's output value; GPIO0 is configured as an output in
 * early_hw_init() and presumably drives the adapter "ready" LED —
 * confirm against the board schematic.
 */
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}
204e2f98
DLR
3702
3703int t3_replay_prep_adapter(struct adapter *adapter)
3704{
3705 const struct adapter_info *ai = adapter->params.info;
04497982 3706 unsigned int i, j = -1;
204e2f98
DLR
3707 int ret;
3708
3709 early_hw_init(adapter, ai);
3710 ret = init_parity(adapter);
3711 if (ret)
3712 return ret;
3713
3714 for_each_port(adapter, i) {
04497982 3715 const struct port_type_info *pti;
204e2f98 3716 struct port_info *p = adap2pinfo(adapter, i);
204e2f98 3717
04497982
DLR
3718 while (!adapter->params.vpd.port_type[++j])
3719 ;
3720
3721 pti = &port_types[adapter->params.vpd.port_type[j]];
3722 ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
78e4689e
DLR
3723 if (ret)
3724 return ret;
204e2f98 3725 p->phy.ops->power_down(&p->phy, 1);
204e2f98
DLR
3726 }
3727
3728return 0;
3729}
3730