/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
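
/*
 * Example (illustrative sketch, not part of the original source): polling
 * the serial-flash OP register defined later in this file until its busy
 * bit clears, with up to 5 checks 10 us apart, capturing the final value:
 *
 *	u32 val;
 *
 *	if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 5, 10, &val))
 *		CH_ERR(adapter, "operation timed out\n");
 */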

/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register.  Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
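
/*
 * Example (illustrative sketch): t3_set_reg_field() is a read-modify-write
 * helper.  Assuming the driver's usual S_/M_/V_ field macros, updating only
 * the clock-divider field of A_MI1_CFG would look like:
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_CLKDIV(M_CLKDIV),
 *			 V_CLKDIV(new_div));
 *
 * Every bit outside the mask is preserved, and the trailing read flushes
 * the write.
 */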

/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @start_idx: index of first indirect register to read
 * @nregs: how many indirect registers to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
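
/*
 * Example (illustrative sketch, assuming the adapter's struct mc7 instances
 * such as adapter->pmrx): dumping the first four 64-bit words of an MC7
 * memory through the backdoor interface:
 *
 *	u64 data[4];
 *
 *	if (!t3_mc7_bd_read(&adapter->pmrx, 0, ARRAY_SIZE(data), data))
 *		... data[0..3] now hold words 0-3 ...
 */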

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
		       u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		ret = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	.read = t3_mi1_read,
	.write = t3_mi1_write,
	.mode_support = MDIO_SUPPORTS_C22
};
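
/*
 * Illustrative sketch (hypothetical call, not from the original source): a
 * PHY prep routine receives one of these operation tables and issues all of
 * its MDIO traffic through it, e.g.
 *
 *	ops->read(dev, phy_addr, mmd, MII_BMSR);
 *
 * keeping the MI1 register details out of the PHY drivers.  The clause-45
 * table follows below.
 */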

/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the MDIO_LOCK held.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			ret = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
			 u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	.read = mi1_ext_read,
	.write = mi1_ext_write,
	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};

/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = t3_mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = t3_mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
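
/*
 * Example (grounded in t3_phy_reset() below): clearing the low-power bit
 * while setting the reset bit of a PHY control register in a single
 * read-modify-write:
 *
 *	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
 *				  MDIO_CTRL1_RESET);
 */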

/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
				  MDIO_CTRL1_RESET);
	if (err || !wait)
		return err;

	do {
		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
		if (err)
			return err;
		ctl &= MDIO_CTRL1_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}
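
/*
 * Example (illustrative sketch): advertising 1000BASE-T full duplex plus
 * symmetric pause before restarting auto-negotiation:
 *
 *	t3_phy_advertise(phy, ADVERTISED_1000baseT_Full | ADVERTISED_Pause);
 */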

/**
 * t3_phy_advertise_fiber - set fiber PHY advertisement register
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a fiber PHY's advertisement register to advertise the
 * requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}

/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex.  This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
}
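
/*
 * Example (illustrative sketch): forcing 100 Mb/s full duplex, which also
 * clears BMCR_ANENABLE:
 *
 *	t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
 *
 * Requesting SPEED_1000 instead keeps auto-negotiation enabled, since it
 * is mandatory for 1000BASE-T.
 */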

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			     MDIO_PMA_LASI_LSALARM);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
			       &status);

	if (err)
		return err;
	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
}

static const struct adapter_info t3_adap_info[] = {
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{1, 1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},
	{},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310" },
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}
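
/*
 * Example (illustrative sketch, hypothetical probe snippet): the index is
 * typically taken from the PCI device table's driver_data:
 *
 *	const struct adapter_info *ai = t3_get_adapter_info(ent->driver_data);
 *
 *	if (!ai)
 *		return -ENODEV;
 */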

struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);		/* part number */
	VPD_ENTRY(ec, 16);		/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);	/* serial number */
	VPD_ENTRY(na, 12);		/* MAC address base */
	VPD_ENTRY(cclk, 6);		/* core clock */
	VPD_ENTRY(mclk, 6);		/* mem clock */
	VPD_ENTRY(uclk, 6);		/* uP clk */
	VPD_ENTRY(mdc, 6);		/* MDIO clk */
	VPD_ENTRY(mt, 2);		/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);		/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);		/* XAUI1 config */
	VPD_ENTRY(port0, 2);		/* PHY0 complex */
	VPD_ENTRY(port1, 2);		/* PHY1 complex */
	VPD_ENTRY(port2, 2);		/* PHY2 complex */
	VPD_ENTRY(port3, 2);		/* PHY3 complex */
	VPD_ENTRY(rv, 1);		/* csum */
	u32 pad;			/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL 40
#define EEPROM_STAT_ADDR 0x4000
#define VPD_BASE 0xc00

/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.  A zero is written to the flag bit when the
 * address is written to the control register.  The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}
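
/*
 * Example (illustrative sketch): reading the first VPD word and converting
 * it to host byte order:
 *
 *	__le32 word;
 *	u32 v;
 *
 *	if (t3_seeprom_read(adapter, VPD_BASE, &word))
 *		return -EIO;
 *	v = le32_to_cpu(word);
 */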

/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,		/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,		/* flash address holding FW version */
	FW_MIN_SIZE = 8			/* at least version and csum */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
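
/*
 * Example (grounded in t3_get_fw_version() below): fetching the firmware
 * version word from flash in host byte order:
 *
 *	u32 vers;
 *
 *	ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, &vers, 0);
 */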

/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}

/**
 * t3_get_tp_version - read the TP SRAM version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol SRAM version loaded in SRAM.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 * t3_check_tpsram_version - check the TP SRAM version
 * @adapter: the adapter
 *
 * Reads the protocol SRAM version and checks it against the version the
 * driver was compiled for.
 */
int t3_check_tpsram_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;
	else {
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 * t3_check_tpsram - check if provided protocol SRAM
 * is compatible with this driver
 * @adapter: the adapter
 * @tp_sram: the firmware image to write
 * @size: image size
 *
 * Checks if an adapter's TP SRAM image is compatible with the driver.
 * Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/**
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
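
/*
 * Illustrative sketch of the flash layout t3_load_fw() assumes (addresses
 * from the serial flash constants enum earlier in the file):
 *
 *	FW_FLASH_BOOT_ADDR (0x70000)   code + data, @size - 8 bytes
 *	FW_VERS_ADDR       (0x7fffc)   4-byte FW version
 *	FW_VERS_ADDR + 4               4-byte 1's-complement checksum
 *
 * hence the -EFBIG check above: @size may not exceed
 * FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR bytes.
 */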

#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 *
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}

static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}

/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC.  After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (!lc->link_ok && link_ok) {
		u32 rx_cfg, rx_hash_high, rx_hash_low;
		u32 status;

		t3_xgm_intr_enable(adapter, port_id);
		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		if (status & F_LINKFAULTCHANGE) {
			mac->stats.link_faults++;
			pi->link_fault = 1;
		}
		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
	}

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;	/* nothing changed */

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}

void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports a link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}

/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};

/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter.  The table is terminated by an entry
 * specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
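
/*
 * Example (illustrative sketch): a minimal action table for a hypothetical
 * module, with one fatal condition and one counted-only condition; the {0}
 * entry terminates the table.  F_EXAMPLE_PERR, F_EXAMPLE_CORR and
 * STAT_EXAMPLE_CORR are made-up names:
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{F_EXAMPLE_PERR, "example parity error", -1, 1},
 *		{F_EXAMPLE_CORR, "example correctable error",
 *		 STAT_EXAMPLE_CORR, 0},
 *		{0}
 *	};
 */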

#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
1604
1605/*
1606 * ULP RX interrupt handler.
1607 */
1608static void ulprx_intr_handler(struct adapter *adapter)
1609{
1610 static const struct intr_info ulprx_intr_info[] = {
b881955b
DLR
1611 {F_PARERRDATA, "ULP RX data parity error", -1, 1},
1612 {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1613 {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1614 {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1615 {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1616 {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1617 {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1618 {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
4d22de3e
DLR
1619 {0}
1620 };
1621
1622 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1623 ulprx_intr_info, NULL))
1624 t3_fatal_err(adapter);
1625}
1626
1627/*
1628 * ULP TX interrupt handler.
1629 */
1630static void ulptx_intr_handler(struct adapter *adapter)
1631{
1632 static const struct intr_info ulptx_intr_info[] = {
1633 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1634 STAT_ULP_CH0_PBL_OOB, 0},
1635 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1636 STAT_ULP_CH1_PBL_OOB, 0},
b881955b 1637 {0xfc, "ULP TX parity error", -1, 1},
1638 {0}
1639 };
1640
1641 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1642 ulptx_intr_info, adapter->irq_stats))
1643 t3_fatal_err(adapter);
1644}
1645
1646#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1647 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1648 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1649 F_ICSPI1_TX_FRAMING_ERROR)
1650#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1651 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1652 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1653 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1654
1655/*
1656 * PM TX interrupt handler.
1657 */
1658static void pmtx_intr_handler(struct adapter *adapter)
1659{
1660 static const struct intr_info pmtx_intr_info[] = {
1661 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1662 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1663 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1664 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1665 "PMTX ispi parity error", -1, 1},
1666 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1667 "PMTX ospi parity error", -1, 1},
1668 {0}
1669 };
1670
1671 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1672 pmtx_intr_info, NULL))
1673 t3_fatal_err(adapter);
1674}
1675
1676#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1677 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1678 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1679 F_IESPI1_TX_FRAMING_ERROR)
1680#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1681 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1682 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1683 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1684
1685/*
1686 * PM RX interrupt handler.
1687 */
1688static void pmrx_intr_handler(struct adapter *adapter)
1689{
1690 static const struct intr_info pmrx_intr_info[] = {
1691 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1692 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1693 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1694 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1695 "PMRX ispi parity error", -1, 1},
1696 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1697 "PMRX ospi parity error", -1, 1},
1698 {0}
1699 };
1700
1701 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1702 pmrx_intr_info, NULL))
1703 t3_fatal_err(adapter);
1704}
1705
1706/*
1707 * CPL switch interrupt handler.
1708 */
1709static void cplsw_intr_handler(struct adapter *adapter)
1710{
1711 static const struct intr_info cplsw_intr_info[] = {
1712 {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1713 {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1714 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1715 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1716 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1717 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1718 {0}
1719 };
1720
1721 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1722 cplsw_intr_info, NULL))
1723 t3_fatal_err(adapter);
1724}
1725
1726/*
1727 * MPS interrupt handler.
1728 */
1729static void mps_intr_handler(struct adapter *adapter)
1730{
1731 static const struct intr_info mps_intr_info[] = {
1732 {0x1ff, "MPS parity error", -1, 1},
1733 {0}
1734 };
1735
1736 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1737 mps_intr_info, NULL))
1738 t3_fatal_err(adapter);
1739}
1740
1741#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1742
1743/*
1744 * MC7 interrupt handler.
1745 */
1746static void mc7_intr_handler(struct mc7 *mc7)
1747{
1748 struct adapter *adapter = mc7->adapter;
1749 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1750
1751 if (cause & F_CE) {
1752 mc7->stats.corr_err++;
1753 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1754 "data 0x%x 0x%x 0x%x\n", mc7->name,
1755 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1756 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1757 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1758 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1759 }
1760
1761 if (cause & F_UE) {
1762 mc7->stats.uncorr_err++;
1763 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1764 "data 0x%x 0x%x 0x%x\n", mc7->name,
1765 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1766 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1767 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1768 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1769 }
1770
1771 if (G_PE(cause)) {
1772 mc7->stats.parity_err++;
1773 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1774 mc7->name, G_PE(cause));
1775 }
1776
1777 if (cause & F_AE) {
1778 u32 addr = 0;
1779
1780 if (adapter->params.rev > 0)
1781 addr = t3_read_reg(adapter,
1782 mc7->offset + A_MC7_ERR_ADDR);
1783 mc7->stats.addr_err++;
1784 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1785 mc7->name, addr);
1786 }
1787
1788 if (cause & MC7_INTR_FATAL)
1789 t3_fatal_err(adapter);
1790
1791 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1792}
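/*
 * Annotation (not in the original source): only uncorrectable (F_UE),
 * parity (V_PE) and address (F_AE) errors are in MC7_INTR_FATAL above;
 * correctable ECC errors (F_CE) are counted and logged but do not take
 * the adapter down.
 */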
1793
1794#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1795 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1796/*
1797 * XGMAC interrupt handler.
1798 */
1799static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1800{
1801 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1802 /*
1803 * We mask out interrupt causes for which we're not taking interrupts.
1804 * This allows us to use polling logic to monitor some of the other
1805 * conditions when taking interrupts would impose too much load on the
1806 * system.
1807 */
1808 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1809 ~F_RXFIFO_OVERFLOW;
1810
1811 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1812 mac->stats.tx_fifo_parity_err++;
1813 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1814 }
1815 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1816 mac->stats.rx_fifo_parity_err++;
1817 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1818 }
1819 if (cause & F_TXFIFO_UNDERRUN)
1820 mac->stats.tx_fifo_urun++;
1821 if (cause & F_RXFIFO_OVERFLOW)
1822 mac->stats.rx_fifo_ovfl++;
1823 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1824 mac->stats.serdes_signal_loss++;
1825 if (cause & F_XAUIPCSCTCERR)
1826 mac->stats.xaui_pcs_ctc_err++;
1827 if (cause & F_XAUIPCSALIGNCHANGE)
1828 mac->stats.xaui_pcs_align_change++;
1829 if (cause & F_XGM_INT) {
1830 t3_set_reg_field(adap,
1831 A_XGM_INT_ENABLE + mac->offset,
1832 F_XGM_INT, 0);
1833 mac->stats.link_faults++;
1834
1835 t3_os_link_fault_handler(adap, idx);
1836 }
1837
1838 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
bf792094 1839
1840 if (cause & XGM_INTR_FATAL)
1841 t3_fatal_err(adap);
bf792094 1842
1843 return cause != 0;
1844}
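/*
 * Annotation (not in the original source): F_RXFIFO_OVERFLOW is masked
 * out of the cause above, so overflows are left to the polling path
 * described in the opening comment rather than counted here; a link
 * fault (F_XGM_INT) is reported once and its enable bit cleared, to be
 * re-armed later via t3_xgm_intr_enable(). The function returns nonzero
 * if any cause bit was pending.
 */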
1845
1846/*
1847 * Interrupt handler for PHY events.
1848 */
1849int t3_phy_intr_handler(struct adapter *adapter)
1850{
1851 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1852
1853 for_each_port(adapter, i) {
1854 struct port_info *p = adap2pinfo(adapter, i);
1855
04497982 1856 if (!(p->phy.caps & SUPPORTED_IRQ))
1857 continue;
1858
f231e0a5 1859 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1ca03cbc 1860 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1861
1862 if (phy_cause & cphy_cause_link_change)
1863 t3_link_changed(adapter, i);
1864 if (phy_cause & cphy_cause_fifo_error)
1ca03cbc 1865 p->phy.fifo_errors++;
1866 if (phy_cause & cphy_cause_module_change)
1867 t3_os_phymod_changed(adapter, i);
1868 }
1869 }
1870
1871 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1872 return 0;
1873}
1874
1875/*
1876 * T3 slow path (non-data) interrupt handler.
1877 */
1878int t3_slow_intr_handler(struct adapter *adapter)
1879{
1880 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1881
1882 cause &= adapter->slow_intr_mask;
1883 if (!cause)
1884 return 0;
1885 if (cause & F_PCIM0) {
1886 if (is_pcie(adapter))
1887 pcie_intr_handler(adapter);
1888 else
1889 pci_intr_handler(adapter);
1890 }
1891 if (cause & F_SGE3)
1892 t3_sge_err_intr_handler(adapter);
1893 if (cause & F_MC7_PMRX)
1894 mc7_intr_handler(&adapter->pmrx);
1895 if (cause & F_MC7_PMTX)
1896 mc7_intr_handler(&adapter->pmtx);
1897 if (cause & F_MC7_CM)
1898 mc7_intr_handler(&adapter->cm);
1899 if (cause & F_CIM)
1900 cim_intr_handler(adapter);
1901 if (cause & F_TP1)
1902 tp_intr_handler(adapter);
1903 if (cause & F_ULP2_RX)
1904 ulprx_intr_handler(adapter);
1905 if (cause & F_ULP2_TX)
1906 ulptx_intr_handler(adapter);
1907 if (cause & F_PM1_RX)
1908 pmrx_intr_handler(adapter);
1909 if (cause & F_PM1_TX)
1910 pmtx_intr_handler(adapter);
1911 if (cause & F_CPL_SWITCH)
1912 cplsw_intr_handler(adapter);
1913 if (cause & F_MPS0)
1914 mps_intr_handler(adapter);
1915 if (cause & F_MC5A)
1916 t3_mc5_intr_handler(&adapter->mc5);
1917 if (cause & F_XGMAC0_0)
1918 mac_intr_handler(adapter, 0);
1919 if (cause & F_XGMAC0_1)
1920 mac_intr_handler(adapter, 1);
1921 if (cause & F_T3DBG)
1922 t3_os_ext_intr_handler(adapter);
1923
1924 /* Clear the interrupts just processed. */
1925 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1926 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1927 return 1;
1928}
1929
1930static unsigned int calc_gpio_intr(struct adapter *adap)
1931{
1932 unsigned int i, gpi_intr = 0;
1933
1934 for_each_port(adap, i)
1935 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1936 adapter_info(adap)->gpio_intr[i])
1937 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1938 return gpi_intr;
1939}
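/*
 * Illustrative example (not in the original source): on a two-port
 * adapter whose PHY interrupts arrive on GPIOs 3 and 5, gpio_intr[] =
 * {3, 5} yields an enable mask of (1 << 3) | (1 << 5) = 0x28 for
 * A_T3DBG_INT_ENABLE below.
 */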
1940
1941/**
1942 * t3_intr_enable - enable interrupts
1943 * @adapter: the adapter whose interrupts should be enabled
1944 *
1945 * Enable interrupts by setting the interrupt enable registers of the
1946 * various HW modules and then enabling the top-level interrupt
1947 * concentrator.
1948 */
1949void t3_intr_enable(struct adapter *adapter)
1950{
1951 static const struct addr_val_pair intr_en_avp[] = {
1952 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1953 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1954 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1955 MC7_INTR_MASK},
1956 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1957 MC7_INTR_MASK},
1958 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1959 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1960 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1961 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1962 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1963 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1964 };
1965
1966 adapter->slow_intr_mask = PL_INTR_MASK;
1967
1968 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1969 t3_write_reg(adapter, A_TP_INT_ENABLE,
1970 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1971
1972 if (adapter->params.rev > 0) {
1973 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1974 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1975 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1976 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1977 F_PBL_BOUND_ERR_CH1);
1978 } else {
1979 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1980 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1981 }
1982
1983 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1984
1985 if (is_pcie(adapter))
1986 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1987 else
1988 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1989 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1990 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1991}
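/*
 * Annotation (not in the original source): the per-module enable
 * registers are programmed first and the top-level A_PL_INT_ENABLE0
 * mask last, so no cause can fire before its module mask is in place;
 * the trailing read flushes the posted writes. Note the TP enable mask
 * differs between pre-C and T3C+ revisions (0x3bfffff vs 0x2bfffff).
 */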
1992
1993/**
1994 * t3_intr_disable - disable a card's interrupts
1995 * @adapter: the adapter whose interrupts should be disabled
1996 *
1997 * Disable interrupts. We only disable the top-level interrupt
1998 * concentrator and the SGE data interrupts.
1999 */
2000void t3_intr_disable(struct adapter *adapter)
2001{
2002 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2003 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2004 adapter->slow_intr_mask = 0;
2005}
2006
2007/**
2008 * t3_intr_clear - clear all interrupts
2009 * @adapter: the adapter whose interrupts should be cleared
2010 *
2011 * Clears all interrupts.
2012 */
2013void t3_intr_clear(struct adapter *adapter)
2014{
2015 static const unsigned int cause_reg_addr[] = {
2016 A_SG_INT_CAUSE,
2017 A_SG_RSPQ_FL_STATUS,
2018 A_PCIX_INT_CAUSE,
2019 A_MC7_INT_CAUSE,
2020 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2021 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2022 A_CIM_HOST_INT_CAUSE,
2023 A_TP_INT_CAUSE,
2024 A_MC5_DB_INT_CAUSE,
2025 A_ULPRX_INT_CAUSE,
2026 A_ULPTX_INT_CAUSE,
2027 A_CPL_INTR_CAUSE,
2028 A_PM1_TX_INT_CAUSE,
2029 A_PM1_RX_INT_CAUSE,
2030 A_MPS_INT_CAUSE,
2031 A_T3DBG_INT_CAUSE,
2032 };
2033 unsigned int i;
2034
2035 /* Clear PHY and MAC interrupts for each port. */
2036 for_each_port(adapter, i)
2037 t3_port_intr_clear(adapter, i);
2038
2039 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2040 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2041
2042 if (is_pcie(adapter))
2043 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2044 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2045 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2046}
2047
2048void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2049{
2050 struct port_info *pi = adap2pinfo(adapter, idx);
2051
2052 t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2053 XGM_EXTRA_INTR_MASK);
2054}
2055
2056void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2057{
2058 struct port_info *pi = adap2pinfo(adapter, idx);
2059
2060 t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2061 0x7ff);
2062}
2063
2064/**
2065 * t3_port_intr_enable - enable port-specific interrupts
2066 * @adapter: associated adapter
2067 * @idx: index of port whose interrupts should be enabled
2068 *
2069 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2070 * adapter port.
2071 */
2072void t3_port_intr_enable(struct adapter *adapter, int idx)
2073{
2074 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2075
2076 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2077 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2078 phy->ops->intr_enable(phy);
2079}
2080
2081/**
2082 * t3_port_intr_disable - disable port-specific interrupts
2083 * @adapter: associated adapter
2084 * @idx: index of port whose interrupts should be disabled
2085 *
2086 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2087 * adapter port.
2088 */
2089void t3_port_intr_disable(struct adapter *adapter, int idx)
2090{
2091 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2092
2093 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2094 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2095 phy->ops->intr_disable(phy);
2096}
2097
2098/**
2099 * t3_port_intr_clear - clear port-specific interrupts
2100 * @adapter: associated adapter
2101 * @idx: index of port whose interrupts to clear
2102 *
2103 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2104 * adapter port.
2105 */
2106void t3_port_intr_clear(struct adapter *adapter, int idx)
2107{
2108 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2109
2110 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2111 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2112 phy->ops->intr_clear(phy);
2113}
2114
2115#define SG_CONTEXT_CMD_ATTEMPTS 100
2116
2117/**
2118 * t3_sge_write_context - write an SGE context
2119 * @adapter: the adapter
2120 * @id: the context id
2121 * @type: the context type
2122 *
2123 * Program an SGE context with the values already loaded in the
2124	 * CONTEXT_DATA0..3 registers.
2125 */
2126static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2127 unsigned int type)
2128{
2129 if (type == F_RESPONSEQ) {
2130 /*
2131 * Can't write the Response Queue Context bits for
2132 * Interrupt Armed or the Reserve bits after the chip
2133 * has been initialized out of reset. Writing to these
2134 * bits can confuse the hardware.
2135 */
2136 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2137 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2138 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2139 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2140 } else {
2141 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2142 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2143 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2144 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2145 }
2146 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2147 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2148 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2149 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2150}
2151
2152/**
2153 * clear_sge_ctxt - completely clear an SGE context
2154 * @adapter: the adapter
2155 * @id: the context id
2156 * @type: the context type
2157 *
2158 * Completely clear an SGE context. Used predominantly at post-reset
2159 * initialization. Note in particular that we don't skip writing to any
2160 * "sensitive bits" in the contexts the way that t3_sge_write_context()
2161 * does ...
2162 */
2163static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2164 unsigned int type)
2165{
2166 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2167 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2168 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2169 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2170 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2171 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2172 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2173 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2174 t3_write_reg(adap, A_SG_CONTEXT_CMD,
2175 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2176 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2177 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2178}
2179
2180/**
2181 * t3_sge_init_ecntxt - initialize an SGE egress context
2182 * @adapter: the adapter to configure
2183 * @id: the context id
2184 * @gts_enable: whether to enable GTS for the context
2185 * @type: the egress context type
2186 * @respq: associated response queue
2187 * @base_addr: base address of queue
2188 * @size: number of queue entries
2189 * @token: uP token
2190 * @gen: initial generation value for the context
2191 * @cidx: consumer pointer
2192 *
2193 * Initialize an SGE egress context and make it ready for use. If the
2194 * platform allows concurrent context operations, the caller is
2195 * responsible for appropriate locking.
2196 */
2197int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2198 enum sge_context_type type, int respq, u64 base_addr,
2199 unsigned int size, unsigned int token, int gen,
2200 unsigned int cidx)
2201{
2202 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2203
2204 if (base_addr & 0xfff) /* must be 4K aligned */
2205 return -EINVAL;
2206 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2207 return -EBUSY;
2208
2209 base_addr >>= 12;
2210 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2211 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2212 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2213 V_EC_BASE_LO(base_addr & 0xffff));
2214 base_addr >>= 16;
2215 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2216 base_addr >>= 32;
2217 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2218 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2219 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2220 F_EC_VALID);
2221 return t3_sge_write_context(adapter, id, F_EGRESS);
2222}
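/*
 * Annotation (not in the original source): after the 4K-alignment check
 * the bus address is programmed in three pieces: bits 12..27 go into
 * DATA1 (V_EC_BASE_LO), bits 28..59 into DATA2, and the top four bits
 * into DATA3 (V_EC_BASE_HI), so a full 64-bit address fits in the
 * context image.
 */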
2223
2224/**
2225 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2226 * @adapter: the adapter to configure
2227 * @id: the context id
2228 * @gts_enable: whether to enable GTS for the context
2229 * @base_addr: base address of queue
2230 * @size: number of queue entries
2231 * @bsize: size of each buffer for this queue
2232 * @cong_thres: threshold to signal congestion to upstream producers
2233 * @gen: initial generation value for the context
2234 * @cidx: consumer pointer
2235 *
2236 * Initialize an SGE free list context and make it ready for use. The
2237 * caller is responsible for ensuring only one context operation occurs
2238 * at a time.
2239 */
2240int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2241 int gts_enable, u64 base_addr, unsigned int size,
2242 unsigned int bsize, unsigned int cong_thres, int gen,
2243 unsigned int cidx)
2244{
2245 if (base_addr & 0xfff) /* must be 4K aligned */
2246 return -EINVAL;
2247 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2248 return -EBUSY;
2249
2250 base_addr >>= 12;
2251 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2252 base_addr >>= 32;
2253 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2254 V_FL_BASE_HI((u32) base_addr) |
2255 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2256 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2257 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2258 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2259 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2260 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2261 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2262 return t3_sge_write_context(adapter, id, F_FREELIST);
2263}
2264
2265/**
2266 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2267 * @adapter: the adapter to configure
2268 * @id: the context id
2269 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2270 * @base_addr: base address of queue
2271 * @size: number of queue entries
2272 * @fl_thres: threshold for selecting the normal or jumbo free list
2273 * @gen: initial generation value for the context
2274 * @cidx: consumer pointer
2275 *
2276 * Initialize an SGE response queue context and make it ready for use.
2277 * The caller is responsible for ensuring only one context operation
2278 * occurs at a time.
2279 */
2280int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2281 int irq_vec_idx, u64 base_addr, unsigned int size,
2282 unsigned int fl_thres, int gen, unsigned int cidx)
2283{
2284 unsigned int intr = 0;
2285
2286 if (base_addr & 0xfff) /* must be 4K aligned */
2287 return -EINVAL;
2288 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2289 return -EBUSY;
2290
2291 base_addr >>= 12;
2292 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2293 V_CQ_INDEX(cidx));
2294 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2295 base_addr >>= 32;
2296 if (irq_vec_idx >= 0)
2297 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2298 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2299 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2300 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2301 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2302}
2303
2304/**
2305 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2306 * @adapter: the adapter to configure
2307 * @id: the context id
2308 * @base_addr: base address of queue
2309 * @size: number of queue entries
2310 * @rspq: response queue for async notifications
2311 * @ovfl_mode: CQ overflow mode
2312 * @credits: completion queue credits
2313 * @credit_thres: the credit threshold
2314 *
2315 * Initialize an SGE completion queue context and make it ready for use.
2316 * The caller is responsible for ensuring only one context operation
2317 * occurs at a time.
2318 */
2319int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2320 unsigned int size, int rspq, int ovfl_mode,
2321 unsigned int credits, unsigned int credit_thres)
2322{
2323 if (base_addr & 0xfff) /* must be 4K aligned */
2324 return -EINVAL;
2325 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2326 return -EBUSY;
2327
2328 base_addr >>= 12;
2329 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2330 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2331 base_addr >>= 32;
2332 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2333 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2334 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2335 V_CQ_ERR(ovfl_mode));
2336 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2337 V_CQ_CREDIT_THRES(credit_thres));
2338 return t3_sge_write_context(adapter, id, F_CQ);
2339}
2340
2341/**
2342 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2343 * @adapter: the adapter
2344 * @id: the egress context id
2345 * @enable: enable (1) or disable (0) the context
2346 *
2347 * Enable or disable an SGE egress context. The caller is responsible for
2348 * ensuring only one context operation occurs at a time.
2349 */
2350int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2351{
2352 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2353 return -EBUSY;
2354
2355 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2356 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2357 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2358 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2359 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2360 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2361 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2362 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2363 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2364}
2365
2366/**
2367 * t3_sge_disable_fl - disable an SGE free-buffer list
2368 * @adapter: the adapter
2369 * @id: the free list context id
2370 *
2371 * Disable an SGE free-buffer list. The caller is responsible for
2372 * ensuring only one context operation occurs at a time.
2373 */
2374int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2375{
2376 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2377 return -EBUSY;
2378
2379 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2380 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2381 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2382 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2383 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2384 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2385 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2386 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2387 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2388}
2389
2390/**
2391 * t3_sge_disable_rspcntxt - disable an SGE response queue
2392 * @adapter: the adapter
2393 * @id: the response queue context id
2394 *
2395 * Disable an SGE response queue. The caller is responsible for
2396 * ensuring only one context operation occurs at a time.
2397 */
2398int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2399{
2400 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2401 return -EBUSY;
2402
2403 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2404 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2405 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2406 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2407 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2408 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2409 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2410 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2411 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2412}
2413
2414/**
2415 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2416 * @adapter: the adapter
2417 * @id: the completion queue context id
2418 *
2419 * Disable an SGE completion queue. The caller is responsible for
2420 * ensuring only one context operation occurs at a time.
2421 */
2422int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2423{
2424 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2425 return -EBUSY;
2426
2427 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2428 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2429 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2430 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2431 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2432 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2433 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2434 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2435 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2436}
2437
2438/**
2439 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2440 * @adapter: the adapter
2441 * @id: the context id
2442 * @op: the operation to perform
2443 *
2444 * Perform the selected operation on an SGE completion queue context.
2445 * The caller is responsible for ensuring only one context operation
2446 * occurs at a time.
2447 */
2448int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2449 unsigned int credits)
2450{
2451 u32 val;
2452
2453 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2454 return -EBUSY;
2455
2456 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2457 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2458 V_CONTEXT(id) | F_CQ);
2459 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2460 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2461 return -EIO;
2462
2463 if (op >= 2 && op < 7) {
2464 if (adapter->params.rev > 0)
2465 return G_CQ_INDEX(val);
2466
2467 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2468 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2469 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2470 F_CONTEXT_CMD_BUSY, 0,
2471 SG_CONTEXT_CMD_ATTEMPTS, 1))
2472 return -EIO;
2473 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2474 }
2475 return 0;
2476}
2477
2478/**
2479 * t3_sge_read_context - read an SGE context
2480 * @type: the context type
2481 * @adapter: the adapter
2482 * @id: the context id
2483 * @data: holds the retrieved context
2484 *
2485	 * Read an SGE context of the given type. The caller is responsible for
2486 * only one context operation occurs at a time.
2487 */
2488static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2489 unsigned int id, u32 data[4])
2490{
2491 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2492 return -EBUSY;
2493
2494 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2495 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2496 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
bb9366af 2497 SG_CONTEXT_CMD_ATTEMPTS, 1))
2498 return -EIO;
2499 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2500 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2501 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2502 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2503 return 0;
2504}
2505
2506/**
2507 * t3_sge_read_ecntxt - read an SGE egress context
2508 * @adapter: the adapter
2509 * @id: the context id
2510 * @data: holds the retrieved context
2511 *
2512 * Read an SGE egress context. The caller is responsible for ensuring
2513 * only one context operation occurs at a time.
2514 */
2515int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2516{
2517 if (id >= 65536)
2518 return -EINVAL;
2519 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2520}
2521
2522/**
2523 * t3_sge_read_cq - read an SGE CQ context
2524 * @adapter: the adapter
2525 * @id: the context id
2526 * @data: holds the retrieved context
2527 *
2528 * Read an SGE CQ context. The caller is responsible for ensuring
2529 * only one context operation occurs at a time.
2530 */
2531int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2532{
2533 if (id >= 65536)
2534 return -EINVAL;
2535 return t3_sge_read_context(F_CQ, adapter, id, data);
2536}
2537
2538/**
2539 * t3_sge_read_fl - read an SGE free-list context
2540 * @adapter: the adapter
2541 * @id: the context id
2542 * @data: holds the retrieved context
2543 *
2544 * Read an SGE free-list context. The caller is responsible for ensuring
2545 * only one context operation occurs at a time.
2546 */
2547int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2548{
2549 if (id >= SGE_QSETS * 2)
2550 return -EINVAL;
2551 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2552}
2553
2554/**
2555 * t3_sge_read_rspq - read an SGE response queue context
2556 * @adapter: the adapter
2557 * @id: the context id
2558 * @data: holds the retrieved context
2559 *
2560 * Read an SGE response queue context. The caller is responsible for
2561 * ensuring only one context operation occurs at a time.
2562 */
2563int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2564{
2565 if (id >= SGE_QSETS)
2566 return -EINVAL;
2567 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2568}
2569
2570/**
2571 * t3_config_rss - configure Rx packet steering
2572 * @adapter: the adapter
2573 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2574 * @cpus: values for the CPU lookup table (0xff terminated)
2575 * @rspq: values for the response queue lookup table (0xffff terminated)
2576 *
2577 * Programs the receive packet steering logic. @cpus and @rspq provide
2578 * the values for the CPU and response queue lookup tables. If they
2579 * provide fewer values than the size of the tables the supplied values
2580 * are used repeatedly until the tables are fully populated.
2581 */
2582void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2583 const u8 *cpus, const u16 *rspq)
2584{
2585 int i, j, cpu_idx = 0, q_idx = 0;
2586
2587 if (cpus)
2588 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2589 u32 val = i << 16;
2590
2591 for (j = 0; j < 2; ++j) {
2592 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2593 if (cpus[cpu_idx] == 0xff)
2594 cpu_idx = 0;
2595 }
2596 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2597 }
2598
2599 if (rspq)
2600 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2601 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2602 (i << 16) | rspq[q_idx++]);
2603 if (rspq[q_idx] == 0xffff)
2604 q_idx = 0;
2605 }
2606
2607 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2608}
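/*
 * Usage sketch (illustrative, not from this file): spread flows over
 * two response queues. Both lookup tables are RSS_TABLE_SIZE entries
 * deep and recycle the supplied values at the 0xff/0xffff terminators.
 *
 *	static const u8 cpus[] = { 0, 1, 0xff };
 *	static const u16 rspq[] = { 0, 1, 0xffff };
 *
 *	t3_config_rss(adapter, F_RQFEEDBACKENABLE | V_RRCPLCPUSIZE(6),
 *		      cpus, rspq);
 *
 * The TP_RSS_CONFIG flags shown are an assumption modeled on the
 * driver's setup code; see regs.h for the authoritative set.
 */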
2609
2610/**
2611 * t3_read_rss - read the contents of the RSS tables
2612 * @adapter: the adapter
2613 * @lkup: holds the contents of the RSS lookup table
2614 * @map: holds the contents of the RSS map table
2615 *
2616 * Reads the contents of the receive packet steering tables.
2617 */
2618int t3_read_rss(struct adapter *adapter, u8 *lkup, u16 *map)
2619{
2620 int i;
2621 u32 val;
2622
2623 if (lkup)
2624 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2625 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2626 0xffff0000 | i);
2627 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2628 if (!(val & 0x80000000))
2629 return -EAGAIN;
2630 *lkup++ = val;
2631 *lkup++ = (val >> 8);
2632 }
2633
2634 if (map)
2635 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2636 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2637 0xffff0000 | i);
2638 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2639 if (!(val & 0x80000000))
2640 return -EAGAIN;
2641 *map++ = val;
2642 }
2643 return 0;
2644}
2645
2646/**
2647 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2648 * @adap: the adapter
2649 * @enable: 1 to select offload mode, 0 for regular NIC
2650 *
2651 * Switches TP to NIC/offload mode.
2652 */
2653void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2654{
2655 if (is_offload(adap) || !enable)
2656 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2657 V_NICMODE(!enable));
2658}
2659
2660/**
2661 * pm_num_pages - calculate the number of pages of the payload memory
2662 * @mem_size: the size of the payload memory
2663 * @pg_size: the size of each payload memory page
2664 *
2665 * Calculate the number of pages, each of the given size, that fit in a
2666 * memory of the specified size, respecting the HW requirement that the
2667 * number of pages must be a multiple of 24.
2668 */
2669static inline unsigned int pm_num_pages(unsigned int mem_size,
2670 unsigned int pg_size)
2671{
2672 unsigned int n = mem_size / pg_size;
2673
2674 return n - n % 24;
2675}
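/*
 * Worked example (annotation): a 16 MB payload memory with 4 KB pages
 * gives n = 4096; since 4096 % 24 == 16, pm_num_pages() returns 4080.
 */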
2676
2677#define mem_region(adap, start, size, reg) \
2678 t3_write_reg((adap), A_ ## reg, (start)); \
2679 start += size
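/*
 * Annotation (not in the original source): mem_region() expands to two
 * statements and advances @start as a side effect, so it is only safe
 * in straight-line code such as partition_mem() below; an unbraced
 * if/else around it would orphan the "start += size" half.
 */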
2680
b881955b 2681/**
2682 * partition_mem - partition memory and configure TP memory settings
2683 * @adap: the adapter
2684 * @p: the TP parameters
2685 *
2686 * Partitions context and payload memory and configures TP's memory
2687 * registers.
2688 */
2689static void partition_mem(struct adapter *adap, const struct tp_params *p)
2690{
2691 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2692 unsigned int timers = 0, timers_shift = 22;
2693
2694 if (adap->params.rev > 0) {
2695 if (tids <= 16 * 1024) {
2696 timers = 1;
2697 timers_shift = 16;
2698 } else if (tids <= 64 * 1024) {
2699 timers = 2;
2700 timers_shift = 18;
2701 } else if (tids <= 256 * 1024) {
2702 timers = 3;
2703 timers_shift = 20;
2704 }
2705 }
2706
2707 t3_write_reg(adap, A_TP_PMM_SIZE,
2708 p->chan_rx_size | (p->chan_tx_size >> 16));
2709
2710 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2711 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2712 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2713 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2714 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2715
2716 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2717 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2718 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2719
2720 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2721	/* Add a bit of headroom and make it a multiple of 24 */
2722 pstructs += 48;
2723 pstructs -= pstructs % 24;
2724 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2725
2726 m = tids * TCB_SIZE;
2727 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2728 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2729 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2730 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2731 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2732 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2733 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2734 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2735
2736 m = (m + 4095) & ~0xfff;
2737 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2738 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2739
2740 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2741 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2742 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2743 if (tids < m)
2744 adap->params.mc5.nservers += m - tids;
2745}
2746
2747static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2748 u32 val)
2749{
2750 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2751 t3_write_reg(adap, A_TP_PIO_DATA, val);
2752}
2753
2754static void tp_config(struct adapter *adap, const struct tp_params *p)
2755{
2756 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2757 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2758 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2759 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2760 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
3fa58c88 2761 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2762 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2763 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
3fa58c88 2764 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
4d22de3e 2765 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
b881955b 2766 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2767 F_IPV6ENABLE | F_NICMODE);
2768 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2769 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2770 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2771 adap->params.rev > 0 ? F_ENABLEESND :
2772 F_T3A_ENABLEESND);
4d22de3e 2773
3b1d307b 2774 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2775 F_ENABLEEPCMDAFULL,
2776		 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2777 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2778 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2779 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2780 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2781 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2782 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2eab17ab 2783
2784 if (adap->params.rev > 0) {
2785 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2786 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2787 F_TXPACEAUTO);
2788 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2789 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2790 } else
2791 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2792
2793 if (adap->params.rev == T3_REV_C)
2794 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2795 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2796 V_TABLELATENCYDELTA(4));
2797
2798 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2799 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2800 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2801 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2802}
2803
2804/* Desired TP timer resolution in usec */
2805#define TP_TMR_RES 50
2806
2807/* TCP timer values in ms */
2808#define TP_DACK_TIMER 50
2809#define TP_RTO_MIN 250
2810
2811/**
2812 * tp_set_timers - set TP timing parameters
2813 * @adap: the adapter to set
2814 * @core_clk: the core clock frequency in Hz
2815 *
2816 * Set TP's timing parameters, such as the various timer resolutions and
2817 * the TCP timer values.
2818 */
2819static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2820{
2821 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2822 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2823 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2824 unsigned int tps = core_clk >> tre;
2825
2826 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2827 V_DELAYEDACKRESOLUTION(dack_re) |
2828 V_TIMESTAMPRESOLUTION(tstamp_re));
2829 t3_write_reg(adap, A_TP_DACK_TIMER,
2830 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2831 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2832 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2833 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2834 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2835 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2836 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2837 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2838 V_KEEPALIVEMAX(9));
2839
2840#define SECONDS * tps
2841
2842 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2843 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2844 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2845 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2846 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2847 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2848 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2849 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2850 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2851
2852#undef SECONDS
2853}
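/*
 * Worked example (annotation, assuming a 200 MHz core clock):
 * tre = fls(200000000 / 20000) - 1 = 13, so tps = 200 MHz >> 13, about
 * 24414 timer ticks/s (~41 us per tick, within the 50 us TP_TMR_RES
 * target); the SECONDS macro then scales the TCP timer values above by
 * that tick rate.
 */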
2854
2855/**
2856 * t3_tp_set_coalescing_size - set receive coalescing size
2857 * @adap: the adapter
2858 * @size: the receive coalescing size
2859 * @psh: whether a set PSH bit should deliver coalesced data
2860 *
2861 * Set the receive coalescing size and PSH bit handling.
2862 */
2863int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2864{
2865 u32 val;
2866
2867 if (size > MAX_RX_COALESCING_LEN)
2868 return -EINVAL;
2869
2870 val = t3_read_reg(adap, A_TP_PARA_REG3);
2871 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2872
2873 if (size) {
2874 val |= F_RXCOALESCEENABLE;
2875 if (psh)
2876 val |= F_RXCOALESCEPSHEN;
8a9fab22 2877 size = min(MAX_RX_COALESCING_LEN, size);
2878 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2879 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2880 }
2881 t3_write_reg(adap, A_TP_PARA_REG3, val);
2882 return 0;
2883}
2884
2885/**
2886 * t3_tp_set_max_rxsize - set the max receive size
2887 * @adap: the adapter
2888 * @size: the max receive size
2889 *
2890 * Set TP's max receive size. This is the limit that applies when
2891 * receive coalescing is disabled.
2892 */
2893void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2894{
2895 t3_write_reg(adap, A_TP_PARA_REG7,
2896 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2897}
2898
7b9b0943 2899static void init_mtus(unsigned short mtus[])
2900{
2901 /*
2902 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2903	 * it can accommodate max size TCP/IP headers when SACK and timestamps
2904 * are enabled and still have at least 8 bytes of payload.
2905 */
75758e8a 2906 mtus[0] = 88;
2907 mtus[1] = 88;
2908 mtus[2] = 256;
2909 mtus[3] = 512;
2910 mtus[4] = 576;
2911 mtus[5] = 1024;
2912 mtus[6] = 1280;
2913 mtus[7] = 1492;
2914 mtus[8] = 1500;
2915 mtus[9] = 2002;
2916 mtus[10] = 2048;
2917 mtus[11] = 4096;
2918 mtus[12] = 4352;
2919 mtus[13] = 8192;
2920 mtus[14] = 9000;
2921 mtus[15] = 9600;
2922}
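/*
 * Annotation (not in the original source): 88 = 20-byte IP header +
 * 60-byte TCP header (the maximum, with SACK and timestamp options) +
 * 8 bytes of payload, matching the comment at the top of this function.
 */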
2923
2924/*
2925 * Initial congestion control parameters.
2926 */
7b9b0943 2927static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2928{
2929 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2930 a[9] = 2;
2931 a[10] = 3;
2932 a[11] = 4;
2933 a[12] = 5;
2934 a[13] = 6;
2935 a[14] = 7;
2936 a[15] = 8;
2937 a[16] = 9;
2938 a[17] = 10;
2939 a[18] = 14;
2940 a[19] = 17;
2941 a[20] = 21;
2942 a[21] = 25;
2943 a[22] = 30;
2944 a[23] = 35;
2945 a[24] = 45;
2946 a[25] = 60;
2947 a[26] = 80;
2948 a[27] = 100;
2949 a[28] = 200;
2950 a[29] = 300;
2951 a[30] = 400;
2952 a[31] = 500;
2953
2954 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2955 b[9] = b[10] = 1;
2956 b[11] = b[12] = 2;
2957 b[13] = b[14] = b[15] = b[16] = 3;
2958 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2959 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2960 b[28] = b[29] = 6;
2961 b[30] = b[31] = 7;
2962}
2963
2964/* The minimum additive increment value for the congestion control table */
2965#define CC_MIN_INCR 2U
2966
2967/**
2968 * t3_load_mtus - write the MTU and congestion control HW tables
2969 * @adap: the adapter
2970 * @mtus: the unrestricted values for the MTU table
2971	 * @alpha: the values for the congestion control alpha parameter
2972 * @beta: the values for the congestion control beta parameter
2973 * @mtu_cap: the maximum permitted effective MTU
2974 *
2975	 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2976 * Update the high-speed congestion control table with the supplied alpha,
2977 * beta, and MTUs.
2978 */
2979void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2980 unsigned short alpha[NCCTRL_WIN],
2981 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2982{
2983 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2984 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2985 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2986 28672, 40960, 57344, 81920, 114688, 163840, 229376
2987 };
2988
2989 unsigned int i, w;
2990
2991 for (i = 0; i < NMTUS; ++i) {
2992 unsigned int mtu = min(mtus[i], mtu_cap);
2993 unsigned int log2 = fls(mtu);
2994
2995 if (!(mtu & ((1 << log2) >> 2))) /* round */
2996 log2--;
2997 t3_write_reg(adap, A_TP_MTU_TABLE,
2998 (i << 24) | (log2 << 16) | mtu);
2999
3000 for (w = 0; w < NCCTRL_WIN; ++w) {
3001 unsigned int inc;
3002
3003 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3004 CC_MIN_INCR);
3005
3006 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3007 (w << 16) | (beta[w] << 13) | inc);
3008 }
3009 }
3010}
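/*
 * Worked example (annotation): for mtu = 1500, fls(1500) = 11 and the
 * tested "quarter" bit (1 << 9 = 512) is clear in 1500, so log2 rounds
 * down to 10 and the table entry is (i << 24) | (10 << 16) | 1500.
 */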
3011
3012/**
3013 * t3_read_hw_mtus - returns the values in the HW MTU table
3014 * @adap: the adapter
3015 * @mtus: where to store the HW MTU values
3016 *
3017 * Reads the HW MTU table.
3018 */
3019void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
3020{
3021 int i;
3022
3023 for (i = 0; i < NMTUS; ++i) {
3024 unsigned int val;
3025
3026 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3027 val = t3_read_reg(adap, A_TP_MTU_TABLE);
3028 mtus[i] = val & 0x3fff;
3029 }
3030}
3031
3032/**
3033 * t3_get_cong_cntl_tab - reads the congestion control table
3034 * @adap: the adapter
3035 * @incr: where to store the alpha values
3036 *
3037 * Reads the additive increments programmed into the HW congestion
3038 * control table.
3039 */
3040void t3_get_cong_cntl_tab(struct adapter *adap,
3041 unsigned short incr[NMTUS][NCCTRL_WIN])
3042{
3043 unsigned int mtu, w;
3044
3045 for (mtu = 0; mtu < NMTUS; ++mtu)
3046 for (w = 0; w < NCCTRL_WIN; ++w) {
3047 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3048 0xffff0000 | (mtu << 5) | w);
3049 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
3050 0x1fff;
3051 }
3052}
3053
3054/**
3055 * t3_tp_get_mib_stats - read TP's MIB counters
3056 * @adap: the adapter
3057 * @tps: holds the returned counter values
3058 *
3059 * Returns the values of TP's MIB counters.
3060 */
3061void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
3062{
3063 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
3064 sizeof(*tps) / sizeof(u32), 0);
3065}
3066
3067#define ulp_region(adap, name, start, len) \
3068 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3069 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3070 (start) + (len) - 1); \
3071 start += len
3072
3073#define ulptx_region(adap, name, start, len) \
3074 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3075 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3076 (start) + (len) - 1)
3077
3078static void ulp_config(struct adapter *adap, const struct tp_params *p)
3079{
3080 unsigned int m = p->chan_rx_size;
3081
3082 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3083 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3084 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3085 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3086 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3087 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3088 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3089 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3090}
3091
3092/**
3093 * t3_set_proto_sram - set the contents of the protocol sram
3094 * @adapter: the adapter
3095 * @data: the protocol image
3096 *
3097 * Write the contents of the protocol SRAM.
3098 */
2c733a16 3099int t3_set_proto_sram(struct adapter *adap, const u8 *data)
3100{
3101 int i;
2c733a16 3102 const __be32 *buf = (const __be32 *)data;
3103
3104 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3105 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
3106 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
3107 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
3108 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
3109 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2eab17ab 3110
3111 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3112 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3113 return -EIO;
3114 }
3115 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
3116
3117 return 0;
3118}
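/*
 * Annotation (not in the original source): each protocol SRAM line is
 * 160 bits, loaded as five big-endian words into FIELD5..FIELD1;
 * writing FIELD0 with the line index and bit 31 set triggers the SRAM
 * write, which is then polled for completion.
 */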
3119
3120void t3_config_trace_filter(struct adapter *adapter,
3121 const struct trace_params *tp, int filter_index,
3122 int invert, int enable)
3123{
3124 u32 addr, key[4], mask[4];
3125
3126 key[0] = tp->sport | (tp->sip << 16);
3127 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3128 key[2] = tp->dip;
3129 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3130
3131 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3132 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3133 mask[2] = tp->dip_mask;
3134 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3135
3136 if (invert)
3137 key[3] |= (1 << 29);
3138 if (enable)
3139 key[3] |= (1 << 28);
3140
3141 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3142 tp_wr_indirect(adapter, addr++, key[0]);
3143 tp_wr_indirect(adapter, addr++, mask[0]);
3144 tp_wr_indirect(adapter, addr++, key[1]);
3145 tp_wr_indirect(adapter, addr++, mask[1]);
3146 tp_wr_indirect(adapter, addr++, key[2]);
3147 tp_wr_indirect(adapter, addr++, mask[2]);
3148 tp_wr_indirect(adapter, addr++, key[3]);
3149 tp_wr_indirect(adapter, addr, mask[3]);
3150 t3_read_reg(adapter, A_TP_PIO_DATA);
3151}
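/*
 * Usage sketch (illustrative, assuming the usual key/mask convention in
 * which zero mask bits are wildcards): an all-zero trace_params would
 * then trace every packet on filter 0, which the address selection
 * above maps to the Tx trace filter.
 *
 *	struct trace_params tp = { 0 };
 *
 *	t3_config_trace_filter(adapter, &tp, 0, 0, 1);
 */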
3152
3153/**
3154 * t3_config_sched - configure a HW traffic scheduler
3155 * @adap: the adapter
3156 * @kbps: target rate in Kbps
3157 * @sched: the scheduler index
3158 *
3159	 * Configure a HW scheduler for the target rate.
3160 */
3161int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
3162{
3163 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3164 unsigned int clk = adap->params.vpd.cclk * 1000;
3165 unsigned int selected_cpt = 0, selected_bpt = 0;
3166
3167 if (kbps > 0) {
3168 kbps *= 125; /* -> bytes */
3169 for (cpt = 1; cpt <= 255; cpt++) {
3170 tps = clk / cpt;
3171 bpt = (kbps + tps / 2) / tps;
3172 if (bpt > 0 && bpt <= 255) {
3173 v = bpt * tps;
3174 delta = v >= kbps ? v - kbps : kbps - v;
3175 if (delta <= mindelta) {
3176 mindelta = delta;
3177 selected_cpt = cpt;
3178 selected_bpt = bpt;
3179 }
3180 } else if (selected_cpt)
3181 break;
3182 }
3183 if (!selected_cpt)
3184 return -EINVAL;
3185 }
3186 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3187 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3188 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3189 if (sched & 1)
3190 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3191 else
3192 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3193 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3194 return 0;
3195}
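/*
 * Worked example (annotation, assuming a 200 MHz core clock): for
 * kbps = 10000 the target is 1,250,000 bytes/s, and the search settles
 * on cpt = 160, bpt = 1, i.e. one byte every 160 clocks, which is
 * exactly 1.25 MB/s (delta = 0).
 */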
3196
3197static int tp_init(struct adapter *adap, const struct tp_params *p)
3198{
3199 int busy = 0;
3200
3201 tp_config(adap, p);
3202 t3_set_vlan_accel(adap, 3, 0);
3203
3204 if (is_offload(adap)) {
3205 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3206 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3207 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3208 0, 1000, 5);
3209 if (busy)
3210 CH_ERR(adap, "TP initialization timed out\n");
3211 }
3212
3213 if (!busy)
3214 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3215 return busy;
3216}
3217
3218int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3219{
3220 if (port_mask & ~((1 << adap->params.nports) - 1))
3221 return -EINVAL;
3222 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3223 port_mask << S_PORT0ACTIVE);
3224 return 0;
3225}
3226
3227/*
3228 * Perform the bits of HW initialization that are dependent on the Tx
3229 * channels being used.
4d22de3e 3230 */
952cdf33 3231static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3232{
3233 int i;
3234
952cdf33 3235 if (chan_map != 3) { /* one channel */
3236 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3237 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3238 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3239 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3240 F_TPTXPORT1EN | F_PORT1ACTIVE));
3241 t3_write_reg(adap, A_PM1_TX_CFG,
3242 chan_map == 1 ? 0xffffffff : 0);
3243 } else { /* two channels */
3244 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3245 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3246 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3247 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3248 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3249 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3250 F_ENFORCEPKT);
3251 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3252 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3253 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3254 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3255 for (i = 0; i < 16; i++)
3256 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3257 (i << 16) | 0x1010);
3258 }
3259}
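
/*
 * For reference (see t3_prep_adapter() below): chan_map is a bitmap of
 * the Tx channels in use, so 1 selects channel 0 only, 2 selects
 * channel 1 only, and 3 drives both channels.
 */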
3260
3261static int calibrate_xgm(struct adapter *adapter)
3262{
3263 if (uses_xaui(adapter)) {
3264 unsigned int v, i;
3265
3266 for (i = 0; i < 5; ++i) {
3267 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3268 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3269 msleep(1);
3270 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3271 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3272 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3273 V_XAUIIMP(G_CALIMP(v) >> 2));
3274 return 0;
3275 }
3276 }
3277 CH_ERR(adapter, "MAC calibration failed\n");
3278 return -1;
3279 } else {
3280 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3281 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3282 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3283 F_XGM_IMPSETUPDATE);
3284 }
3285 return 0;
3286}
3287
3288static void calibrate_xgm_t3b(struct adapter *adapter)
3289{
3290 if (!uses_xaui(adapter)) {
3291 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3292 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3293 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3294 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3295 F_XGM_IMPSETUPDATE);
3296 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3297 0);
3298 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3299 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3300 }
3301}
3302
3303struct mc7_timing_params {
3304 unsigned char ActToPreDly;
3305 unsigned char ActToRdWrDly;
3306 unsigned char PreCyc;
3307 unsigned char RefCyc[5];
3308 unsigned char BkCyc;
3309 unsigned char WrToRdDly;
3310 unsigned char RdToWrDly;
3311};
3312
3313/*
3314 * Write a value to a register and check that the write completed. These
3315 * writes normally complete in a cycle or two, so one read should suffice.
3316 * The very first read exists to flush the posted write to the device.
3317 */
3318static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3319{
3320 t3_write_reg(adapter, addr, val);
3321 t3_read_reg(adapter, addr); /* flush */
3322 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3323 return 0;
3324 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3325 return -EIO;
3326}
3327
3328static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3329{
3330 static const unsigned int mc7_mode[] = {
3331 0x632, 0x642, 0x652, 0x432, 0x442
3332 };
3333 static const struct mc7_timing_params mc7_timings[] = {
3334 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3335 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3336 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3337 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3338 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3339 };
3340
3341 u32 val;
3342 unsigned int width, density, slow, attempts;
3343 struct adapter *adapter = mc7->adapter;
3344 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3345
3346 if (!mc7->size)
3347 return 0;
3348
3349 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3350 slow = val & F_SLOW;
3351 width = G_WIDTH(val);
3352 density = G_DEN(val);
3353
3354 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3355 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3356 msleep(1);
3357
3358 if (!slow) {
3359 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3360 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3361 msleep(1);
3362 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3363 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3364 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3365 mc7->name);
3366 goto out_fail;
3367 }
3368 }
3369
3370 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3371 V_ACTTOPREDLY(p->ActToPreDly) |
3372 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3373 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3374 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3375
3376 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3377 val | F_CLKEN | F_TERM150);
3378 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3379
3380 if (!slow)
3381 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3382 F_DLLENB);
3383 udelay(1);
3384
3385 val = slow ? 3 : 6;
3386 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3387 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3388 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3389 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3390 goto out_fail;
3391
3392 if (!slow) {
3393 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3394 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3395 udelay(5);
3396 }
3397
3398 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3399 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3400 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3401 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3402 mc7_mode[mem_type]) ||
3403 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3404 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3405 goto out_fail;
3406
3407 /* clock value is in KHz; compute memory clocks per 7.8125 us refresh interval */
3408 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* scale by 7812.5 */
3409 mc7_clock /= 1000000;
3410
3411 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3412 F_PERREFEN | V_PREREFDIV(mc7_clock));
3413 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3414
3415 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3416 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3417 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3418 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3419 (mc7->size << width) - 1);
3420 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3421 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3422
3423 attempts = 50;
3424 do {
3425 msleep(250);
3426 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3427 } while ((val & F_BUSY) && --attempts);
3428 if (val & F_BUSY) {
3429 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3430 goto out_fail;
3431 }
3432
3433 /* Enable normal memory accesses. */
3434 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3435 return 0;
3436
3437out_fail:
3438 return -1;
3439}
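
/*
 * Illustrative sketch (ours): the refresh programming in mc7_init()
 * boils down to "memory clocks per 7.8125 us refresh interval", e.g.
 * a 200 MHz part (mc7_clock == 200000 KHz) yields 1562 clocks.
 */
static inline unsigned int mc7_refresh_div(unsigned int mc7_clock_khz)
{
	/* the same fixed-point scaling by 7812.5 as mc7_init() above */
	return (mc7_clock_khz * 7812 + mc7_clock_khz / 2) / 1000000;
}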
3440
3441static void config_pcie(struct adapter *adap)
3442{
3443 static const u16 ack_lat[4][6] = {
3444 {237, 416, 559, 1071, 2095, 4143},
3445 {128, 217, 289, 545, 1057, 2081},
3446 {73, 118, 154, 282, 538, 1050},
3447 {67, 107, 86, 150, 278, 534}
3448 };
3449 static const u16 rpl_tmr[4][6] = {
3450 {711, 1248, 1677, 3213, 6285, 12429},
3451 {384, 651, 867, 1635, 3171, 6243},
3452 {219, 354, 462, 846, 1614, 3150},
3453 {201, 321, 258, 450, 834, 1602}
3454 };
3455
3456 u16 val;
3457 unsigned int log2_width, pldsize;
3458 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3459
3460 pci_read_config_word(adap->pdev,
3461 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3462 &val);
3463 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3464 pci_read_config_word(adap->pdev,
3465 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3466 &val);
3467
3468 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3469 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3470 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3471 log2_width = fls(adap->params.pci.width) - 1;
3472 acklat = ack_lat[log2_width][pldsize];
3473 if (val & 1) /* check L0s Enable */
3474 acklat += fst_trn_tx * 4;
3475 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3476
3477 if (adap->params.rev == 0)
3478 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3479 V_T3A_ACKLAT(M_T3A_ACKLAT),
3480 V_T3A_ACKLAT(acklat));
3481 else
3482 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3483 V_ACKLAT(acklat));
3484
3485 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3486 V_REPLAYLMT(rpllmt));
3487
3488 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3489 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3490 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3491 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3492}
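
/*
 * Worked example (ours): an x8 link (log2_width == 3) with a 256-byte
 * max payload (pldsize == 1) starts from ack_lat[3][1] == 107 and
 * rpl_tmr[3][1] == 321 before the fast-training adjustments above.
 */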
3493
3494/*
3495 * Initialize and configure T3 HW modules. This performs the
3496 * initialization steps that need to be done once after a card is reset.
3497 * MAC and PHY initialization is handled separately whenever a port is enabled.
3498 *
3499 * fw_params are passed to FW and their value is platform dependent. Only the
3500 * top 8 bits are available for use, the rest must be 0.
3501 */
3502int t3_init_hw(struct adapter *adapter, u32 fw_params)
3503{
3504 int err = -EIO, attempts, i;
3505 const struct vpd_params *vpd = &adapter->params.vpd;
3506
3507 if (adapter->params.rev > 0)
3508 calibrate_xgm_t3b(adapter);
3509 else if (calibrate_xgm(adapter))
3510 goto out_err;
3511
3512 if (vpd->mclk) {
3513 partition_mem(adapter, &adapter->params.tp);
3514
3515 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3516 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3517 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3518 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3519 adapter->params.mc5.nfilters,
3520 adapter->params.mc5.nroutes))
3521 goto out_err;
3522
3523 for (i = 0; i < 32; i++)
3524 if (clear_sge_ctxt(adapter, i, F_CQ))
3525 goto out_err;
3526 }
3527
3528 if (tp_init(adapter, &adapter->params.tp))
3529 goto out_err;
3530
3531 t3_tp_set_coalescing_size(adapter,
3532 min(adapter->params.sge.max_pkt_size,
3533 MAX_RX_COALESCING_LEN), 1);
3534 t3_tp_set_max_rxsize(adapter,
3535 min(adapter->params.sge.max_pkt_size, 16384U));
3536 ulp_config(adapter, &adapter->params.tp);
3537
3538 if (is_pcie(adapter))
3539 config_pcie(adapter);
3540 else
3541 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3542 F_DMASTOPEN | F_CLIDECEN);
3543
3544 if (adapter->params.rev == T3_REV_C)
3545 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3546 F_CFG_CQE_SOP_MASK);
3547
3548 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3549 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3550 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3551 chan_init_hw(adapter, adapter->params.chan_map);
3552 t3_sge_init(adapter, &adapter->params.sge);
3553
3554 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3555
3556 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3557 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3558 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3559 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3560
3561 attempts = 100;
3562 do { /* wait for uP to initialize */
3563 msleep(20);
3564 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3565 if (!attempts) {
3566 CH_ERR(adapter, "uP initialization timed out\n");
3567 goto out_err;
3568 }
3569
3570 err = 0;
3571out_err:
3572 return err;
3573}
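
/*
 * Note (ours): the A_CIM_HOST_ACC_DATA write above hands the uP its
 * boot parameters, and the poll loop then allows up to 2 s
 * (100 x 20 ms) for the microprocessor to clear the register.
 */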
3574
3575/**
3576 * get_pci_mode - determine a card's PCI mode
3577 * @adapter: the adapter
3578 * @p: where to store the PCI settings
3579 *
3580 * Determines a card's PCI mode and associated parameters, such as speed
3581 * and width.
3582 */
3583static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3584{
3585 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3586 u32 pci_mode, pcie_cap;
3587
3588 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3589 if (pcie_cap) {
3590 u16 val;
3591
3592 p->variant = PCI_VARIANT_PCIE;
3593 p->pcie_cap_addr = pcie_cap;
3594 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3595 &val);
3596 p->width = (val >> 4) & 0x3f;
3597 return;
3598 }
3599
3600 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3601 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3602 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3603 pci_mode = G_PCIXINITPAT(pci_mode);
3604 if (pci_mode == 0)
3605 p->variant = PCI_VARIANT_PCI;
3606 else if (pci_mode < 4)
3607 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3608 else if (pci_mode < 8)
3609 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3610 else
3611 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3612}
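
/*
 * Note (ours): for PCIe cards only the negotiated link width is
 * recorded, from bits 9:4 of the Link Status register (an x8 link
 * yields p->width == 8); PCI/PCI-X speed and width come from the
 * chip's A_PCIX_MODE register instead.
 */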
3613
3614/**
3615 * init_link_config - initialize a link's SW state
3616 * @lc: structure holding the link state
3617 * @caps: bitmap of the link's capabilities (SUPPORTED_* flags)
3618 *
3619 * Initializes the SW state maintained for each link, including the link's
3620 * capabilities and default speed/duplex/flow-control/autonegotiation
3621 * settings.
3622 */
3623static void init_link_config(struct link_config *lc, unsigned int caps)
3624{
3625 lc->supported = caps;
3626 lc->requested_speed = lc->speed = SPEED_INVALID;
3627 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3628 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3629 if (lc->supported & SUPPORTED_Autoneg) {
3630 lc->advertising = lc->supported;
3631 lc->autoneg = AUTONEG_ENABLE;
3632 lc->requested_fc |= PAUSE_AUTONEG;
3633 } else {
3634 lc->advertising = 0;
3635 lc->autoneg = AUTONEG_DISABLE;
3636 }
3637}
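
/*
 * For reference (ours): with an autoneg-capable PHY this leaves the
 * link advertising everything it supports and flow control resolved
 * by autonegotiation; otherwise autoneg is off and Tx/Rx pause are
 * forced on.
 */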
3638
3639/**
3640 * mc7_calc_size - calculate MC7 memory size
3641 * @cfg: the MC7 configuration
3642 *
3643 * Calculates the size of an MC7 memory in bytes from the value of its
3644 * configuration register.
3645 */
3646static unsigned int mc7_calc_size(u32 cfg)
3647{
3648 unsigned int width = G_WIDTH(cfg);
3649 unsigned int banks = !!(cfg & F_BKS) + 1;
3650 unsigned int org = !!(cfg & F_ORG) + 1;
3651 unsigned int density = G_DEN(cfg);
3652 unsigned int MBs = ((256 << density) * banks) / (org << width);
3653
3654 return MBs << 20;
3655}
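
/*
 * Worked example (ours, with the field encodings the G_ and F_
 * accessors imply): density 1, two banks, org 1 and width code 2 give
 * ((256 << 1) * 2) / (1 << 2) = 256, i.e. a 256 MB part.
 */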
3656
3657static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3658 unsigned int base_addr, const char *name)
3659{
3660 u32 cfg;
3661
3662 mc7->adapter = adapter;
3663 mc7->name = name;
3664 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3665 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3666 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3667 mc7->width = G_WIDTH(cfg);
3668}
3669
3670void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3671{
3672 mac->adapter = adapter;
3673 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3674 mac->nucast = 1;
3675
3676 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3677 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3678 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3679 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3680 F_ENRGMII, 0);
3681 }
3682}
3683
3684void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3685{
3686 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3687
3688 mi1_init(adapter, ai);
3689 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3690 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3691 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3692 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3693 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3694 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3695
3696 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3697 val |= F_ENRGMII;
3698
3699 /* Enable MAC clocks so we can access the registers */
3700 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3701 t3_read_reg(adapter, A_XGM_PORT_CFG);
3702
3703 val |= F_CLKDIVRESET_;
3704 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3705 t3_read_reg(adapter, A_XGM_PORT_CFG);
3706 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3707 t3_read_reg(adapter, A_XGM_PORT_CFG);
3708}
3709
3710/*
3711 * Reset the adapter.
3712 * Older PCIe cards lose their config space during reset; PCI-X
3713 * ones don't.
3714 */
3715int t3_reset_adapter(struct adapter *adapter)
3716{
3717 int i, save_and_restore_pcie =
3718 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3719 uint16_t devid = 0;
3720
3721 if (save_and_restore_pcie)
3722 pci_save_state(adapter->pdev);
3723 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3724
3725 /*
3726 * Delay to give the device time to reset fully.
3727 * XXX The delay time should be tuned.
3728 */
3729 for (i = 0; i < 10; i++) {
3730 msleep(50);
3731 pci_read_config_word(adapter->pdev, 0x00, &devid);
3732 if (devid == 0x1425)
3733 break;
3734 }
3735
3736 if (devid != 0x1425)
3737 return -1;
3738
3739 if (save_and_restore_pcie)
3740 pci_restore_state(adapter->pdev);
3741 return 0;
3742}
3743
/*
 * Write zeroes through the SGE context commands and the CIM IBQ debug
 * interface so that these parity-protected RAMs start out holding
 * valid parity.
 */
3744static int init_parity(struct adapter *adap)
3745{
3746 int i, err, addr;
3747
3748 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3749 return -EBUSY;
3750
3751 for (err = i = 0; !err && i < 16; i++)
3752 err = clear_sge_ctxt(adap, i, F_EGRESS);
3753 for (i = 0xfff0; !err && i <= 0xffff; i++)
3754 err = clear_sge_ctxt(adap, i, F_EGRESS);
3755 for (i = 0; !err && i < SGE_QSETS; i++)
3756 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3757 if (err)
3758 return err;
3759
3760 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3761 for (i = 0; i < 4; i++)
3762 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3763 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3764 F_IBQDBGWR | V_IBQDBGQID(i) |
3765 V_IBQDBGADDR(addr));
3766 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3767 F_IBQDBGBUSY, 0, 2, 1);
3768 if (err)
3769 return err;
3770 }
3771 return 0;
3772}
3773
3774/*
3775 * Initialize adapter SW state for the various HW modules, set initial values
3776 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3777 * interface.
3778 */
3779int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3780 int reset)
3781{
3782 int ret;
3783 unsigned int i, j = -1;
3784
3785 get_pci_mode(adapter, &adapter->params.pci);
3786
3787 adapter->params.info = ai;
3788 adapter->params.nports = ai->nports0 + ai->nports1;
3789 adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
3790 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3791 /*
3792 * We used to only run the "adapter check task" once a second if
3793 * we had PHYs which didn't support interrupts (we would check
3794 * their link status once a second). Now we check other conditions
3795 * in that routine which could potentially impose a very high
3796 * interrupt load on the system. As such, we now always scan the
3797 * adapter state once a second ...
3798 */
3799 adapter->params.linkpoll_period = 10;
3800 adapter->params.stats_update_period = is_10G(adapter) ?
3801 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3802 adapter->params.pci.vpd_cap_addr =
3803 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3804 ret = get_vpd_params(adapter, &adapter->params.vpd);
3805 if (ret < 0)
3806 return ret;
3807
3808 if (reset && t3_reset_adapter(adapter))
3809 return -1;
3810
3811 t3_sge_prep(adapter, &adapter->params.sge);
3812
3813 if (adapter->params.vpd.mclk) {
3814 struct tp_params *p = &adapter->params.tp;
3815
3816 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3817 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3818 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3819
3820 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3821 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3822 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3823 p->cm_size = t3_mc7_size(&adapter->cm);
3824 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3825 p->chan_tx_size = p->pmtx_size / p->nchan;
3826 p->rx_pg_size = 64 * 1024;
3827 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3828 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3829 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3830 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3831 adapter->params.rev > 0 ? 12 : 6;
3832 }
3833
3834 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3835 t3_mc7_size(&adapter->pmtx) &&
3836 t3_mc7_size(&adapter->cm);
3837
3838 if (is_offload(adapter)) {
3839 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3840 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3841 DEFAULT_NFILTERS : 0;
3842 adapter->params.mc5.nroutes = 0;
3843 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3844
3845 init_mtus(adapter->params.mtus);
3846 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3847 }
3848
3849 early_hw_init(adapter, ai);
3850 ret = init_parity(adapter);
3851 if (ret)
3852 return ret;
3853
3854 for_each_port(adapter, i) {
3855 u8 hw_addr[6];
3856 const struct port_type_info *pti;
3857 struct port_info *p = adap2pinfo(adapter, i);
3858
3859 while (!adapter->params.vpd.port_type[++j])
3860 ;
3861
3862 pti = &port_types[adapter->params.vpd.port_type[j]];
3863 if (!pti->phy_prep) {
3864 CH_ALERT(adapter, "Invalid port type index %d\n",
3865 adapter->params.vpd.port_type[j]);
3866 return -EINVAL;
3867 }
3868
3869 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3870 ai->mdio_ops);
3871 if (ret)
3872 return ret;
3873 p->phy.mdio.dev = adapter->port[i];
3874 mac_prep(&p->mac, adapter, j);
3875
3876 /*
3877 * The VPD EEPROM stores the base Ethernet address for the
3878 * card. A port's address is derived from the base by adding
3879 * the port's index to the base's low octet.
3880 */
3881 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3882 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3883
3884 memcpy(adapter->port[i]->dev_addr, hw_addr,
3885 ETH_ALEN);
3886 memcpy(adapter->port[i]->perm_addr, hw_addr,
3887 ETH_ALEN);
3888 init_link_config(&p->link_config, p->phy.caps);
3889 p->phy.ops->power_down(&p->phy, 1);
3890
3891 /*
3892 * If the PHY doesn't support interrupts for link status
3893 * changes, schedule a scan of the adapter links at least
3894 * once a second.
3895 */
3896 if (!(p->phy.caps & SUPPORTED_IRQ) &&
3897 adapter->params.linkpoll_period > 10)
3898 adapter->params.linkpoll_period = 10;
3899 }
3900
3901 return 0;
3902}
3903
3904void t3_led_ready(struct adapter *adapter)
3905{
3906 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3907 F_GPIO0_OUT_VAL);
3908}
3909
3910int t3_replay_prep_adapter(struct adapter *adapter)
3911{
3912 const struct adapter_info *ai = adapter->params.info;
3913 unsigned int i, j = -1;
3914 int ret;
3915
3916 early_hw_init(adapter, ai);
3917 ret = init_parity(adapter);
3918 if (ret)
3919 return ret;
3920
3921 for_each_port(adapter, i) {
3922 const struct port_type_info *pti;
3923 struct port_info *p = adap2pinfo(adapter, i);
3924
3925 while (!adapter->params.vpd.port_type[++j])
3926 ;
3927
3928 pti = &port_types[adapter->params.vpd.port_type[j]];
3929 ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3930 if (ret)
3931 return ret;
3932 p->phy.ops->power_down(&p->phy, 1);
3933 }
3934
3935 return 0;
3936}
3937