/* drivers/net/cxgb3/t3_hw.c */
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
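
/*
 * Illustrative sketch (not from the original source): a caller polling a
 * busy bit, e.g. the serial-flash F_BUSY bit in A_SF_OP, might do:
 *
 *	u32 val;
 *	int ret = t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 4, 10,
 *				      &val);
 *	if (ret)
 *		return ret;	(still busy after 4 polls: -EAGAIN)
 */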

/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register.  Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
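
/*
 * Illustrative sketch (not from the original source): t3_set_reg_field()
 * is a read-modify-write helper; e.g. forcing the MI1 ST field, as the
 * clause-22 MDIO routines below do:
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
 */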

/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @start_idx: index of first indirect register to read
 * @nregs: how many indirect registers to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
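
/*
 * Illustrative sketch (not from the original source): assuming the usual
 * cxgb3 adapter layout where the PM Rx memory is adapter->pmrx, the first
 * 64-bit word of that MC7 could be fetched with:
 *
 *	u64 word;
 *	int ret = t3_mc7_bd_read(&adapter->pmrx, 0, 1, &word);
 */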

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
		       u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		ret = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	.read = t3_mi1_read,
	.write = t3_mi1_write,
	.mode_support = MDIO_SUPPORTS_C22
};

/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the MDIO_LOCK held.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			ret = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
			 u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	.read = mi1_ext_read,
	.write = mi1_ext_write,
	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};

/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = t3_mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = t3_mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
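
/*
 * Illustrative note (not from the original source): t3_phy_reset() below
 * is a typical caller, clearing the low-power bit while setting the reset
 * bit in one read-modify-write:
 *
 *	t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
 *			    MDIO_CTRL1_RESET);
 */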

/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
				  MDIO_CTRL1_RESET);
	if (err || !wait)
		return err;

	do {
		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
		if (err)
			return err;
		ctl &= MDIO_CTRL1_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}
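
/*
 * Illustrative sketch (not from the original source): a 10G PHY driver
 * might reset its PMA/PMD block and wait up to ~10 ms like this:
 *
 *	err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 10);
 */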

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}

/**
 * t3_phy_advertise_fiber - set fiber PHY advertisement register
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a fiber PHY's advertisement register to advertise the
 * requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}

/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex.  This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
}

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			     MDIO_PMA_LASI_LSALARM);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
			       &status);

	if (err)
		return err;
	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
}

static const struct adapter_info t3_adap_info[] = {
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{1, 1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},
	{},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310" },
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
	{ NULL },
	{ t3_ael2020_phy_prep },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);	/* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL   40
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.  A zero is written to the flag bit when the
 * address is written to the control register.  The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}

/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
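
/*
 * Illustrative sketch (not from the original source): callers that update
 * the VPD EEPROM typically bracket the writes with write protection off
 * and back on:
 *
 *	t3_seeprom_wp(adapter, 0);
 *	... t3_seeprom_write(adapter, addr, data) ...
 *	t3_seeprom_wp(adapter, 1);
 */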

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
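
/*
 * Illustrative sketch (not from the original source): the firmware version
 * word at FW_VERS_ADDR can be fetched in native byte order with:
 *
 *	u32 vers;
 *	int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, &vers, 0);
 *
 * which is exactly what t3_get_fw_version() below does.
 */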

/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}

/**
 * t3_get_tp_version - read the tp sram version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol sram version from sram.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 * t3_check_tpsram_version - check the tp sram version
 * @adapter: the adapter
 *
 * Checks the protocol SRAM version read from the adapter against the
 * version this driver was built with.
 */
int t3_check_tpsram_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;
	else {
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 * t3_check_tpsram - check if provided protocol SRAM
 * is compatible with this driver
 * @adapter: the adapter
 * @tp_sram: the firmware image to write
 * @size: image size
 *
 * Checks if an adapter's tp sram is compatible with the driver.
 * Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/*
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}

#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 *
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}

static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}

/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC.  After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (!lc->link_ok && link_ok) {
		u32 rx_cfg, rx_hash_high, rx_hash_low;
		u32 status;

		t3_xgm_intr_enable(adapter, port_id);
		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		if (status & F_LINKFAULTCHANGE) {
			mac->stats.link_faults++;
			pi->link_fault = 1;
		}
		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
	}

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;	/* nothing changed */

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}

void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports a link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}

/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};

/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter.  The table is terminated by an entry
 * specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
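
/*
 * Illustrative note (not from the original source): the per-module
 * handlers below each build a zero-terminated table and hand it to
 * t3_handle_intr_status(), e.g. (sketch):
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{F_BUSY, "example fatal condition", -1, 1},
 *		{0}
 *	};
 *	if (t3_handle_intr_status(adapter, reg, 0xffffffff,
 *				  example_intr_info, NULL))
 *		t3_fatal_err(adapter);
 */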

#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)

/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
1611
1612/*
1613 * ULP RX interrupt handler.
1614 */
1615static void ulprx_intr_handler(struct adapter *adapter)
1616{
1617 static const struct intr_info ulprx_intr_info[] = {
b881955b
DLR
1618 {F_PARERRDATA, "ULP RX data parity error", -1, 1},
1619 {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1620 {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1621 {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1622 {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1623 {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1624 {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1625 {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
4d22de3e
DLR
1626 {0}
1627 };
1628
1629 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1630 ulprx_intr_info, NULL))
1631 t3_fatal_err(adapter);
1632}
1633
1634/*
1635 * ULP TX interrupt handler.
1636 */
1637static void ulptx_intr_handler(struct adapter *adapter)
1638{
1639 static const struct intr_info ulptx_intr_info[] = {
1640 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1641 STAT_ULP_CH0_PBL_OOB, 0},
1642 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1643 STAT_ULP_CH1_PBL_OOB, 0},
b881955b 1644 {0xfc, "ULP TX parity error", -1, 1},
1645 {0}
1646 };
1647
1648 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1649 ulptx_intr_info, adapter->irq_stats))
1650 t3_fatal_err(adapter);
1651}
1652
1653#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1654 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1655 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1656 F_ICSPI1_TX_FRAMING_ERROR)
1657#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1658 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1659 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1660 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1661
1662/*
1663 * PM TX interrupt handler.
1664 */
1665static void pmtx_intr_handler(struct adapter *adapter)
1666{
1667 static const struct intr_info pmtx_intr_info[] = {
1668 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1669 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1670 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1671 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1672 "PMTX ispi parity error", -1, 1},
1673 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1674 "PMTX ospi parity error", -1, 1},
1675 {0}
1676 };
1677
1678 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1679 pmtx_intr_info, NULL))
1680 t3_fatal_err(adapter);
1681}
1682
1683#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1684 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1685 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1686 F_IESPI1_TX_FRAMING_ERROR)
1687#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1688 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1689 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1690 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1691
1692/*
1693 * PM RX interrupt handler.
1694 */
1695static void pmrx_intr_handler(struct adapter *adapter)
1696{
1697 static const struct intr_info pmrx_intr_info[] = {
1698 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1699 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1700 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1701 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1702 "PMRX ispi parity error", -1, 1},
1703 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1704 "PMRX ospi parity error", -1, 1},
1705 {0}
1706 };
1707
1708 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1709 pmrx_intr_info, NULL))
1710 t3_fatal_err(adapter);
1711}
1712
1713/*
1714 * CPL switch interrupt handler.
1715 */
1716static void cplsw_intr_handler(struct adapter *adapter)
1717{
1718 static const struct intr_info cplsw_intr_info[] = {
1719 {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1720 {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1721 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1722 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1723 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1724 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1725 {0}
1726 };
1727
1728 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1729 cplsw_intr_info, NULL))
1730 t3_fatal_err(adapter);
1731}
1732
1733/*
1734 * MPS interrupt handler.
1735 */
1736static void mps_intr_handler(struct adapter *adapter)
1737{
1738 static const struct intr_info mps_intr_info[] = {
1739 {0x1ff, "MPS parity error", -1, 1},
1740 {0}
1741 };
1742
1743 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1744 mps_intr_info, NULL))
1745 t3_fatal_err(adapter);
1746}
1747
1748#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1749
1750/*
1751 * MC7 interrupt handler.
1752 */
1753static void mc7_intr_handler(struct mc7 *mc7)
1754{
1755 struct adapter *adapter = mc7->adapter;
1756 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1757
1758 if (cause & F_CE) {
1759 mc7->stats.corr_err++;
1760 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1761 "data 0x%x 0x%x 0x%x\n", mc7->name,
1762 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1763 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1764 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1765 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1766 }
1767
1768 if (cause & F_UE) {
1769 mc7->stats.uncorr_err++;
1770 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1771 "data 0x%x 0x%x 0x%x\n", mc7->name,
1772 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1773 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1774 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1775 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1776 }
1777
1778 if (G_PE(cause)) {
1779 mc7->stats.parity_err++;
1780 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1781 mc7->name, G_PE(cause));
1782 }
1783
1784 if (cause & F_AE) {
1785 u32 addr = 0;
1786
1787 if (adapter->params.rev > 0)
1788 addr = t3_read_reg(adapter,
1789 mc7->offset + A_MC7_ERR_ADDR);
1790 mc7->stats.addr_err++;
1791 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1792 mc7->name, addr);
1793 }
1794
1795 if (cause & MC7_INTR_FATAL)
1796 t3_fatal_err(adapter);
1797
1798 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1799}
1800
1801#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1802 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1803/*
1804 * XGMAC interrupt handler.
1805 */
1806static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1807{
1808 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1809 /*
1810 * We mask out interrupt causes for which we're not taking interrupts.
1811 * This allows us to use polling logic to monitor some of the other
1812 * conditions when taking interrupts would impose too much load on the
1813 * system.
1814 */
1815 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1816 ~F_RXFIFO_OVERFLOW;
1817
1818 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1819 mac->stats.tx_fifo_parity_err++;
1820 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1821 }
1822 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1823 mac->stats.rx_fifo_parity_err++;
1824 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1825 }
1826 if (cause & F_TXFIFO_UNDERRUN)
1827 mac->stats.tx_fifo_urun++;
1828 if (cause & F_RXFIFO_OVERFLOW)
1829 mac->stats.rx_fifo_ovfl++;
1830 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1831 mac->stats.serdes_signal_loss++;
1832 if (cause & F_XAUIPCSCTCERR)
1833 mac->stats.xaui_pcs_ctc_err++;
1834 if (cause & F_XAUIPCSALIGNCHANGE)
1835 mac->stats.xaui_pcs_align_change++;
1836 if (cause & F_XGM_INT) {
1837 t3_set_reg_field(adap,
1838 A_XGM_INT_ENABLE + mac->offset,
1839 F_XGM_INT, 0);
1840 mac->stats.link_faults++;
1841
1842 t3_os_link_fault_handler(adap, idx);
1843 }
1844
1845 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
bf792094 1846
1847 if (cause & XGM_INTR_FATAL)
1848 t3_fatal_err(adap);
bf792094 1849
1850 return cause != 0;
1851}
1852
1853/*
1854 * Interrupt handler for PHY events.
1855 */
1856int t3_phy_intr_handler(struct adapter *adapter)
1857{
1858 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1859
1860 for_each_port(adapter, i) {
1861 struct port_info *p = adap2pinfo(adapter, i);
1862
04497982 1863 if (!(p->phy.caps & SUPPORTED_IRQ))
1864 continue;
1865
f231e0a5 1866 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1ca03cbc 1867 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1868
1869 if (phy_cause & cphy_cause_link_change)
1870 t3_link_changed(adapter, i);
1871 if (phy_cause & cphy_cause_fifo_error)
1ca03cbc 1872 p->phy.fifo_errors++;
1873 if (phy_cause & cphy_cause_module_change)
1874 t3_os_phymod_changed(adapter, i);
1875 }
1876 }
1877
1878 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1879 return 0;
1880}
1881
1882/*
1883 * T3 slow path (non-data) interrupt handler.
1884 */
1885int t3_slow_intr_handler(struct adapter *adapter)
1886{
1887 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1888
1889 cause &= adapter->slow_intr_mask;
1890 if (!cause)
1891 return 0;
1892 if (cause & F_PCIM0) {
1893 if (is_pcie(adapter))
1894 pcie_intr_handler(adapter);
1895 else
1896 pci_intr_handler(adapter);
1897 }
1898 if (cause & F_SGE3)
1899 t3_sge_err_intr_handler(adapter);
1900 if (cause & F_MC7_PMRX)
1901 mc7_intr_handler(&adapter->pmrx);
1902 if (cause & F_MC7_PMTX)
1903 mc7_intr_handler(&adapter->pmtx);
1904 if (cause & F_MC7_CM)
1905 mc7_intr_handler(&adapter->cm);
1906 if (cause & F_CIM)
1907 cim_intr_handler(adapter);
1908 if (cause & F_TP1)
1909 tp_intr_handler(adapter);
1910 if (cause & F_ULP2_RX)
1911 ulprx_intr_handler(adapter);
1912 if (cause & F_ULP2_TX)
1913 ulptx_intr_handler(adapter);
1914 if (cause & F_PM1_RX)
1915 pmrx_intr_handler(adapter);
1916 if (cause & F_PM1_TX)
1917 pmtx_intr_handler(adapter);
1918 if (cause & F_CPL_SWITCH)
1919 cplsw_intr_handler(adapter);
1920 if (cause & F_MPS0)
1921 mps_intr_handler(adapter);
1922 if (cause & F_MC5A)
1923 t3_mc5_intr_handler(&adapter->mc5);
1924 if (cause & F_XGMAC0_0)
1925 mac_intr_handler(adapter, 0);
1926 if (cause & F_XGMAC0_1)
1927 mac_intr_handler(adapter, 1);
1928 if (cause & F_T3DBG)
1929 t3_os_ext_intr_handler(adapter);
1930
1931 /* Clear the interrupts just processed. */
1932 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1933 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1934 return 1;
1935}
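/*
 * Illustrative sketch (not part of the driver): a top-level interrupt
 * service routine could forward to t3_slow_intr_handler() and use its
 * 0/1 return value to tell the kernel whether this device actually
 * raised the interrupt, which is what allows the IRQ line to be shared.
 * The handler name and the cookie-to-adapter cast below are assumptions
 * made for this example only.
 */
#if 0	/* example only, not compiled */
static irqreturn_t example_slow_isr(int irq, void *cookie)
{
	struct adapter *adapter = cookie;

	/* IRQ_RETVAL() maps nonzero to IRQ_HANDLED, 0 to IRQ_NONE */
	return IRQ_RETVAL(t3_slow_intr_handler(adapter));
}
#endif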
1936
1937static unsigned int calc_gpio_intr(struct adapter *adap)
1938{
1939 unsigned int i, gpi_intr = 0;
1940
1941 for_each_port(adap, i)
1942 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1943 adapter_info(adap)->gpio_intr[i])
1944 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1945 return gpi_intr;
1946}
1947
1948/**
1949 * t3_intr_enable - enable interrupts
1950 * @adapter: the adapter whose interrupts should be enabled
1951 *
1952 * Enable interrupts by setting the interrupt enable registers of the
1953 * various HW modules and then enabling the top-level interrupt
1954 * concentrator.
1955 */
1956void t3_intr_enable(struct adapter *adapter)
1957{
1958 static const struct addr_val_pair intr_en_avp[] = {
1959 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1960 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1961 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1962 MC7_INTR_MASK},
1963 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1964 MC7_INTR_MASK},
1965 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1966 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1967 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1968 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1969 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1970 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1971 };
1972
1973 adapter->slow_intr_mask = PL_INTR_MASK;
1974
1975 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1976 t3_write_reg(adapter, A_TP_INT_ENABLE,
1977 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1978
1979 if (adapter->params.rev > 0) {
1980 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1981 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1982 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1983 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1984 F_PBL_BOUND_ERR_CH1);
1985 } else {
1986 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1987 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1988 }
1989
1990 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1991
1992 if (is_pcie(adapter))
1993 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1994 else
1995 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1996 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1997 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1998}
1999
2000/**
2001 * t3_intr_disable - disable a card's interrupts
2002 * @adapter: the adapter whose interrupts should be disabled
2003 *
2004 * Disable interrupts. We only disable the top-level interrupt
2005 * concentrator and the SGE data interrupts.
2006 */
2007void t3_intr_disable(struct adapter *adapter)
2008{
2009 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2010 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2011 adapter->slow_intr_mask = 0;
2012}
2013
2014/**
2015 * t3_intr_clear - clear all interrupts
2016 * @adapter: the adapter whose interrupts should be cleared
2017 *
2018 * Clears all interrupts.
2019 */
2020void t3_intr_clear(struct adapter *adapter)
2021{
2022 static const unsigned int cause_reg_addr[] = {
2023 A_SG_INT_CAUSE,
2024 A_SG_RSPQ_FL_STATUS,
2025 A_PCIX_INT_CAUSE,
2026 A_MC7_INT_CAUSE,
2027 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2028 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2029 A_CIM_HOST_INT_CAUSE,
2030 A_TP_INT_CAUSE,
2031 A_MC5_DB_INT_CAUSE,
2032 A_ULPRX_INT_CAUSE,
2033 A_ULPTX_INT_CAUSE,
2034 A_CPL_INTR_CAUSE,
2035 A_PM1_TX_INT_CAUSE,
2036 A_PM1_RX_INT_CAUSE,
2037 A_MPS_INT_CAUSE,
2038 A_T3DBG_INT_CAUSE,
2039 };
2040 unsigned int i;
2041
2042 /* Clear PHY and MAC interrupts for each port. */
2043 for_each_port(adapter, i)
2044 t3_port_intr_clear(adapter, i);
2045
2046 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2047 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2048
2049 if (is_pcie(adapter))
2050 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2051 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2052 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2053}
2054
2055void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2056{
2057 struct port_info *pi = adap2pinfo(adapter, idx);
2058
2059 t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2060 XGM_EXTRA_INTR_MASK);
2061}
2062
2063void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2064{
2065 struct port_info *pi = adap2pinfo(adapter, idx);
2066
2067 t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2068 0x7ff);
2069}
2070
2071/**
2072 * t3_port_intr_enable - enable port-specific interrupts
2073 * @adapter: associated adapter
2074 * @idx: index of port whose interrupts should be enabled
2075 *
2076 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2077 * adapter port.
2078 */
2079void t3_port_intr_enable(struct adapter *adapter, int idx)
2080{
2081 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2082
2083 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2084 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2085 phy->ops->intr_enable(phy);
2086}
2087
2088/**
2089 * t3_port_intr_disable - disable port-specific interrupts
2090 * @adapter: associated adapter
2091 * @idx: index of port whose interrupts should be disabled
2092 *
2093 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2094 * adapter port.
2095 */
2096void t3_port_intr_disable(struct adapter *adapter, int idx)
2097{
2098 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2099
2100 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2101 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2102 phy->ops->intr_disable(phy);
2103}
2104
2105/**
2106 * t3_port_intr_clear - clear port-specific interrupts
2107 * @adapter: associated adapter
2108 * @idx: index of port whose interrupts to clear
2109 *
2110 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2111 * adapter port.
2112 */
2113void t3_port_intr_clear(struct adapter *adapter, int idx)
2114{
2115 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2116
2117 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2118 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2119 phy->ops->intr_clear(phy);
2120}
2121
2122#define SG_CONTEXT_CMD_ATTEMPTS 100
2123
2124/**
2125 * t3_sge_write_context - write an SGE context
2126 * @adapter: the adapter
2127 * @id: the context id
2128 * @type: the context type
2129 *
2130 * Program an SGE context with the values already loaded in the
2131 * CONTEXT_DATA? registers.
2132 */
2133static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2134 unsigned int type)
2135{
2136 if (type == F_RESPONSEQ) {
2137 /*
2138 * Can't write the Response Queue Context bits for
2139 * Interrupt Armed or the Reserve bits after the chip
2140 * has been initialized out of reset. Writing to these
2141 * bits can confuse the hardware.
2142 */
2143 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2144 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2145 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2146 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2147 } else {
2148 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2149 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2150 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2151 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2152 }
2153 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2154 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2155 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2156 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2157}
2158
2159/**
2160 * clear_sge_ctxt - completely clear an SGE context
2161 * @adapter: the adapter
2162 * @id: the context id
2163 * @type: the context type
2164 *
2165 * Completely clear an SGE context. Used predominantly at post-reset
2166 * initialization. Note in particular that we don't skip writing to any
2167 * "sensitive bits" in the contexts the way that t3_sge_write_context()
2168 * does ...
2169 */
2170static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2171 unsigned int type)
2172{
2173 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2174 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2175 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2176 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2177 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2178 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2179 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2180 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2181 t3_write_reg(adap, A_SG_CONTEXT_CMD,
2182 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2183 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2184 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2185}
2186
2187/**
2188 * t3_sge_init_ecntxt - initialize an SGE egress context
2189 * @adapter: the adapter to configure
2190 * @id: the context id
2191 * @gts_enable: whether to enable GTS for the context
2192 * @type: the egress context type
2193 * @respq: associated response queue
2194 * @base_addr: base address of queue
2195 * @size: number of queue entries
2196 * @token: uP token
2197 * @gen: initial generation value for the context
2198 * @cidx: consumer pointer
2199 *
2200 * Initialize an SGE egress context and make it ready for use. If the
2201 * platform allows concurrent context operations, the caller is
2202 * responsible for appropriate locking.
2203 */
2204int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2205 enum sge_context_type type, int respq, u64 base_addr,
2206 unsigned int size, unsigned int token, int gen,
2207 unsigned int cidx)
2208{
2209 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2210
2211 if (base_addr & 0xfff) /* must be 4K aligned */
2212 return -EINVAL;
2213 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2214 return -EBUSY;
2215
2216 base_addr >>= 12;
2217 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2218 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2219 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2220 V_EC_BASE_LO(base_addr & 0xffff));
2221 base_addr >>= 16;
2222 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2223 base_addr >>= 32;
2224 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2225 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2226 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2227 F_EC_VALID);
2228 return t3_sge_write_context(adapter, id, F_EGRESS);
2229}
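/*
 * Packing note for the writes above: @base_addr must be 4 KB aligned,
 * so the 12 zero alignment bits are dropped and the remaining page
 * number is spread across DATA1 (low 16 bits), DATA2 (next 32 bits) and
 * DATA3 (top 4 bits).  For example, a queue at physical address
 * 0x12345000 becomes page number 0x12345: 0x2345 lands in DATA1 and
 * 0x1 in DATA2.
 */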
2230
2231/**
2232 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2233 * @adapter: the adapter to configure
2234 * @id: the context id
2235 * @gts_enable: whether to enable GTS for the context
2236 * @base_addr: base address of queue
2237 * @size: number of queue entries
2238 * @bsize: size of each buffer for this queue
2239 * @cong_thres: threshold to signal congestion to upstream producers
2240 * @gen: initial generation value for the context
2241 * @cidx: consumer pointer
2242 *
2243 * Initialize an SGE free list context and make it ready for use. The
2244 * caller is responsible for ensuring only one context operation occurs
2245 * at a time.
2246 */
2247int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2248 int gts_enable, u64 base_addr, unsigned int size,
2249 unsigned int bsize, unsigned int cong_thres, int gen,
2250 unsigned int cidx)
2251{
2252 if (base_addr & 0xfff) /* must be 4K aligned */
2253 return -EINVAL;
2254 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2255 return -EBUSY;
2256
2257 base_addr >>= 12;
2258 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2259 base_addr >>= 32;
2260 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2261 V_FL_BASE_HI((u32) base_addr) |
2262 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2263 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2264 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2265 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2266 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2267 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2268 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2269 return t3_sge_write_context(adapter, id, F_FREELIST);
2270}
2271
2272/**
2273 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2274 * @adapter: the adapter to configure
2275 * @id: the context id
2276 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2277 * @base_addr: base address of queue
2278 * @size: number of queue entries
2279 * @fl_thres: threshold for selecting the normal or jumbo free list
2280 * @gen: initial generation value for the context
2281 * @cidx: consumer pointer
2282 *
2283 * Initialize an SGE response queue context and make it ready for use.
2284 * The caller is responsible for ensuring only one context operation
2285 * occurs at a time.
2286 */
2287int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2288 int irq_vec_idx, u64 base_addr, unsigned int size,
2289 unsigned int fl_thres, int gen, unsigned int cidx)
2290{
2291 unsigned int intr = 0;
2292
2293 if (base_addr & 0xfff) /* must be 4K aligned */
2294 return -EINVAL;
2295 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2296 return -EBUSY;
2297
2298 base_addr >>= 12;
2299 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2300 V_CQ_INDEX(cidx));
2301 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2302 base_addr >>= 32;
2303 if (irq_vec_idx >= 0)
2304 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2305 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2306 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2307 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2308 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2309}
2310
2311/**
2312 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2313 * @adapter: the adapter to configure
2314 * @id: the context id
2315 * @base_addr: base address of queue
2316 * @size: number of queue entries
2317 * @rspq: response queue for async notifications
2318 * @ovfl_mode: CQ overflow mode
2319 * @credits: completion queue credits
2320 * @credit_thres: the credit threshold
2321 *
2322 * Initialize an SGE completion queue context and make it ready for use.
2323 * The caller is responsible for ensuring only one context operation
2324 * occurs at a time.
2325 */
2326int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2327 unsigned int size, int rspq, int ovfl_mode,
2328 unsigned int credits, unsigned int credit_thres)
2329{
2330 if (base_addr & 0xfff) /* must be 4K aligned */
2331 return -EINVAL;
2332 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2333 return -EBUSY;
2334
2335 base_addr >>= 12;
2336 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2337 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2338 base_addr >>= 32;
2339 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2340 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2341 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2342 V_CQ_ERR(ovfl_mode));
2343 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2344 V_CQ_CREDIT_THRES(credit_thres));
2345 return t3_sge_write_context(adapter, id, F_CQ);
2346}
2347
2348/**
2349 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2350 * @adapter: the adapter
2351 * @id: the egress context id
2352 * @enable: enable (1) or disable (0) the context
2353 *
2354 * Enable or disable an SGE egress context. The caller is responsible for
2355 * ensuring only one context operation occurs at a time.
2356 */
2357int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2358{
2359 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2360 return -EBUSY;
2361
2362 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2363 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2364 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2365 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2366 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2367 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2368 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2369 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2370 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2371}
2372
2373/**
2374 * t3_sge_disable_fl - disable an SGE free-buffer list
2375 * @adapter: the adapter
2376 * @id: the free list context id
2377 *
2378 * Disable an SGE free-buffer list. The caller is responsible for
2379 * ensuring only one context operation occurs at a time.
2380 */
2381int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2382{
2383 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2384 return -EBUSY;
2385
2386 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2387 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2388 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2389 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2390 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2391 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2392 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2393 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2394 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2395}
2396
2397/**
2398 * t3_sge_disable_rspcntxt - disable an SGE response queue
2399 * @adapter: the adapter
2400 * @id: the response queue context id
2401 *
2402 * Disable an SGE response queue. The caller is responsible for
2403 * ensuring only one context operation occurs at a time.
2404 */
2405int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2406{
2407 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2408 return -EBUSY;
2409
2410 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2411 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2412 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2413 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2414 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2415 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2416 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2417 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2418 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2419}
2420
2421/**
2422 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2423 * @adapter: the adapter
2424 * @id: the completion queue context id
2425 *
2426 * Disable an SGE completion queue. The caller is responsible for
2427 * ensuring only one context operation occurs at a time.
2428 */
2429int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2430{
2431 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2432 return -EBUSY;
2433
2434 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2435 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2436 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2437 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2438 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2439 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2440 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2441 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2442 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2443}
2444
2445/**
2446 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2447 * @adapter: the adapter
2448 * @id: the context id
2449 * @op: the operation to perform
2450 *
2451 * Perform the selected operation on an SGE completion queue context.
2452 * The caller is responsible for ensuring only one context operation
2453 * occurs at a time.
2454 */
2455int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2456 unsigned int credits)
2457{
2458 u32 val;
2459
2460 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2461 return -EBUSY;
2462
2463 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2464 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2465 V_CONTEXT(id) | F_CQ);
2466 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2467 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2468 return -EIO;
2469
2470 if (op >= 2 && op < 7) {
2471 if (adapter->params.rev > 0)
2472 return G_CQ_INDEX(val);
2473
2474 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2475 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2476 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2477 F_CONTEXT_CMD_BUSY, 0,
2478 SG_CONTEXT_CMD_ATTEMPTS, 1))
2479 return -EIO;
2480 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2481 }
2482 return 0;
2483}
2484
2485/**
2486 * t3_sge_read_context - read an SGE context
2487 * @type: the context type
2488 * @adapter: the adapter
2489 * @id: the context id
2490 * @data: holds the retrieved context
2491 *
 2492	 * Read an SGE context of the given type.  The caller is responsible for
2493 * only one context operation occurs at a time.
2494 */
2495static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2496 unsigned int id, u32 data[4])
2497{
2498 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2499 return -EBUSY;
2500
2501 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2502 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2503 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
bb9366af 2504 SG_CONTEXT_CMD_ATTEMPTS, 1))
2505 return -EIO;
2506 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2507 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2508 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2509 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2510 return 0;
2511}
2512
2513/**
2514 * t3_sge_read_ecntxt - read an SGE egress context
2515 * @adapter: the adapter
2516 * @id: the context id
2517 * @data: holds the retrieved context
2518 *
2519 * Read an SGE egress context. The caller is responsible for ensuring
2520 * only one context operation occurs at a time.
2521 */
2522int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2523{
2524 if (id >= 65536)
2525 return -EINVAL;
2526 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2527}
2528
2529/**
2530 * t3_sge_read_cq - read an SGE CQ context
2531 * @adapter: the adapter
2532 * @id: the context id
2533 * @data: holds the retrieved context
2534 *
2535 * Read an SGE CQ context. The caller is responsible for ensuring
2536 * only one context operation occurs at a time.
2537 */
2538int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2539{
2540 if (id >= 65536)
2541 return -EINVAL;
2542 return t3_sge_read_context(F_CQ, adapter, id, data);
2543}
2544
2545/**
2546 * t3_sge_read_fl - read an SGE free-list context
2547 * @adapter: the adapter
2548 * @id: the context id
2549 * @data: holds the retrieved context
2550 *
2551 * Read an SGE free-list context. The caller is responsible for ensuring
2552 * only one context operation occurs at a time.
2553 */
2554int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2555{
2556 if (id >= SGE_QSETS * 2)
2557 return -EINVAL;
2558 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2559}
2560
2561/**
2562 * t3_sge_read_rspq - read an SGE response queue context
2563 * @adapter: the adapter
2564 * @id: the context id
2565 * @data: holds the retrieved context
2566 *
2567 * Read an SGE response queue context. The caller is responsible for
2568 * ensuring only one context operation occurs at a time.
2569 */
2570int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2571{
2572 if (id >= SGE_QSETS)
2573 return -EINVAL;
2574 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2575}
2576
2577/**
2578 * t3_config_rss - configure Rx packet steering
2579 * @adapter: the adapter
2580 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2581 * @cpus: values for the CPU lookup table (0xff terminated)
2582 * @rspq: values for the response queue lookup table (0xffff terminated)
2583 *
2584 * Programs the receive packet steering logic. @cpus and @rspq provide
2585 * the values for the CPU and response queue lookup tables. If they
2586 * provide fewer values than the size of the tables the supplied values
2587 * are used repeatedly until the tables are fully populated.
2588 */
2589void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2590 const u8 * cpus, const u16 *rspq)
2591{
2592 int i, j, cpu_idx = 0, q_idx = 0;
2593
2594 if (cpus)
2595 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2596 u32 val = i << 16;
2597
2598 for (j = 0; j < 2; ++j) {
2599 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2600 if (cpus[cpu_idx] == 0xff)
2601 cpu_idx = 0;
2602 }
2603 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2604 }
2605
2606 if (rspq)
2607 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2608 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2609 (i << 16) | rspq[q_idx++]);
2610 if (rspq[q_idx] == 0xffff)
2611 q_idx = 0;
2612 }
2613
2614 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2615}
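/*
 * Usage sketch (illustrative only): program an identity-style mapping
 * where hash bucket i steers to response queue i modulo the number of
 * queue sets in use.  "example_setup_rss" and "nqsets" are names
 * invented for this sketch, and the RSS config flags shown are one
 * plausible choice rather than the driver's definitive setting.
 */
#if 0	/* example only, not compiled */
static void example_setup_rss(struct adapter *adap, unsigned int nqsets)
{
	u8 cpus[SGE_QSETS + 1];
	u16 rspq[SGE_QSETS + 1];
	unsigned int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		cpus[i] = i;
		rspq[i] = i % nqsets;
	}
	cpus[SGE_QSETS] = 0xff;		/* terminates the CPU values */
	rspq[SGE_QSETS] = 0xffff;	/* terminates the RSPQ values */

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_HASHTOEPLITZ, cpus, rspq);
}
#endif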
2616
2617/**
2618 * t3_read_rss - read the contents of the RSS tables
2619 * @adapter: the adapter
2620 * @lkup: holds the contents of the RSS lookup table
2621 * @map: holds the contents of the RSS map table
2622 *
2623 * Reads the contents of the receive packet steering tables.
2624 */
2625int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2626{
2627 int i;
2628 u32 val;
2629
2630 if (lkup)
2631 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2632 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2633 0xffff0000 | i);
2634 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2635 if (!(val & 0x80000000))
2636 return -EAGAIN;
2637 *lkup++ = val;
2638 *lkup++ = (val >> 8);
2639 }
2640
2641 if (map)
2642 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2643 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2644 0xffff0000 | i);
2645 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2646 if (!(val & 0x80000000))
2647 return -EAGAIN;
2648 *map++ = val;
2649 }
2650 return 0;
2651}
2652
2653/**
2654 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2655 * @adap: the adapter
2656 * @enable: 1 to select offload mode, 0 for regular NIC
2657 *
2658 * Switches TP to NIC/offload mode.
2659 */
2660void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2661{
2662 if (is_offload(adap) || !enable)
2663 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2664 V_NICMODE(!enable));
2665}
2666
2667/**
2668 * pm_num_pages - calculate the number of pages of the payload memory
2669 * @mem_size: the size of the payload memory
2670 * @pg_size: the size of each payload memory page
2671 *
2672 * Calculate the number of pages, each of the given size, that fit in a
2673 * memory of the specified size, respecting the HW requirement that the
2674 * number of pages must be a multiple of 24.
2675 */
2676static inline unsigned int pm_num_pages(unsigned int mem_size,
2677 unsigned int pg_size)
2678{
2679 unsigned int n = mem_size / pg_size;
2680
2681 return n - n % 24;
2682}
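/*
 * Worked example: with 64 MB of payload memory and 16 KB pages,
 * n = 67108864 / 16384 = 4096, and 4096 % 24 = 16, so pm_num_pages()
 * returns 4080, the largest multiple of 24 that fits.
 */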
2683
2684#define mem_region(adap, start, size, reg) \
2685 t3_write_reg((adap), A_ ## reg, (start)); \
2686 start += size
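/*
 * Note that mem_region() expands to two statements (the register write
 * plus the advance of @start), so it is only safe when used as a full
 * statement of its own, never as the unbraced body of an if or loop.
 */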
2687
b881955b 2688/**
2689 * partition_mem - partition memory and configure TP memory settings
2690 * @adap: the adapter
2691 * @p: the TP parameters
2692 *
2693 * Partitions context and payload memory and configures TP's memory
2694 * registers.
2695 */
2696static void partition_mem(struct adapter *adap, const struct tp_params *p)
2697{
2698 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2699 unsigned int timers = 0, timers_shift = 22;
2700
2701 if (adap->params.rev > 0) {
2702 if (tids <= 16 * 1024) {
2703 timers = 1;
2704 timers_shift = 16;
2705 } else if (tids <= 64 * 1024) {
2706 timers = 2;
2707 timers_shift = 18;
2708 } else if (tids <= 256 * 1024) {
2709 timers = 3;
2710 timers_shift = 20;
2711 }
2712 }
2713
2714 t3_write_reg(adap, A_TP_PMM_SIZE,
2715 p->chan_rx_size | (p->chan_tx_size >> 16));
2716
2717 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2718 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2719 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2720 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2721 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2722
2723 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2724 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2725 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2726
2727 pstructs = p->rx_num_pgs + p->tx_num_pgs;
 2728	/* Add a bit of headroom and make it a multiple of 24 */
2729 pstructs += 48;
2730 pstructs -= pstructs % 24;
2731 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2732
2733 m = tids * TCB_SIZE;
2734 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2735 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2736 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2737 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2738 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2739 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2740 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2741 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2742
2743 m = (m + 4095) & ~0xfff;
2744 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2745 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2746
2747 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2748 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2749 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2750 if (tids < m)
2751 adap->params.mc5.nservers += m - tids;
2752}
2753
2754static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2755 u32 val)
2756{
2757 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2758 t3_write_reg(adap, A_TP_PIO_DATA, val);
2759}
2760
2761static void tp_config(struct adapter *adap, const struct tp_params *p)
2762{
2763 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2764 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2765 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2766 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2767 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
3fa58c88 2768 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2769 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2770 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
3fa58c88 2771 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
4d22de3e 2772 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
b881955b 2773 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2774 F_IPV6ENABLE | F_NICMODE);
2775 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2776 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2777 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2778 adap->params.rev > 0 ? F_ENABLEESND :
2779 F_T3A_ENABLEESND);
4d22de3e 2780
3b1d307b 2781 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2782 F_ENABLEEPCMDAFULL,
 2783			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2784 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2785 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2786 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2787 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2788 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2789 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2eab17ab 2790
2791 if (adap->params.rev > 0) {
2792 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2793 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2794 F_TXPACEAUTO);
2795 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2796 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2797 } else
2798 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2799
2800 if (adap->params.rev == T3_REV_C)
2801 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2802 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2803 V_TABLELATENCYDELTA(4));
2804
2805 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2806 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2807 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2808 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2809}
2810
2811/* Desired TP timer resolution in usec */
2812#define TP_TMR_RES 50
2813
2814/* TCP timer values in ms */
2815#define TP_DACK_TIMER 50
2816#define TP_RTO_MIN 250
2817
2818/**
2819 * tp_set_timers - set TP timing parameters
2820 * @adap: the adapter to set
2821 * @core_clk: the core clock frequency in Hz
2822 *
2823 * Set TP's timing parameters, such as the various timer resolutions and
2824 * the TCP timer values.
2825 */
2826static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2827{
2828 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2829 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2830 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2831 unsigned int tps = core_clk >> tre;
2832
2833 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2834 V_DELAYEDACKRESOLUTION(dack_re) |
2835 V_TIMESTAMPRESOLUTION(tstamp_re));
2836 t3_write_reg(adap, A_TP_DACK_TIMER,
2837 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2838 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2839 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2840 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2841 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2842 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2843 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2844 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2845 V_KEEPALIVEMAX(9));
2846
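/*
 * Readability trick: "n SECONDS" below expands to "n * tps", i.e. n
 * seconds expressed in timer ticks at the resolution chosen above.
 */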
2847#define SECONDS * tps
2848
2849 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2850 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2851 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2852 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2853 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2854 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2855 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2856 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2857 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2858
2859#undef SECONDS
2860}
2861
2862/**
2863 * t3_tp_set_coalescing_size - set receive coalescing size
2864 * @adap: the adapter
2865 * @size: the receive coalescing size
2866 * @psh: whether a set PSH bit should deliver coalesced data
2867 *
2868 * Set the receive coalescing size and PSH bit handling.
2869 */
2870int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2871{
2872 u32 val;
2873
2874 if (size > MAX_RX_COALESCING_LEN)
2875 return -EINVAL;
2876
2877 val = t3_read_reg(adap, A_TP_PARA_REG3);
2878 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2879
2880 if (size) {
2881 val |= F_RXCOALESCEENABLE;
2882 if (psh)
2883 val |= F_RXCOALESCEPSHEN;
8a9fab22 2884 size = min(MAX_RX_COALESCING_LEN, size);
2885 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2886 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2887 }
2888 t3_write_reg(adap, A_TP_PARA_REG3, val);
2889 return 0;
2890}
2891
2892/**
2893 * t3_tp_set_max_rxsize - set the max receive size
2894 * @adap: the adapter
2895 * @size: the max receive size
2896 *
2897 * Set TP's max receive size. This is the limit that applies when
2898 * receive coalescing is disabled.
2899 */
2900void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2901{
2902 t3_write_reg(adap, A_TP_PARA_REG7,
2903 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2904}
2905
7b9b0943 2906static void init_mtus(unsigned short mtus[])
2907{
2908 /*
2909 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
 2910	 * it can accommodate max size TCP/IP headers when SACK and timestamps
2911 * are enabled and still have at least 8 bytes of payload.
2912 */
75758e8a 2913 mtus[0] = 88;
2914 mtus[1] = 88;
2915 mtus[2] = 256;
2916 mtus[3] = 512;
2917 mtus[4] = 576;
2918 mtus[5] = 1024;
2919 mtus[6] = 1280;
2920 mtus[7] = 1492;
2921 mtus[8] = 1500;
2922 mtus[9] = 2002;
2923 mtus[10] = 2048;
2924 mtus[11] = 4096;
2925 mtus[12] = 4352;
2926 mtus[13] = 8192;
2927 mtus[14] = 9000;
2928 mtus[15] = 9600;
2929}
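/*
 * Aside: beyond the plpmtud values, the table tracks common real-world
 * link MTUs, e.g. 1492 for PPPoE, 1500 for Ethernet, 4352 for FDDI and
 * 9000 for jumbo frames.
 */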
2930
2931/*
2932 * Initial congestion control parameters.
2933 */
7b9b0943 2934static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2935{
2936 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2937 a[9] = 2;
2938 a[10] = 3;
2939 a[11] = 4;
2940 a[12] = 5;
2941 a[13] = 6;
2942 a[14] = 7;
2943 a[15] = 8;
2944 a[16] = 9;
2945 a[17] = 10;
2946 a[18] = 14;
2947 a[19] = 17;
2948 a[20] = 21;
2949 a[21] = 25;
2950 a[22] = 30;
2951 a[23] = 35;
2952 a[24] = 45;
2953 a[25] = 60;
2954 a[26] = 80;
2955 a[27] = 100;
2956 a[28] = 200;
2957 a[29] = 300;
2958 a[30] = 400;
2959 a[31] = 500;
2960
2961 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2962 b[9] = b[10] = 1;
2963 b[11] = b[12] = 2;
2964 b[13] = b[14] = b[15] = b[16] = 3;
2965 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2966 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2967 b[28] = b[29] = 6;
2968 b[30] = b[31] = 7;
2969}
2970
2971/* The minimum additive increment value for the congestion control table */
2972#define CC_MIN_INCR 2U
2973
2974/**
2975 * t3_load_mtus - write the MTU and congestion control HW tables
2976 * @adap: the adapter
2977 * @mtus: the unrestricted values for the MTU table
 2978	 *	@alpha: the values for the congestion control alpha parameter
 2979	 *	@beta: the values for the congestion control beta parameter
 2980	 *	@mtu_cap: the maximum permitted effective MTU
 2981	 *
 2982	 *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2983 * Update the high-speed congestion control table with the supplied alpha,
2984 * beta, and MTUs.
2985 */
2986void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2987 unsigned short alpha[NCCTRL_WIN],
2988 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2989{
2990 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2991 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2992 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2993 28672, 40960, 57344, 81920, 114688, 163840, 229376
2994 };
2995
2996 unsigned int i, w;
2997
2998 for (i = 0; i < NMTUS; ++i) {
2999 unsigned int mtu = min(mtus[i], mtu_cap);
3000 unsigned int log2 = fls(mtu);
3001
3002 if (!(mtu & ((1 << log2) >> 2))) /* round */
3003 log2--;
3004 t3_write_reg(adap, A_TP_MTU_TABLE,
3005 (i << 24) | (log2 << 16) | mtu);
3006
3007 for (w = 0; w < NCCTRL_WIN; ++w) {
3008 unsigned int inc;
3009
3010 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3011 CC_MIN_INCR);
3012
3013 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3014 (w << 16) | (beta[w] << 13) | inc);
3015 }
3016 }
3017}
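/*
 * Rounding example for the log2 computation above: for mtu = 1500,
 * fls(1500) = 11 but bit 9 (value 512) of 1500 is clear, so log2 drops
 * to 10 and the MTU is encoded as nearest to 2^10 = 1024.  An mtu of
 * 1600 has bit 9 set and would keep log2 = 11, i.e. nearest 2^11 = 2048.
 */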
3018
3019/**
3020 * t3_read_hw_mtus - returns the values in the HW MTU table
3021 * @adap: the adapter
3022 * @mtus: where to store the HW MTU values
3023 *
3024 * Reads the HW MTU table.
3025 */
3026void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
3027{
3028 int i;
3029
3030 for (i = 0; i < NMTUS; ++i) {
3031 unsigned int val;
3032
3033 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3034 val = t3_read_reg(adap, A_TP_MTU_TABLE);
3035 mtus[i] = val & 0x3fff;
3036 }
3037}
3038
3039/**
3040 * t3_get_cong_cntl_tab - reads the congestion control table
3041 * @adap: the adapter
3042 * @incr: where to store the alpha values
3043 *
3044 * Reads the additive increments programmed into the HW congestion
3045 * control table.
3046 */
3047void t3_get_cong_cntl_tab(struct adapter *adap,
3048 unsigned short incr[NMTUS][NCCTRL_WIN])
3049{
3050 unsigned int mtu, w;
3051
3052 for (mtu = 0; mtu < NMTUS; ++mtu)
3053 for (w = 0; w < NCCTRL_WIN; ++w) {
3054 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3055 0xffff0000 | (mtu << 5) | w);
3056 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
3057 0x1fff;
3058 }
3059}
3060
3061/**
3062 * t3_tp_get_mib_stats - read TP's MIB counters
3063 * @adap: the adapter
3064 * @tps: holds the returned counter values
3065 *
3066 * Returns the values of TP's MIB counters.
3067 */
3068void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
3069{
3070 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
3071 sizeof(*tps) / sizeof(u32), 0);
3072}
3073
3074#define ulp_region(adap, name, start, len) \
3075 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3076 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3077 (start) + (len) - 1); \
3078 start += len
3079
3080#define ulptx_region(adap, name, start, len) \
3081 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3082 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3083 (start) + (len) - 1)
3084
3085static void ulp_config(struct adapter *adap, const struct tp_params *p)
3086{
3087 unsigned int m = p->chan_rx_size;
3088
3089 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3090 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3091 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3092 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3093 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3094 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3095 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3096 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3097}
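/*
 * Layout note: ulptx_region() programs limits without advancing @start,
 * so each ULP TX region above is deliberately overlaid on the ULP RX
 * region programmed right after it (TPT over STAG, the TX PBL over the
 * RX PBL); only the ulp_region() calls consume address space.
 */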
3098
3099/**
3100 * t3_set_proto_sram - set the contents of the protocol sram
3101 * @adapter: the adapter
3102 * @data: the protocol image
3103 *
3104 * Write the contents of the protocol SRAM.
3105 */
2c733a16 3106int t3_set_proto_sram(struct adapter *adap, const u8 *data)
3107{
3108 int i;
2c733a16 3109 const __be32 *buf = (const __be32 *)data;
3110
3111 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3112 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
3113 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
3114 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
3115 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
3116 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2eab17ab 3117
 3118		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1U << 31);
3119 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3120 return -EIO;
3121 }
3122 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
3123
3124 return 0;
3125}
3126
3127void t3_config_trace_filter(struct adapter *adapter,
3128 const struct trace_params *tp, int filter_index,
3129 int invert, int enable)
3130{
3131 u32 addr, key[4], mask[4];
3132
3133 key[0] = tp->sport | (tp->sip << 16);
3134 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3135 key[2] = tp->dip;
3136 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3137
3138 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3139 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3140 mask[2] = tp->dip_mask;
3141 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3142
3143 if (invert)
3144 key[3] |= (1 << 29);
3145 if (enable)
3146 key[3] |= (1 << 28);
3147
3148 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3149 tp_wr_indirect(adapter, addr++, key[0]);
3150 tp_wr_indirect(adapter, addr++, mask[0]);
3151 tp_wr_indirect(adapter, addr++, key[1]);
3152 tp_wr_indirect(adapter, addr++, mask[1]);
3153 tp_wr_indirect(adapter, addr++, key[2]);
3154 tp_wr_indirect(adapter, addr++, mask[2]);
3155 tp_wr_indirect(adapter, addr++, key[3]);
3156 tp_wr_indirect(adapter, addr, mask[3]);
3157 t3_read_reg(adapter, A_TP_PIO_DATA);
3158}
3159
3160/**
3161 * t3_config_sched - configure a HW traffic scheduler
3162 * @adap: the adapter
3163 * @kbps: target rate in Kbps
3164 * @sched: the scheduler index
3165 *
 3166	 *	Configure a HW scheduler for the target rate.
3167 */
3168int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
3169{
3170 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3171 unsigned int clk = adap->params.vpd.cclk * 1000;
3172 unsigned int selected_cpt = 0, selected_bpt = 0;
3173
3174 if (kbps > 0) {
 3175		kbps *= 125;	/* -> bytes/s */
3176 for (cpt = 1; cpt <= 255; cpt++) {
3177 tps = clk / cpt;
3178 bpt = (kbps + tps / 2) / tps;
3179 if (bpt > 0 && bpt <= 255) {
3180 v = bpt * tps;
3181 delta = v >= kbps ? v - kbps : kbps - v;
3182 if (delta <= mindelta) {
3183 mindelta = delta;
3184 selected_cpt = cpt;
3185 selected_bpt = bpt;
3186 }
3187 } else if (selected_cpt)
3188 break;
3189 }
3190 if (!selected_cpt)
3191 return -EINVAL;
3192 }
3193 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3194 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3195 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3196 if (sched & 1)
3197 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3198 else
3199 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3200 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3201 return 0;
3202}
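/*
 * Worked example: with a 200 MHz core clock and kbps = 10000, the
 * target is 10000 * 125 = 1,250,000 bytes/sec.  The search above finds
 * cpt = 160 (tps = 200,000,000 / 160 = 1,250,000 ticks/sec) with
 * bpt = 1 byte per tick, which hits the target rate exactly.
 */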
3203
3204static int tp_init(struct adapter *adap, const struct tp_params *p)
3205{
3206 int busy = 0;
3207
3208 tp_config(adap, p);
3209 t3_set_vlan_accel(adap, 3, 0);
3210
3211 if (is_offload(adap)) {
3212 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3213 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3214 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3215 0, 1000, 5);
3216 if (busy)
3217 CH_ERR(adap, "TP initialization timed out\n");
3218 }
3219
3220 if (!busy)
3221 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3222 return busy;
3223}
3224
3225int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3226{
3227 if (port_mask & ~((1 << adap->params.nports) - 1))
3228 return -EINVAL;
3229 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3230 port_mask << S_PORT0ACTIVE);
3231 return 0;
3232}
3233
3234/*
3235 * Perform the bits of HW initialization that are dependent on the Tx
3236 * channels being used.
4d22de3e 3237 */
952cdf33 3238static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3239{
3240 int i;
3241
952cdf33 3242 if (chan_map != 3) { /* one channel */
3243 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3244 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3245 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3246 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3247 F_TPTXPORT1EN | F_PORT1ACTIVE));
3248 t3_write_reg(adap, A_PM1_TX_CFG,
3249 chan_map == 1 ? 0xffffffff : 0);
3250 } else { /* two channels */
3251 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3252 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3253 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3254 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3255 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3256 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3257 F_ENFORCEPKT);
3258 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3259 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3260 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3261 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3262 for (i = 0; i < 16; i++)
3263 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3264 (i << 16) | 0x1010);
3265 }
3266}
3267
3268static int calibrate_xgm(struct adapter *adapter)
3269{
3270 if (uses_xaui(adapter)) {
3271 unsigned int v, i;
3272
3273 for (i = 0; i < 5; ++i) {
3274 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3275 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3276 msleep(1);
3277 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3278 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3279 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3280 V_XAUIIMP(G_CALIMP(v) >> 2));
3281 return 0;
3282 }
3283 }
3284 CH_ERR(adapter, "MAC calibration failed\n");
3285 return -1;
3286 } else {
3287 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3288 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3289 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3290 F_XGM_IMPSETUPDATE);
3291 }
3292 return 0;
3293}
3294
3295static void calibrate_xgm_t3b(struct adapter *adapter)
3296{
3297 if (!uses_xaui(adapter)) {
3298 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3299 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3300 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3301 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3302 F_XGM_IMPSETUPDATE);
3303 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3304 0);
3305 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3306 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3307 }
3308}
3309
3310struct mc7_timing_params {
3311 unsigned char ActToPreDly;
3312 unsigned char ActToRdWrDly;
3313 unsigned char PreCyc;
3314 unsigned char RefCyc[5];
3315 unsigned char BkCyc;
3316 unsigned char WrToRdDly;
3317 unsigned char RdToWrDly;
3318};
3319
3320/*
3321 * Write a value to a register and check that the write completed. These
3322 * writes normally complete in a cycle or two, so one read should suffice.
3323 * The very first read exists to flush the posted write to the device.
3324 */
3325static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3326{
3327 t3_write_reg(adapter, addr, val);
3328 t3_read_reg(adapter, addr); /* flush */
3329 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3330 return 0;
3331 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3332 return -EIO;
3333}
3334
3335static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3336{
3337 static const unsigned int mc7_mode[] = {
3338 0x632, 0x642, 0x652, 0x432, 0x442
3339 };
3340 static const struct mc7_timing_params mc7_timings[] = {
3341 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3342 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3343 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3344 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3345 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3346 };
3347
3348 u32 val;
3349 unsigned int width, density, slow, attempts;
3350 struct adapter *adapter = mc7->adapter;
3351 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3352
3353 if (!mc7->size)
3354 return 0;
3355
3356 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3357 slow = val & F_SLOW;
3358 width = G_WIDTH(val);
3359 density = G_DEN(val);
3360
3361 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3362 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3363 msleep(1);
3364
3365 if (!slow) {
3366 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3367 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3368 msleep(1);
3369 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3370 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3371 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3372 mc7->name);
3373 goto out_fail;
3374 }
3375 }
3376
3377 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3378 V_ACTTOPREDLY(p->ActToPreDly) |
3379 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3380 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3381 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3382
3383 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3384 val | F_CLKEN | F_TERM150);
3385 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3386
3387 if (!slow)
3388 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3389 F_DLLENB);
3390 udelay(1);
3391
3392 val = slow ? 3 : 6;
3393 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3394 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3395 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3396 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3397 goto out_fail;
3398
3399 if (!slow) {
3400 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3401 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3402 udelay(5);
3403 }
3404
3405 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3406 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3407 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3408 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3409 mc7_mode[mem_type]) ||
3410 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3411 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3412 goto out_fail;
3413
3414 /* clock value is in KHz */
3415 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3416 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
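	/*
	 * Note (added commentary): the two statements above compute
	 * mc7_clock(kHz) * 7812.5 / 1e6, i.e. the number of memory-clock
	 * cycles in one 7.8125 us DRAM refresh interval (64 ms / 8192 rows).
	 * E.g. a 200 MHz clock (200000 kHz) yields
	 * (200000 * 7812 + 100000) / 1000000 = 1562 cycles for the
	 * V_PREREFDIV divider programmed below.
	 */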
3417
3418 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3419 F_PERREFEN | V_PREREFDIV(mc7_clock));
3420 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3421
3422 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3423 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3424 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3425 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3426 (mc7->size << width) - 1);
3427 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3428 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3429
3430 attempts = 50;
3431 do {
3432 msleep(250);
3433 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3434 } while ((val & F_BUSY) && --attempts);
3435 if (val & F_BUSY) {
3436 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3437 goto out_fail;
3438 }
3439
3440 /* Enable normal memory accesses. */
3441 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3442 return 0;
3443
3444out_fail:
3445 return -1;
3446}
3447
3448static void config_pcie(struct adapter *adap)
3449{
3450 static const u16 ack_lat[4][6] = {
3451 {237, 416, 559, 1071, 2095, 4143},
3452 {128, 217, 289, 545, 1057, 2081},
3453 {73, 118, 154, 282, 538, 1050},
3454 {67, 107, 86, 150, 278, 534}
3455 };
3456 static const u16 rpl_tmr[4][6] = {
3457 {711, 1248, 1677, 3213, 6285, 12429},
3458 {384, 651, 867, 1635, 3171, 6243},
3459 {219, 354, 462, 846, 1614, 3150},
3460 {201, 321, 258, 450, 834, 1602}
3461 };
3462
3463 u16 val;
3464 unsigned int log2_width, pldsize;
3465 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3466
3467 pci_read_config_word(adap->pdev,
3468 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3469 &val);
3470 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3471 pci_read_config_word(adap->pdev,
3472 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3473 &val);
3474
3475 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3476 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3477 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3478 log2_width = fls(adap->params.pci.width) - 1;
3479 acklat = ack_lat[log2_width][pldsize];
3480 if (val & 1) /* check L0s Enable */
3481 acklat += fst_trn_tx * 4;
3482 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3483
3484 if (adap->params.rev == 0)
3485 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3486 V_T3A_ACKLAT(M_T3A_ACKLAT),
3487 V_T3A_ACKLAT(acklat));
3488 else
3489 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3490 V_ACKLAT(acklat));
3491
3492 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3493 V_REPLAYLMT(rpllmt));
3494
3495 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
b881955b 3496 t3_set_reg_field(adap, A_PCIE_CFG, 0,
204e2f98 3497 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
b881955b 3498 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3499}
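/*
 * Worked example (illustrative, assumed link parameters): a x8 link gives
 * log2_width = fls(8) - 1 = 3, and a 512-byte max payload encodes as
 * pldsize = 2, so acklat starts at ack_lat[3][2] = 86 and the replay limit
 * at rpl_tmr[3][2] = 258 symbol times.  acklat grows by 4 * fst_trn_tx when
 * L0s is enabled; the replay limit always adds 4 * fst_trn_rx.
 */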
3500
3501/*
3502 * Initialize and configure T3 HW modules. This performs the
3503 * initialization steps that need to be done once after a card is reset.
3504 * MAC and PHY initialization is handled separately whenever a port is enabled.
3505 *
3506 * fw_params are passed to FW and their value is platform dependent. Only the
3507 * top 8 bits are available for use, the rest must be 0.
3508 */
3509int t3_init_hw(struct adapter *adapter, u32 fw_params)
3510{
b881955b 3511 int err = -EIO, attempts, i;
3512 const struct vpd_params *vpd = &adapter->params.vpd;
3513
3514 if (adapter->params.rev > 0)
3515 calibrate_xgm_t3b(adapter);
3516 else if (calibrate_xgm(adapter))
3517 goto out_err;
3518
3519 if (vpd->mclk) {
3520 partition_mem(adapter, &adapter->params.tp);
3521
3522 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3523 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3524 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3525 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3526 adapter->params.mc5.nfilters,
3527 adapter->params.mc5.nroutes))
3528 goto out_err;
3529
3530 for (i = 0; i < 32; i++)
3531 if (clear_sge_ctxt(adapter, i, F_CQ))
3532 goto out_err;
3533 }
3534
3535 if (tp_init(adapter, &adapter->params.tp))
3536 goto out_err;
3537
3538 t3_tp_set_coalescing_size(adapter,
3539 min(adapter->params.sge.max_pkt_size,
3540 MAX_RX_COALESCING_LEN), 1);
3541 t3_tp_set_max_rxsize(adapter,
3542 min(adapter->params.sge.max_pkt_size, 16384U));
3543 ulp_config(adapter, &adapter->params.tp);
3544
3545 if (is_pcie(adapter))
3546 config_pcie(adapter);
3547 else
3548 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3549 F_DMASTOPEN | F_CLIDECEN);
4d22de3e 3550
3551 if (adapter->params.rev == T3_REV_C)
3552 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3553 F_CFG_CQE_SOP_MASK);
3554
8a9fab22 3555 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3556 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3557 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
952cdf33 3558 chan_init_hw(adapter, adapter->params.chan_map);
3559 t3_sge_init(adapter, &adapter->params.sge);
3560
3561 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3562
3563 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3564 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3565 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3566 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3567
b881955b 3568 attempts = 100;
3569 do { /* wait for uP to initialize */
3570 msleep(20);
3571 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3572 if (!attempts) {
3573 CH_ERR(adapter, "uP initialization timed out\n");
4d22de3e 3574 goto out_err;
8ac3ba68 3575 }
3576
3577 err = 0;
3578out_err:
3579 return err;
3580}
3581
3582/**
3583 * get_pci_mode - determine a card's PCI mode
3584 * @adapter: the adapter
3585 * @p: where to store the PCI settings
3586 *
3587 * Determines a card's PCI mode and associated parameters, such as speed
3588 * and width.
3589 */
7b9b0943 3590static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3591{
3592 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3593 u32 pci_mode, pcie_cap;
3594
3595 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3596 if (pcie_cap) {
3597 u16 val;
3598
3599 p->variant = PCI_VARIANT_PCIE;
3600 p->pcie_cap_addr = pcie_cap;
3601 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3602 &val);
3603 p->width = (val >> 4) & 0x3f;
3604 return;
3605 }
3606
3607 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3608 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3609 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3610 pci_mode = G_PCIXINITPAT(pci_mode);
3611 if (pci_mode == 0)
3612 p->variant = PCI_VARIANT_PCI;
3613 else if (pci_mode < 4)
3614 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3615 else if (pci_mode < 8)
3616 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3617 else
3618 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3619}
3620
3621/**
3622 * init_link_config - initialize a link's SW state
3623 * @lc: structure holding the link state
3624 * @caps: link capabilities of the card
3625 *
3626 * Initializes the SW state maintained for each link, including the link's
3627 * capabilities and default speed/duplex/flow-control/autonegotiation
3628 * settings.
3629 */
7b9b0943 3630static void init_link_config(struct link_config *lc, unsigned int caps)
3631{
3632 lc->supported = caps;
3633 lc->requested_speed = lc->speed = SPEED_INVALID;
3634 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3635 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3636 if (lc->supported & SUPPORTED_Autoneg) {
3637 lc->advertising = lc->supported;
3638 lc->autoneg = AUTONEG_ENABLE;
3639 lc->requested_fc |= PAUSE_AUTONEG;
3640 } else {
3641 lc->advertising = 0;
3642 lc->autoneg = AUTONEG_DISABLE;
3643 }
3644}
3645
3646/**
3647 * mc7_calc_size - calculate MC7 memory size
3648 * @cfg: the MC7 configuration
3649 *
3650 * Calculates the size of an MC7 memory in bytes from the value of its
3651 * configuration register.
3652 */
7b9b0943 3653static unsigned int mc7_calc_size(u32 cfg)
3654{
3655 unsigned int width = G_WIDTH(cfg);
3656 unsigned int banks = !!(cfg & F_BKS) + 1;
3657 unsigned int org = !!(cfg & F_ORG) + 1;
3658 unsigned int density = G_DEN(cfg);
3659 unsigned int MBs = ((256 << density) * banks) / (org << width);
3660
3661 return MBs << 20;
3662}
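/*
 * Worked example (illustrative config value assumed): for a configuration
 * decoding to density = 1, banks = 2, org = 1 and width = 2,
 * MBs = ((256 << 1) * 2) / (1 << 2) = 256, so the function reports
 * 256 MB (256 << 20 bytes).
 */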
3663
3664static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3665 unsigned int base_addr, const char *name)
3666{
3667 u32 cfg;
3668
3669 mc7->adapter = adapter;
3670 mc7->name = name;
3671 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3672 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
8ac3ba68 3673 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3674 mc7->width = G_WIDTH(cfg);
3675}
3676
3677void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3678{
3679 mac->adapter = adapter;
3680 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3681 mac->nucast = 1;
3682
3683 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3684 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3685 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3686 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3687 F_ENRGMII, 0);
3688 }
3689}
3690
3691void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3692{
3693 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3694
3695 mi1_init(adapter, ai);
3696 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3697 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3698 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3699 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
8ac3ba68 3700 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
b881955b 3701 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3702
3703 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3704 val |= F_ENRGMII;
3705
3706 /* Enable MAC clocks so we can access the registers */
3707 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3708 t3_read_reg(adapter, A_XGM_PORT_CFG);
3709
3710 val |= F_CLKDIVRESET_;
3711 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3712 t3_read_reg(adapter, A_XGM_PORT_CFG);
3713 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3714 t3_read_reg(adapter, A_XGM_PORT_CFG);
3715}
3716
3717/*
2eab17ab 3718 * Reset the adapter.
e4d08359 3719 * Older PCIe cards lose their config space during reset, PCI-X
3720 * ones don't.
3721 */
20d3fc11 3722int t3_reset_adapter(struct adapter *adapter)
4d22de3e 3723{
2eab17ab 3724 int i, save_and_restore_pcie =
e4d08359 3725 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3726 uint16_t devid = 0;
3727
e4d08359 3728 if (save_and_restore_pcie)
3729 pci_save_state(adapter->pdev);
3730 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3731
3732 /*
3733 * Delay to give the device some time to reset fully.
3734 * XXX The delay time should be modified.
3735 */
3736 for (i = 0; i < 10; i++) {
3737 msleep(50);
3738 pci_read_config_word(adapter->pdev, 0x00, &devid);
3739 if (devid == 0x1425)
3740 break;
3741 }
3742
3743 if (devid != 0x1425)
3744 return -1;
3745
e4d08359 3746 if (save_and_restore_pcie)
3747 pci_restore_state(adapter->pdev);
3748 return 0;
3749}
3750
7b9b0943 3751static int init_parity(struct adapter *adap)
3752{
3753 int i, err, addr;
3754
3755 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3756 return -EBUSY;
3757
3758 for (err = i = 0; !err && i < 16; i++)
3759 err = clear_sge_ctxt(adap, i, F_EGRESS);
3760 for (i = 0xfff0; !err && i <= 0xffff; i++)
3761 err = clear_sge_ctxt(adap, i, F_EGRESS);
3762 for (i = 0; !err && i < SGE_QSETS; i++)
3763 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3764 if (err)
3765 return err;
3766
3767 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3768 for (i = 0; i < 4; i++)
3769 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3770 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3771 F_IBQDBGWR | V_IBQDBGQID(i) |
3772 V_IBQDBGADDR(addr));
3773 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3774 F_IBQDBGBUSY, 0, 2, 1);
3775 if (err)
3776 return err;
3777 }
3778 return 0;
3779}
3780
3781/*
3782 * Initialize adapter SW state for the various HW modules, set initial values
3783 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3784 * interface.
3785 */
3786int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3787 int reset)
3788{
3789 int ret;
04497982 3790 unsigned int i, j = -1;
3791
3792 get_pci_mode(adapter, &adapter->params.pci);
3793
3794 adapter->params.info = ai;
952cdf33 3795 adapter->params.nports = ai->nports0 + ai->nports1;
00b64f2a 3796 adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
4d22de3e 3797 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3798 /*
3799 * We used to only run the "adapter check task" once a second if
3800 * we had PHYs which didn't support interrupts (we would check
3801 * their link status once a second). Now we check other conditions
3802 * in that routine which could potentially impose a very high
3803 * interrupt load on the system. As such, we now always scan the
3804 * adapter state once a second ...
3805 */
3806 adapter->params.linkpoll_period = 10;
3807 adapter->params.stats_update_period = is_10G(adapter) ?
3808 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3809 adapter->params.pci.vpd_cap_addr =
3810 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3811 ret = get_vpd_params(adapter, &adapter->params.vpd);
3812 if (ret < 0)
3813 return ret;
3814
3815 if (reset && t3_reset_adapter(adapter))
3816 return -1;
3817
3818 t3_sge_prep(adapter, &adapter->params.sge);
3819
3820 if (adapter->params.vpd.mclk) {
3821 struct tp_params *p = &adapter->params.tp;
3822
3823 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3824 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3825 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3826
952cdf33 3827 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3828 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3829 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3830 p->cm_size = t3_mc7_size(&adapter->cm);
3831 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3832 p->chan_tx_size = p->pmtx_size / p->nchan;
3833 p->rx_pg_size = 64 * 1024;
3834 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3835 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3836 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3837 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3838 adapter->params.rev > 0 ? 12 : 6;
3839 }
3840
3841 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3842 t3_mc7_size(&adapter->pmtx) &&
3843 t3_mc7_size(&adapter->cm);
4d22de3e 3844
8ac3ba68 3845 if (is_offload(adapter)) {
3846 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3847 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3848 DEFAULT_NFILTERS : 0;
3849 adapter->params.mc5.nroutes = 0;
3850 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3851
3852 init_mtus(adapter->params.mtus);
3853 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3854 }
3855
3856 early_hw_init(adapter, ai);
3857 ret = init_parity(adapter);
3858 if (ret)
3859 return ret;
3860
3861 for_each_port(adapter, i) {
3862 u8 hw_addr[6];
04497982 3863 const struct port_type_info *pti;
3864 struct port_info *p = adap2pinfo(adapter, i);
3865
3866 while (!adapter->params.vpd.port_type[++j])
3867 ;
4d22de3e 3868
04497982 3869 pti = &port_types[adapter->params.vpd.port_type[j]];
3870 if (!pti->phy_prep) {
3871 CH_ALERT(adapter, "Invalid port type index %d\n",
3872 adapter->params.vpd.port_type[j]);
3873 return -EINVAL;
3874 }
3875
86c890ab 3876 p->phy.mdio.dev = adapter->port[i];
3877 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3878 ai->mdio_ops);
3879 if (ret)
3880 return ret;
4d22de3e 3881 mac_prep(&p->mac, adapter, j);
3882
3883 /*
3884 * The VPD EEPROM stores the base Ethernet address for the
3885 * card. A port's address is derived from the base by adding
3886 * the port's index to the base's low octet.
3887 */
3888 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3889 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
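		/*
		 * Illustrative example (made-up base address): with a VPD
		 * base of 00:07:43:ab:cd:10, port 0 gets 00:07:43:ab:cd:10
		 * and port 1 gets 00:07:43:ab:cd:11.
		 */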
3890
3891 memcpy(adapter->port[i]->dev_addr, hw_addr,
3892 ETH_ALEN);
3893 memcpy(adapter->port[i]->perm_addr, hw_addr,
3894 ETH_ALEN);
04497982 3895 init_link_config(&p->link_config, p->phy.caps);
4d22de3e 3896 p->phy.ops->power_down(&p->phy, 1);
3897
3898 /*
3899 * If the PHY doesn't support interrupts for link status
3900 * changes, schedule a scan of the adapter links at least
3901 * once a second.
3902 */
3903 if (!(p->phy.caps & SUPPORTED_IRQ) &&
3904 adapter->params.linkpoll_period > 10)
3905 adapter->params.linkpoll_period = 10;
3906 }
3907
3908 return 0;
3909}
3910
3911void t3_led_ready(struct adapter *adapter)
3912{
3913 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3914 F_GPIO0_OUT_VAL);
3915}
3916
3917int t3_replay_prep_adapter(struct adapter *adapter)
3918{
3919 const struct adapter_info *ai = adapter->params.info;
04497982 3920 unsigned int i, j = -1;
3921 int ret;
3922
3923 early_hw_init(adapter, ai);
3924 ret = init_parity(adapter);
3925 if (ret)
3926 return ret;
3927
3928 for_each_port(adapter, i) {
04497982 3929 const struct port_type_info *pti;
204e2f98 3930 struct port_info *p = adap2pinfo(adapter, i);
204e2f98 3931
3932 while (!adapter->params.vpd.port_type[++j])
3933 ;
3934
3935 pti = &port_types[adapter->params.vpd.port_type[j]];
0f07c4ee 3936 ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3937 if (ret)
3938 return ret;
204e2f98 3939 p->phy.ops->power_down(&p->phy, 1);
3940 }
3941
3942 return 0;
3943}
3944