/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
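
/*
 * Usage example (illustrative): "modprobe cxgb3 msi=0" forces legacy pin
 * interrupts, which can help on platforms with broken MSI/MSI-X support;
 * the default of 2 lets the driver pick the best available scheme itself.
 */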

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the net device whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, %s-duplex\n",
			    s, p->link_config.duplex == DUPLEX_FULL
			    ? "full" : "half");
	}
}

static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}
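
/*
 * Note: F_ENDROPPKT appears to make the XGMAC discard packets rather than
 * back-pressure, which drains the TX FIFO while the link is down;
 * enable_tx_fifo_drain() also rewrites the RX/TX enables so the drain
 * takes effect, and disable_tx_fifo_drain() clears the bit on link-up.
 */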

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index of the PHY whose module changed
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		netdev_info(dev, "PHY module unplugged\n");
	else
		netdev_info(dev, "%s PHY module inserted\n",
			    mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
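
/*
 * Example of the resulting names (illustrative, depends on interface
 * naming): vector 0 keeps the adapter name, while the data vectors become
 * "eth0-0", "eth0-1", ..., "eth1-2", ..., using each port's global first
 * queue-set index as the suffix.
 */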

static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 10;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
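
/*
 * Note: with 10 attempts of msleep(10) each, await_mgmt_replies() gives
 * the firmware roughly 100 ms to post the expected number of replies on
 * response queue 0 before giving up with -ETIMEDOUT.
 */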

static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = __skb_put_zero(skb, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE + 1];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}
	rspq_map[RSS_TABLE_SIZE] = 0xffff;	/* terminator */

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
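
/*
 * Worked example (illustrative): with nq0 = 2 and nq1 = 2, the lower half
 * of rspq_map cycles through port 0's queue sets {0, 1, 0, 1, ...} and the
 * upper half through port 1's {2, 3, 2, 3, ...}, port 1's entries being
 * offset by nq0.
 */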

static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static const struct attribute_group cxgb3_attr_group = {
	.attrs = cxgb3_attrs,
};
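
/*
 * The limit checks in set_nfilters()/set_nservers() reflect how the MC5
 * TCAM is partitioned: filters, server entries and (on offload-capable
 * adapters) a minimum number of TIDs must together fit within
 * t3_mc5_size() entries, so each setter checks its value against the
 * space left by the others.
 */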

static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
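
/*
 * Rate arithmetic above (assuming vpd.cclk is the core clock in kHz):
 * each scheduler grants bpt bytes every cpt core-clock ticks, so
 * bytes/sec = (cclk * 1000 / cpt) * bpt, and dividing by 125 converts
 * bytes/sec to Kbps (1 Kbps = 125 bytes/sec).
 */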

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static const struct attribute_group offload_attr_group = {
	.attrs = offload_attrs,
};

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
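
/*
 * A_TP_MTU_PORT_TABLE packs both port MTUs into a single 32-bit register:
 * port 0 in the low 16 bits and port 1 (if present) in the high 16 bits,
 * which is what the shift by 16 above provides.
 */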

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}

static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}
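
/*
 * bind_qsets() issues one PKTSCHED_SET management command per queue set,
 * binding each of a port's queue sets to that port.  The lo/hi arguments
 * of -1 presumably leave the scheduler's min/max rate parameters at their
 * defaults; only the queue-to-port binding is established here.
 */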

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
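
/*
 * Example expansion (hypothetical version numbers): with FW version
 * 7.12.0, FW_FNAME becomes "cxgb3/t3fw-7.12.0.bin"; TPSRAM_NAME's %c is
 * filled in with the chip revision letter ('b' or 'c') by update_tpsram()
 * below.
 */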

static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	const char *fw_name;
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret = -EINVAL;

	fw_name = get_edc_fw_name(edc_idx);
	if (fw_name)
		ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			fw_name);
		return ret;
	}

	/* check size, taking the trailing checksum word into account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}
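
/*
 * Checksum convention used above: the EDC image apparently carries a
 * trailing 32-bit word chosen so that the sum of all big-endian words in
 * the file wraps to 0xffffffff; any single corrupted word makes the
 * accumulated sum miss that value.
 */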

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			FW_FNAME);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 *	t3_synchronize_rx - wait for current Rx processing on a port to complete
 *	@adap: the adapter
 *	@p: the port
 *
 *	Ensures that current Rx processing on any of the queues associated with
 *	the given port completes before returning.  We do this by acquiring and
 *	releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}

static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->params.rev > 0) {
		t3_set_vlan_accel(adapter, 1 << pi->port_id,
				  features & NETIF_F_HW_VLAN_CTAG_RX);
	} else {
		/* single control for all ports */
		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

		for_each_port(adapter, i)
			have_vlans |=
				adapter->port[i]->features &
				NETIF_F_HW_VLAN_CTAG_RX;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int i, err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		for_each_port(adap, i)
			cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		int ret = bind_qsets(adap);

		if (ret < 0) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			err = ret;
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	t3_sge_stop(adapter);
	if (!on_wq)
		flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}
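
/*
 * The on_wq flag exists because cxgb_down() can be reached from a work
 * item running on cxgb3_wq itself; flushing a workqueue from inside one
 * of its own work items would deadlock, so the flush is skipped in that
 * case.
 */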

static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
		(HZ * adap->params.linkpoll_period) / 10 :
		adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
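
/*
 * The (HZ * linkpoll_period) / 10 conversion suggests linkpoll_period is
 * expressed in tenths of a second, whereas stats_update_period is in
 * whole seconds.
 */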

static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct t3c_data *td = T3C_DATA(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_work(&td->tid_release_task);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter, 0);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			pr_warn("Could not initialize offload capabilities\n");
	}

	netif_set_real_num_tx_queues(dev, pi->nqsets);
	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
	if (err)
		return err;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}

static int __cxgb_close(struct net_device *dev, int on_wq)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter, on_wq);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	return __cxgb_close(dev, 0);
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &dev->stats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
		pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
		pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

	"LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
	if (fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = 0;
	*data++ = 0;
	*data++ = 0;
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}
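
/*
 * The three zeros above fill the "LroAggregated", "LroFlushed" and
 * "LroNoDesc" slots in stats_strings; the counters are gone but the
 * placeholders are presumably kept so existing tools see a stable layout.
 */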

static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}
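
/*
 * Note: "buf + start" relies on arithmetic on void pointers (a GCC
 * extension the kernel builds with), so each register's value lands in
 * the buffer at the same byte offset as its address in the chip's
 * register space.
 */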

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int set_phys_id(struct net_device *dev,
		       enum ethtool_phys_id_state state)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_OFF:
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
		break;

	case ETHTOOL_ID_ON:
	case ETHTOOL_ID_INACTIVE:
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 F_GPIO0_OUT_VAL);
	}

	return 0;
}
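
/*
 * Usage note: this is the handler behind "ethtool -p ethX".  Returning 1
 * from ETHTOOL_ID_ACTIVE asks the ethtool core to alternate ID_ON/ID_OFF
 * callbacks about once per second, blinking the GPIO0-driven LED until
 * the identify operation is interrupted.
 */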

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct port_info *p = netdev_priv(dev);
	u32 supported;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						p->link_config.supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						p->link_config.advertising);

	if (netif_carrier_ok(dev)) {
		cmd->base.speed = p->link_config.speed;
		cmd->base.duplex = p->link_config.duplex;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->base.phy_address = p->phy.mdio.prtad;
	cmd->base.autoneg = p->link_config.autoneg;
	return 0;
}
1832
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

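/*
 * Link modes the driver knows how to advertise; user-requested
 * advertising is masked against this set before being applied.
 */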
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)

static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->base.autoneg == AUTONEG_DISABLE) {
			u32 speed = cmd->base.speed;
			int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		u32 speed = cmd->base.speed;
		int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

		/* 1000BASE-T may not be forced; it requires autonegotiation */
		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->base.duplex;
		lc->advertising = 0;
	} else {
		advertising &= ADVERTISED_MASK;
		advertising &= lc->supported;
		if (!advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->base.autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp;
	struct sge_qset *qs;
	int i;

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	for (i = 0; i < pi->nqsets; i++) {
		qsp = &adapter->params.sge.qset[i];
		qs = &adapter->sge.qs[i];
		qsp->coalesce_usecs = c->rx_coalesce_usecs;
		t3_update_qset_coalesce(qs, qsp);
	}

	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	/* set_coalesce() programs a single value, so qset 0 speaks for all */
	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

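/*
 * EEPROM writes go out one 32-bit word at a time.  When the user's range
 * is not word-aligned the edge words are read back first and merged
 * (read-modify-write), and the write-protect latch is released only for
 * the duration of the update.
 */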
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.set_phys_id = set_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};

/* A negative value means "leave this parameter unchanged" and always passes */
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}

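/*
 * Handler for the Chelsio-private SIOCCHIOCTL ioctl.  Each sub-command
 * copies its argument structure in from user space, validates it, and
 * where appropriate checks capabilities before touching the hardware.
 */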
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.qset[i];
					q->polling = t.polling;
				}
			}
		}

		if (t.lro >= 0) {
			if (t.lro)
				dev->wanted_features |= NETIF_F_GRO;
			else
				dev->wanted_features &= ~NETIF_F_GRO;
			netdev_update_features(dev);
		}

		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;
		/* qset_idx comes from user space; clamp it under speculation
		 * as well, to close the Spectre v1 out-of-bounds read. */
		t.qset_idx = array_index_nospec(t.qset_idx, nqsets);

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = !!(dev->features & NETIF_F_GRO);
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		memset(&edata, 0, sizeof(struct ch_reg));

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* Check t.len sanity ? */
		fw_data = memdup_user(useraddr + sizeof(t), t.len);
		if (IS_ERR(fw_data))
			return PTR_ERR(fw_data);

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;	/* not one of 16KB..16MB */
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Convert phy_id from older PRTAD/DEVAD format */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		/* FALLTHRU */
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}

static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		cxgb_vlan_mode(dev, features);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		/* MSI-X handlers take the qset as dev_id, INTx/MSI handlers
		 * take the adapter; pass whichever this handler expects. */
		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}

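/*
 * T3B2 MAC watchdog.  A status of 1 from t3b2_mac_watchdog_task() means
 * the MAC was toggled to unwedge it; 2 means it had to be torn down and
 * reprogrammed from scratch.
 */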
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}


static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMAC's to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		/* One status bit per free list and two free lists per qset:
		 * walk the bits, alternating fl[0]/fl[1] and advancing to
		 * the next qset every second bit. */
		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}

static void db_full_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_full_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
}

static void db_empty_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_empty_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
}

static void db_drop_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_drop_task);
	unsigned long delay = 1000;
	unsigned short r;

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);

	/*
	 * Sleep a while before ringing the driver qset dbs.
	 * The delay is between 1000-2023 usecs.
	 */
	get_random_bytes(&r, 2);
	delay += r & 1023;
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(delay));
	ring_dbs(adapter);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}

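/*
 * Quiesce the adapter: notify the offload stack, close every running
 * port, stop the SGE timers, and optionally reset the chip.  Shared by
 * the fatal error path and PCI error recovery.
 */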
static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			__cxgb_close(netdev, on_wq);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}

static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);
	pci_save_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}

static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}

/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

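/*
 * Handle a fatal hardware error: quiesce the SGE and both XGMACs
 * immediately, then defer the actual reset to fatal_error_task in
 * process context.
 */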
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	t3_adapter_error(adapter, 0, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	rtnl_lock();
	t3_resume_ports(adapter);
	rtnl_unlock();
}

static const struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = netif_get_num_default_rss_queues();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}

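/*
 * Request one MSI-X vector per queue set plus one for errors and async
 * notifications, settling for as few as nports + 1 before the caller
 * falls back to MSI or INTx.
 */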
static int cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;

	vectors = pci_enable_msix_range(adap->pdev, entries,
					adap->params.nports + 1, vectors);
	if (vectors < 0)
		return vectors;

	for (i = 0; i < vectors; ++i)
		adap->msix_info[i].vec = entries[i].vector;
	adap->msix_nvectors = vectors;

	return 0;
}

static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
			    ai->desc, pi->phy.desc,
			    is_offload(adap) ? "R" : "", adap->params.rev, buf,
			    (adap->flags & USING_MSIX) ? " MSI-X" :
			    (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
				adap->name, t3_mc7_size(&adap->cm) >> 20,
				t3_mc7_size(&adap->pmtx) >> 20,
				t3_mc7_size(&adap->pmrx) >> 20,
				adap->params.vpd.sn);
	}
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_fix_features = cxgb_fix_features,
	.ndo_set_features = cxgb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};

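/*
 * Derive the iSCSI MAC from the port's LAN MAC by setting a bit in byte
 * 3, so both addresses can be in use at the same time.
 */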
static void cxgb3_init_iscsi_mac(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
	pi->iscsic.mac_addr[3] |= 0x80;
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
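/*
 * PCI probe: enable the device, map BAR0, allocate the adapter and one
 * net_device per port, then register the ports, the offload device, and
 * the interrupt vectors.
 */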
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			pr_err("cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		goto out_disable_device;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_release_regions;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_release_regions;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_release_regions;
	}

	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);

	INIT_WORK(&adapter->db_full_task, db_full_task);
	INIT_WORK(&adapter->db_empty_task, db_empty_task);
	INIT_WORK(&adapter->db_drop_task, db_drop_task);

	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features |= netdev->hw_features |
				    NETIF_F_HW_VLAN_CTAG_TX;
		netdev->vlan_features |= netdev->features & VLAN_FEAT;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->netdev_ops = &cxgb_netdev_ops;
		netdev->ethtool_ops = &cxgb_ethtool_ops;
		netdev->min_mtu = 81;
		netdev->max_mtu = ETH_MAX_MTU;
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		if (adapter->nofail_skb)
			kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);