/*****************************************************************************
 * File: cxgb2.c
 * $Revision: 1.25 $
 * $Date: 2005/06/22 00:43:25 $
 * Description:
 *  Chelsio 10Gb Ethernet Driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * http://www.chelsio.com
 *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Maintainers: maintainers@chelsio.com
 *
 * Authors: Dimitrios Michailidis <dm@chelsio.com>
 *          Tina Yang <tainay@chelsio.com>
 *          Felix Marti <felix@chelsio.com>
 *          Scott Bardone <sbardone@chelsio.com>
 *          Kurt Ottaway <kottaway@chelsio.com>
 *          Frank DiMambro <frank@chelsio.com>
 *
 * History:
 *
 ****************************************************************************/

38#include "common.h"
8199d3a7 39#include <linux/module.h>
8199d3a7
CL
40#include <linux/pci.h>
41#include <linux/netdevice.h>
42#include <linux/etherdevice.h>
43#include <linux/if_vlan.h>
44#include <linux/mii.h>
45#include <linux/sockios.h>
559fb51b 46#include <linux/dma-mapping.h>
7c0f6ba6 47#include <linux/uaccess.h>
8199d3a7 48
8199d3a7
CL
49#include "cpl5_cmd.h"
50#include "regs.h"
51#include "gmac.h"
52#include "cphy.h"
53#include "sge.h"
f1d3d38a 54#include "tp.h"
8199d3a7 55#include "espi.h"
f1d3d38a 56#include "elmer0.h"
8199d3a7 57
559fb51b 58#include <linux/workqueue.h>
8199d3a7 59
559fb51b
SB
60static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
61{
62 schedule_delayed_work(&ap->stats_update_task, secs * HZ);
63}
8199d3a7 64
559fb51b
SB
65static inline void cancel_mac_stats_update(struct adapter *ap)
66{
67 cancel_delayed_work(&ap->stats_update_task);
68}
8199d3a7 69
356bd146
FR
70#define MAX_CMDQ_ENTRIES 16384
71#define MAX_CMDQ1_ENTRIES 1024
72#define MAX_RX_BUFFERS 16384
73#define MAX_RX_JUMBO_BUFFERS 16384
8199d3a7
CL
74#define MAX_TX_BUFFERS_HIGH 16384U
75#define MAX_TX_BUFFERS_LOW 1536U
f1d3d38a 76#define MAX_TX_BUFFERS 1460U
356bd146 77#define MIN_FL_ENTRIES 32
8199d3a7 78
8199d3a7
CL
79#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
80 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
81 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
82
83/*
84 * The EEPROM is actually bigger but only the first few bytes are used so we
85 * only report those.
86 */
87#define EEPROM_SIZE 32
88
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;     /* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

static const char pci_speed[][4] = {
        "33", "66", "100", "133"
};

/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        struct t1_rx_mode rm;

        rm.dev = dev;
        mac->ops->set_rx_mode(mac, &rm);
}

static void link_report(struct port_info *p)
{
        if (!netif_carrier_ok(p->dev))
                netdev_info(p->dev, "link down\n");
        else {
                const char *s = "10Mbps";

                switch (p->link_config.speed) {
                case SPEED_10000: s = "10Gbps"; break;
                case SPEED_1000:  s = "1000Mbps"; break;
                case SPEED_100:   s = "100Mbps"; break;
                }

                netdev_info(p->dev, "link up, %s, %s-duplex\n",
                            s, p->link_config.duplex == DUPLEX_FULL
                            ? "full" : "half");
        }
}

void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct port_info *p = &adapter->port[port_id];

        if (link_stat != netif_carrier_ok(p->dev)) {
                if (link_stat)
                        netif_carrier_on(p->dev);
                else
                        netif_carrier_off(p->dev);
                link_report(p);

                /* multi-ports: inform toe */
                if ((speed > 0) && (adapter->params.nports > 1)) {
                        unsigned int sched_speed = 10;
                        switch (speed) {
                        case SPEED_1000:
                                sched_speed = 1000;
                                break;
                        case SPEED_100:
                                sched_speed = 100;
                                break;
                        case SPEED_10:
                                sched_speed = 10;
                                break;
                        }
                        t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
                }
        }
}

static void link_start(struct port_info *p)
{
        struct cmac *mac = p->mac;

        mac->ops->reset(mac);
        if (mac->ops->macaddress_set)
                mac->ops->macaddress_set(mac, p->dev->dev_addr);
        t1_set_rxmode(p->dev);
        t1_link_start(p->phy, mac, &p->link_config);
        mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static void enable_hw_csum(struct adapter *adapter)
{
        if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
                t1_tp_set_ip_checksum_offload(adapter->tp, 1);  /* for TSO only */
        t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
        int err = 0;

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = t1_init_hw_modules(adapter);
                if (err)
                        goto out_err;

                enable_hw_csum(adapter);
                adapter->flags |= FULL_INIT_DONE;
        }

        t1_interrupts_clear(adapter);

        adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
        err = request_irq(adapter->pdev->irq, t1_interrupt,
                          adapter->params.has_msi ? 0 : IRQF_SHARED,
                          adapter->name, adapter);
        if (err) {
                if (adapter->params.has_msi)
                        pci_disable_msi(adapter->pdev);

                goto out_err;
        }

        t1_sge_start(adapter->sge);
        t1_interrupts_enable(adapter);
out_err:
        return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t1_sge_stop(adapter->sge);
        t1_interrupts_disable(adapter);
        free_irq(adapter->pdev->irq, adapter);
        if (adapter->params.has_msi)
                pci_disable_msi(adapter->pdev);
}

static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->ml_priv;
        int other_ports = adapter->open_device_map & PORT_MASK;

        napi_enable(&adapter->napi);
        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
                napi_disable(&adapter->napi);
                return err;
        }

        __set_bit(dev->if_port, &adapter->open_device_map);
        link_start(&adapter->port[dev->if_port]);
        netif_start_queue(dev);
        if (!other_ports && adapter->params.stats_update_period)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);

        t1_vlan_mode(adapter, dev->features);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct cmac *mac = p->mac;

        netif_stop_queue(dev);
        napi_disable(&adapter->napi);
        mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
        netif_carrier_off(dev);

        clear_bit(dev->if_port, &adapter->open_device_map);
        if (adapter->params.stats_update_period &&
            !(adapter->open_device_map & PORT_MASK)) {
                /* Stop statistics accumulation. */
                smp_mb__after_atomic();
                spin_lock(&adapter->work_lock);   /* sync with update task */
                spin_unlock(&adapter->work_lock);
                cancel_mac_stats_update(adapter);
        }

        if (!adapter->open_device_map)
                cxgb_down(adapter);
        return 0;
}

static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct net_device_stats *ns = &p->netstats;
        const struct cmac_statistics *pstats;

        /* Do a full update of the MAC stats */
        pstats = p->mac->ops->statistics_update(p->mac,
                                                MAC_STATS_UPDATE_FULL);

        ns->tx_packets = pstats->TxUnicastFramesOK +
                pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

        ns->rx_packets = pstats->RxUnicastFramesOK +
                pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

        ns->tx_bytes = pstats->TxOctetsOK;
        ns->rx_bytes = pstats->RxOctetsOK;

        ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
                pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
        ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
                pstats->RxFCSErrors + pstats->RxAlignErrors +
                pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
                pstats->RxSymbolErrors + pstats->RxRuntErrors;

        ns->multicast = pstats->RxMulticastFramesOK;
        ns->collisions = pstats->TxTotalCollisions;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->RxFrameTooLongErrors +
                pstats->RxJabberErrors;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->RxFCSErrors;
        ns->rx_frame_errors = pstats->RxAlignErrors;
        ns->rx_fifo_errors = 0;
        ns->rx_missed_errors = 0;

        /* detailed tx_errors */
        ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->TxUnderrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = pstats->TxLateCollisions;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct adapter *adapter = dev->ml_priv;

        adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK",
        "TxOctetsBad",
        "TxUnicastFramesOK",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames",
        "TxFramesWithDeferredXmissions",
        "TxLateCollisions",
        "TxTotalCollisions",
        "TxFramesAbortedDueToXSCollisions",
        "TxUnderrun",
        "TxLengthErrors",
        "TxInternalMACXmitError",
        "TxFramesWithExcessiveDeferral",
        "TxFCSErrors",
        "TxJumboFramesOk",
        "TxJumboOctetsOk",

        "RxOctetsOK",
        "RxOctetsBad",
        "RxUnicastFramesOK",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames",
        "RxFCSErrors",
        "RxAlignErrors",
        "RxSymbolErrors",
        "RxDataErrors",
        "RxSequenceErrors",
        "RxRuntErrors",
        "RxJabberErrors",
        "RxInternalMACRcvError",
        "RxInRangeLengthErrors",
        "RxOutOfRangeLengthField",
        "RxFrameTooLongErrors",
        "RxJumboFramesOk",
        "RxJumboOctetsOk",

        /* Port stats */
        "RxCsumGood",
        "TxCsumOffload",
        "TxTso",
        "RxVlan",
        "TxVlan",
        "TxNeedHeadroom",

        /* Interrupt stats */
        "rx drops",
        "pure_rsps",
        "unhandled irqs",
        "respQ_empty",
        "respQ_overflow",
        "freelistQ_empty",
        "pkt_too_big",
        "pkt_mismatch",
        "cmdQ_full0",
        "cmdQ_full1",

        "espi_DIP2ParityErr",
        "espi_DIP4Err",
        "espi_RxDrops",
        "espi_TxDrops",
        "espi_RxOvfl",
        "espi_ParityErr"
};

#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = dev->ml_priv;

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
}

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        const struct cmac_statistics *s;
        const struct sge_intr_counts *t;
        struct sge_port_stats ss;

        s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
        t = t1_sge_get_intr_counts(adapter->sge);
        t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

        *data++ = s->TxOctetsOK;
        *data++ = s->TxOctetsBad;
        *data++ = s->TxUnicastFramesOK;
        *data++ = s->TxMulticastFramesOK;
        *data++ = s->TxBroadcastFramesOK;
        *data++ = s->TxPauseFrames;
        *data++ = s->TxFramesWithDeferredXmissions;
        *data++ = s->TxLateCollisions;
        *data++ = s->TxTotalCollisions;
        *data++ = s->TxFramesAbortedDueToXSCollisions;
        *data++ = s->TxUnderrun;
        *data++ = s->TxLengthErrors;
        *data++ = s->TxInternalMACXmitError;
        *data++ = s->TxFramesWithExcessiveDeferral;
        *data++ = s->TxFCSErrors;
        *data++ = s->TxJumboFramesOK;
        *data++ = s->TxJumboOctetsOK;

        *data++ = s->RxOctetsOK;
        *data++ = s->RxOctetsBad;
        *data++ = s->RxUnicastFramesOK;
        *data++ = s->RxMulticastFramesOK;
        *data++ = s->RxBroadcastFramesOK;
        *data++ = s->RxPauseFrames;
        *data++ = s->RxFCSErrors;
        *data++ = s->RxAlignErrors;
        *data++ = s->RxSymbolErrors;
        *data++ = s->RxDataErrors;
        *data++ = s->RxSequenceErrors;
        *data++ = s->RxRuntErrors;
        *data++ = s->RxJabberErrors;
        *data++ = s->RxInternalMACRcvError;
        *data++ = s->RxInRangeLengthErrors;
        *data++ = s->RxOutOfRangeLengthField;
        *data++ = s->RxFrameTooLongErrors;
        *data++ = s->RxJumboFramesOK;
        *data++ = s->RxJumboOctetsOK;

        *data++ = ss.rx_cso_good;
        *data++ = ss.tx_cso;
        *data++ = ss.tx_tso;
        *data++ = ss.vlan_xtract;
        *data++ = ss.vlan_insert;
        *data++ = ss.tx_need_hdrroom;

        *data++ = t->rx_drops;
        *data++ = t->pure_rsps;
        *data++ = t->unhandled_irqs;
        *data++ = t->respQ_empty;
        *data++ = t->respQ_overflow;
        *data++ = t->freelistQ_empty;
        *data++ = t->pkt_too_big;
        *data++ = t->pkt_mismatch;
        *data++ = t->cmdQ_full[0];
        *data++ = t->cmdQ_full[1];

        if (adapter->espi) {
                const struct espi_intr_counts *e;

                e = t1_espi_get_intr_counts(adapter->espi);
                *data++ = e->DIP2_parity_err;
                *data++ = e->DIP4_err;
                *data++ = e->rx_drops;
                *data++ = e->tx_drops;
                *data++ = e->rx_ovflw;
                *data++ = e->parity_err;
        }
}

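/*
 * Copy the 32-bit registers in [start, end] into the ethtool register dump
 * buffer, at the same offsets they occupy in the chip's register map.
 */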
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = readl(ap->regs + start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->ml_priv;

        /*
         * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
         */
        regs->version = 2;

        memset(buf, 0, T2_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
        reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
        reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
        reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
        reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
        reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
        reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
        reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
        reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
        reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}

static int get_link_ksettings(struct net_device *dev,
                              struct ethtool_link_ksettings *cmd)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        u32 supported, advertising;

        supported = p->link_config.supported;
        advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->base.speed = p->link_config.speed;
                cmd->base.duplex = p->link_config.duplex;
        } else {
                cmd->base.speed = SPEED_UNKNOWN;
                cmd->base.duplex = DUPLEX_UNKNOWN;
        }

        cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->base.phy_address = p->phy->mdio.prtad;
        cmd->base.autoneg = p->link_config.autoneg;

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                advertising);

        return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)

static int set_link_ksettings(struct net_device *dev,
                              const struct ethtool_link_ksettings *cmd)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;
        u32 advertising;

        ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                cmd->link_modes.advertising);

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;     /* can't change speed/duplex */

        if (cmd->base.autoneg == AUTONEG_DISABLE) {
                u32 speed = cmd->base.speed;
                int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

                if (!(lc->supported & cap) || (speed == SPEED_1000))
                        return -EINVAL;
                lc->requested_speed = speed;
                lc->requested_duplex = cmd->base.duplex;
                lc->advertising = 0;
        } else {
                advertising &= ADVERTISED_MASK;
                if (advertising & (advertising - 1))
                        advertising = lc->supported;
                advertising &= lc->supported;
                if (!advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->base.autoneg;
        if (netif_running(dev))
                t1_link_start(p->phy, p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t1_link_start(p->phy, p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
                                                         lc->fc);
        }
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->ml_priv;
        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_CMDQ_ENTRIES;

        e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
        e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
        e->tx_pending = adapter->params.sge.cmdQ_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->ml_priv;
        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_CMDQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
        adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
        adapter->params.sge.cmdQ_size[0] = e->tx_pending;
        adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
                MAX_CMDQ1_ENTRIES : e->tx_pending;
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->ml_priv;

        adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
        adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
        adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
        t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->ml_priv;

        c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
        c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
        c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
        return 0;
}

static int get_eeprom_len(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;

        return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}

#define EEPROM_MAGIC(ap) \
        (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i;
        u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
        struct adapter *adapter = dev->ml_priv;

        e->magic = EEPROM_MAGIC(adapter);
        for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
                t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
        memcpy(data, buf + e->offset, e->len);
        return 0;
}

static const struct ethtool_ops t1_ethtool_ops = {
        .get_drvinfo = get_drvinfo,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .get_ringparam = get_sge_param,
        .set_ringparam = set_sge_param,
        .get_coalesce = get_coalesce,
        .set_coalesce = set_coalesce,
        .get_eeprom_len = get_eeprom_len,
        .get_eeprom = get_eeprom,
        .get_pauseparam = get_pauseparam,
        .set_pauseparam = set_pauseparam,
        .get_link = ethtool_op_get_link,
        .get_strings = get_strings,
        .get_sset_count = get_sset_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len = get_regs_len,
        .get_regs = get_regs,
        .get_link_ksettings = get_link_ksettings,
        .set_link_ksettings = set_link_ksettings,
};

static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        struct adapter *adapter = dev->ml_priv;
        struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;

        return mdio_mii_ioctl(mdio, if_mii(req), cmd);
}

static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
        int ret;
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;

        if (!mac->ops->set_mtu)
                return -EOPNOTSUPP;
        if ((ret = mac->ops->set_mtu(mac, new_mtu)))
                return ret;
        dev->mtu = new_mtu;
        return 0;
}

static int t1_set_mac_addr(struct net_device *dev, void *p)
{
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        struct sockaddr *addr = p;

        if (!mac->ops->macaddress_set)
                return -EOPNOTSUPP;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        mac->ops->macaddress_set(mac, dev->dev_addr);
        return 0;
}

static netdev_features_t t1_fix_features(struct net_device *dev,
                                         netdev_features_t features)
{
        /*
         * Since there is no support for separate rx/tx vlan accel
         * enable/disable make sure tx flag is always in same state as rx.
         */
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                features |= NETIF_F_HW_VLAN_CTAG_TX;
        else
                features &= ~NETIF_F_HW_VLAN_CTAG_TX;

        return features;
}

static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
        netdev_features_t changed = dev->features ^ features;
        struct adapter *adapter = dev->ml_priv;

        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
                t1_vlan_mode(adapter, features);

        return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
        unsigned long flags;
        struct adapter *adapter = dev->ml_priv;

        local_irq_save(flags);
        t1_interrupt(adapter->pdev->irq, adapter);
        local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics. This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(struct work_struct *work)
{
        int i;
        struct adapter *adapter =
                container_of(work, struct adapter, stats_update_task.work);

        for_each_port(adapter, i) {
                struct port_info *p = &adapter->port[i];

                if (netif_running(p->dev))
                        p->mac->ops->statistics_update(p->mac,
                                                       MAC_STATS_UPDATE_FAST);
        }

        /* Schedule the next statistics update if any port is active. */
        spin_lock(&adapter->work_lock);
        if (adapter->open_device_map & PORT_MASK)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);
        spin_unlock(&adapter->work_lock);
}

/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
        struct adapter *adapter =
                container_of(work, struct adapter, ext_intr_handler_task);

        t1_elmer0_ext_intr_handler(adapter);

        /* Now reenable external interrupts */
        spin_lock_irq(&adapter->async_lock);
        adapter->slow_intr_mask |= F_PL_INTR_EXT;
        writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
               adapter->regs + A_PL_ENABLE);
        spin_unlock_irq(&adapter->async_lock);
}

/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
        /*
         * Schedule a task to handle external interrupts as we require
         * a process context. We disable EXT interrupts in the interim
         * and let the task reenable them when it's done.
         */
        adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
               adapter->regs + A_PL_ENABLE);
        schedule_work(&adapter->ext_intr_handler_task);
}

void t1_fatal_err(struct adapter *adapter)
{
        if (adapter->flags & FULL_INIT_DONE) {
                t1_sge_stop(adapter->sge);
                t1_interrupts_disable(adapter);
        }
        pr_alert("%s: encountered fatal error, operation suspended\n",
                 adapter->name);
}

static const struct net_device_ops cxgb_netdev_ops = {
        .ndo_open = cxgb_open,
        .ndo_stop = cxgb_close,
        .ndo_start_xmit = t1_start_xmit,
        .ndo_get_stats = t1_get_stats,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_rx_mode = t1_set_rxmode,
        .ndo_do_ioctl = t1_ioctl,
        .ndo_change_mtu = t1_change_mtu,
        .ndo_set_mac_address = t1_set_mac_addr,
        .ndo_fix_features = t1_fix_features,
        .ndo_set_features = t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = t1_netpoll,
#endif
};

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int i, err, pci_using_dac = 0;
        unsigned long mmio_start, mmio_len;
        const struct board_info *bi;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);

        err = pci_enable_device(pdev);
        if (err)
                return err;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                pr_err("%s: cannot find PCI device memory base address\n",
                       pci_name(pdev));
                err = -ENODEV;
                goto out_disable_pdev;
        }

        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;

                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
                        pr_err("%s: unable to obtain 64-bit DMA for "
                               "consistent allocations\n", pci_name(pdev));
                        err = -ENODEV;
                        goto out_disable_pdev;
                }

        } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
                pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        pci_set_master(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        bi = t1_get_board_info(ent->driver_data);

        for (i = 0; i < bi->port_number; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                if (!adapter) {
                        adapter = netdev_priv(netdev);
                        adapter->pdev = pdev;
                        adapter->port[0].dev = netdev;  /* so we don't leak it */

                        adapter->regs = ioremap(mmio_start, mmio_len);
                        if (!adapter->regs) {
                                pr_err("%s: cannot map device registers\n",
                                       pci_name(pdev));
                                err = -ENOMEM;
                                goto out_free_dev;
                        }

                        if (t1_get_board_rev(adapter, bi, &adapter->params)) {
                                err = -ENODEV;  /* Can't handle this chip rev */
                                goto out_free_dev;
                        }

                        adapter->name = pci_name(pdev);
                        adapter->msg_enable = dflt_msg_enable;
                        adapter->mmio_len = mmio_len;

                        spin_lock_init(&adapter->tpi_lock);
                        spin_lock_init(&adapter->work_lock);
                        spin_lock_init(&adapter->async_lock);
                        spin_lock_init(&adapter->mac_lock);

                        INIT_WORK(&adapter->ext_intr_handler_task,
                                  ext_intr_task);
                        INIT_DELAYED_WORK(&adapter->stats_update_task,
                                          mac_stats_task);

                        pci_set_drvdata(pdev, netdev);
                }

                pi = &adapter->port[i];
                pi->dev = netdev;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->if_port = i;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->ml_priv = adapter;
                netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_RXCSUM;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_LLTX;

                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;
                if (vlan_tso_capable(adapter)) {
                        netdev->features |=
                                NETIF_F_HW_VLAN_CTAG_TX |
                                NETIF_F_HW_VLAN_CTAG_RX;
                        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

                        /* T204: disable TSO */
                        if (!(is_T2(adapter)) || bi->port_number != 4) {
                                netdev->hw_features |= NETIF_F_TSO;
                                netdev->features |= NETIF_F_TSO;
                        }
                }

                netdev->netdev_ops = &cxgb_netdev_ops;
                netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
                        sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

                netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

                netdev->ethtool_ops = &t1_ethtool_ops;

                switch (bi->board) {
                case CHBT_BOARD_CHT110:
                case CHBT_BOARD_N110:
                case CHBT_BOARD_N210:
                case CHBT_BOARD_CHT210:
                        netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
                                          (ETH_HLEN + ETH_FCS_LEN);
                        break;
                case CHBT_BOARD_CHN204:
                        netdev->max_mtu = VSC7326_MAX_MTU;
                        break;
                default:
                        netdev->max_mtu = ETH_DATA_LEN;
                        break;
                }
        }

        if (t1_init_sw_modules(adapter, bi) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go. If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully. However we must
         * register at least one net device.
         */
        for (i = 0; i < bi->port_number; ++i) {
                err = register_netdev(adapter->port[i].dev);
                if (err)
                        pr_warn("%s: cannot register net device %s, skipping\n",
                                pci_name(pdev), adapter->port[i].dev->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i].dev->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                pr_err("%s: could not register any net devices\n",
                       pci_name(pdev));
                goto out_release_adapter_res;
        }

        pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
                adapter->name, bi->desc, adapter->params.chip_revision,
                adapter->params.pci.is_pcix ? "PCIX" : "PCI",
                adapter->params.pci.speed, adapter->params.pci.width);

        /*
         * Set the T1B ASIC and memory clocks.
         */
        if (t1powersave)
                adapter->t1powersave = LCLOCK;  /* HW default is powersave mode. */
        else
                adapter->t1powersave = HCLOCK;
        if (t1_is_T1B(adapter))
                t1_clock(adapter, t1powersave);

        return 0;

out_release_adapter_res:
        t1_free_sw_modules(adapter);
out_free_dev:
        if (adapter) {
                if (adapter->regs)
                        iounmap(adapter->regs);
                for (i = bi->port_number - 1; i >= 0; --i)
                        if (adapter->port[i].dev)
                                free_netdev(adapter->port[i].dev);
        }
        pci_release_regions(pdev);
out_disable_pdev:
        pci_disable_device(pdev);
        return err;
}

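/*
 * Serially shift 'bitdata' out MSB-first on the ELMER0 GPO data bit,
 * pulsing the clock bit low then high once per data bit. t1_clock() below
 * uses this to program the clock-synthesizer divider values.
 */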
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
        int data;
        int i;
        u32 val;

        enum {
                S_CLOCK = 1 << 3,
                S_DATA = 1 << 4
        };

        for (i = (nbits - 1); i > -1; i--) {

                udelay(50);

                data = ((bitdata >> i) & 0x1);
                __t1_tpi_read(adapter, A_ELMER0_GPO, &val);

                if (data)
                        val |= S_DATA;
                else
                        val &= ~S_DATA;

                udelay(50);

                /* Set SCLOCK low */
                val &= ~S_CLOCK;
                __t1_tpi_write(adapter, A_ELMER0_GPO, val);

                udelay(50);

                /* Write SCLOCK high */
                val |= S_CLOCK;
                __t1_tpi_write(adapter, A_ELMER0_GPO, val);

        }
}

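/*
 * Switch a T1B between its power-save (LCLOCK) and full-speed (HCLOCK)
 * settings by reprogramming the core and memory clock synthesizers through
 * the ELMER0 GPO register, using bit_bang() above for the serial stream.
 */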
static int t1_clock(struct adapter *adapter, int mode)
{
        u32 val;
        int M_CORE_VAL;
        int M_MEM_VAL;

        enum {
                M_CORE_BITS = 9,
                T_CORE_VAL = 0,
                T_CORE_BITS = 2,
                N_CORE_VAL = 0,
                N_CORE_BITS = 2,
                M_MEM_BITS = 9,
                T_MEM_VAL = 0,
                T_MEM_BITS = 2,
                N_MEM_VAL = 0,
                N_MEM_BITS = 2,
                NP_LOAD = 1 << 17,
                S_LOAD_MEM = 1 << 5,
                S_LOAD_CORE = 1 << 6,
                S_CLOCK = 1 << 3
        };

        if (!t1_is_T1B(adapter))
                return -ENODEV; /* Can't re-clock this chip. */

        if (mode & 2)
                return 0;       /* show current mode. */

        if ((adapter->t1powersave & 1) == (mode & 1))
                return -EALREADY;       /* ASIC already running in mode. */

        if ((mode & 1) == HCLOCK) {
                M_CORE_VAL = 0x14;
                M_MEM_VAL = 0x18;
                adapter->t1powersave = HCLOCK;  /* overclock */
        } else {
                M_CORE_VAL = 0xe;
                M_MEM_VAL = 0x10;
                adapter->t1powersave = LCLOCK;  /* underclock */
        }

        /* Don't interrupt this serial stream! */
        spin_lock(&adapter->tpi_lock);

        /* Initialize for ASIC core */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= NP_LOAD;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_CORE;
        val &= ~S_CLOCK;
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Serial program the ASIC clock synthesizer */
        bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
        bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
        bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
        udelay(50);

        /* Finish ASIC core */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= S_LOAD_CORE;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_CORE;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Initialize for memory */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= NP_LOAD;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_MEM;
        val &= ~S_CLOCK;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Serial program the memory clock synthesizer */
        bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
        bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
        bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
        udelay(50);

        /* Finish memory */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= S_LOAD_MEM;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_MEM;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);

        spin_unlock(&adapter->tpi_lock);

        return 0;
}

static inline void t1_sw_reset(struct pci_dev *pdev)
{
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

static void remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct adapter *adapter = dev->ml_priv;
        int i;

        for_each_port(adapter, i) {
                if (test_bit(i, &adapter->registered_device_map))
                        unregister_netdev(adapter->port[i].dev);
        }

        t1_free_sw_modules(adapter);
        iounmap(adapter->regs);

        while (--i >= 0) {
                if (adapter->port[i].dev)
                        free_netdev(adapter->port[i].dev);
        }

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        t1_sw_reset(pdev);
}

static struct pci_driver cxgb_pci_driver = {
        .name = DRV_NAME,
        .id_table = t1_pci_tbl,
        .probe = init_one,
        .remove = remove_one,
};

module_pci_driver(cxgb_pci_driver);