/*****************************************************************************
 *                                                                           *
 * File: cxgb2.c                                                             *
 * $Revision: 1.25 $                                                         *
 * $Date: 2005/06/22 00:43:25 $                                              *
 * Description:                                                              *
 *  Chelsio 10Gb Ethernet Driver.                                            *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                  *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
#include "elmer0.h"

#include <linux/workqueue.h>
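
/*
 * Helpers around the delayed work that periodically pulls the MAC
 * statistics out of the hardware (see mac_stats_task() below).
 */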
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}

#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MAX_TX_BUFFERS		1460U
#define MIN_FL_ENTRIES 32

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

static const char pci_speed[][4] = {
	"33", "66", "100", "133"
};

/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct t1_rx_mode rm;

	rm.dev = dev;
	rm.idx = 0;
	rm.list = dev->mc_list;
	mac->ops->set_rx_mode(mac, &rm);
}

static void link_report(struct port_info *p)
{
	if (!netif_carrier_ok(p->dev))
		printk(KERN_INFO "%s: link down\n", p->dev->name);
	else {
		const char *s = "10Mbps";

		switch (p->link_config.speed) {
		case SPEED_10000: s = "10Gbps"; break;
		case SPEED_1000:  s = "1000Mbps"; break;
		case SPEED_100:   s = "100Mbps"; break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
		       p->dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
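
/*
 * Link state change notification: updates the carrier state, logs the
 * change, and on multi-port adapters passes the negotiated speed on to
 * the SGE scheduler.
 */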
void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct port_info *p = &adapter->port[port_id];

	if (link_stat != netif_carrier_ok(p->dev)) {
		if (link_stat)
			netif_carrier_on(p->dev);
		else
			netif_carrier_off(p->dev);
		link_report(p);

		/* multi-ports: inform toe */
		if ((speed > 0) && (adapter->params.nports > 1)) {
			unsigned int sched_speed = 10;
			switch (speed) {
			case SPEED_1000:
				sched_speed = 1000;
				break;
			case SPEED_100:
				sched_speed = 100;
				break;
			case SPEED_10:
				sched_speed = 10;
				break;
			}
			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
		}
	}
}

static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->flags & TSO_CAPABLE)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	if (adapter->flags & UDP_CSUM_CAPABLE)
		t1_tp_set_udp_checksum_offload(adapter->tp, 1);
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}
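
/*
 * net_device open handler: the first port to be opened brings the whole
 * adapter up via cxgb_up(); later ports only start their own MAC and queue.
 */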
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->priv;
	int other_ports = adapter->open_device_map & PORT_MASK;

	napi_enable(&adapter->napi);
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		napi_disable(&adapter->napi);
		return err;
	}

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_clear_bit();
		spin_lock(&adapter->work_lock);	/* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
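
/*
 * Fill in the standard net_device_stats structure from the MAC counters.
 */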
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &p->netstats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->RxFCSErrors;
	ns->rx_frame_errors = pstats->RxAlignErrors;
	ns->rx_fifo_errors = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = pstats->TxLateCollisions;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->priv;

	adapter->msg_enable = val;
}
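
/*
 * Names for the ethtool statistics, in the same order in which get_stats()
 * below fills in the values.
 */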
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",

	/* Port stats */
	"RxPackets",
	"RxCsumGood",
	"TxPackets",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};

#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = dev->priv;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(adapter->pdev));
}

static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;
	unsigned int len;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);

	len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK);
	memcpy(data, &s->TxOctetsOK, len);
	data += len;

	len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK);
	memcpy(data, &s->RxOctetsOK, len);
	data += len;

	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
	memcpy(data, &ss, sizeof(ss));
	data += sizeof(ss);

	t = t1_sge_get_intr_counts(adapter->sge);
	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}
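
/*
 * Copy a contiguous block of 32-bit registers [start, end] into the
 * register dump buffer at its natural byte offset.
 */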
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy->addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return (adapter->flags & RX_CSUM_ENABLED) != 0;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct adapter *adapter = dev->priv;

	if (data)
		adapter->flags |= RX_CSUM_ENABLED;
	else
		adapter->flags &= ~RX_CSUM_ENABLED;
	return 0;
}

static int set_tso(struct net_device *dev, u32 value)
{
	struct adapter *adapter = dev->priv;

	if (!(adapter->flags & TSO_CAPABLE))
		return value ? -EOPNOTSUPP : 0;
	return ethtool_op_set_tso(dev, value);
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_CMDQ_ENTRIES;

	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
	e->rx_mini_pending = 0;
	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
	e->tx_pending = adapter->params.sge.cmdQ_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;

	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;

	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
	return 0;
}

static int get_eeprom_len(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}

#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->priv;

	e->magic = EEPROM_MAGIC(adapter);
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}

static const struct ethtool_ops t1_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_rx_csum       = get_rx_csum,
	.set_rx_csum       = set_rx_csum,
	.set_tx_csum       = ethtool_op_set_tx_csum,
	.set_sg            = ethtool_op_set_sg,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_stats_count   = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.set_tso           = set_tso,
};
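
/*
 * ioctl handler: only the MII register access ioctls (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG) are supported, via the PHY's MDIO operations.
 */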
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct adapter *adapter = dev->priv;
	struct mii_ioctl_data *data = if_mii(req);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->port[dev->if_port].phy->addr;
		/* FALLTHRU */
	case SIOCGMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;
		u32 val;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
			       &val);
		data->val_out = val;
		break;
	}
	case SIOCSMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
				data->val_in);
		break;
	}

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;

	if (!mac->ops->set_mtu)
		return -EOPNOTSUPP;
	if (new_mtu < 68)
		return -EINVAL;
	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	return 0;
}

static int t1_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct sockaddr *addr = p;

	if (!mac->ops->macaddress_set)
		return -EOPNOTSUPP;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mac->ops->macaddress_set(mac, dev->dev_addr);
	return 0;
}

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
static void vlan_rx_register(struct net_device *dev,
			     struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	adapter->vlan_grp = grp;
	t1_set_vlan_accel(adapter, grp != NULL);
	spin_unlock_irq(&adapter->async_lock);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->priv;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(struct work_struct *work)
{
	int i;
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}

/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}

/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}

void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	CH_ALERT("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}
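
/*
 * PCI probe routine: allocates one net_device per port, maps the register
 * space, and sets up the adapter-wide state while handling the first port.
 */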
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
		       DRV_VERSION);
		++version_printed;
	}

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		CH_ERR("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			CH_ERR("%s: unable to obtain 64-bit DMA for"
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		if (!adapter) {
			adapter = netdev->priv;
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;	/* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				CH_ERR("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	/* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		netdev->features |= NETIF_F_LLTX;

		adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
			adapter->flags |= VLAN_ACCEL_CAPABLE;
			netdev->features |=
				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
			netdev->vlan_rx_register = vlan_rx_register;
#endif

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				adapter->flags |= TSO_CAPABLE;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t1_start_xmit;
		netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
		netdev->get_stats = t1_get_stats;
		netdev->set_multicast_list = t1_set_rxmode;
		netdev->do_ioctl = t1_ioctl;
		netdev->change_mtu = t1_change_mtu;
		netdev->set_mac_address = t1_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = t1_netpoll;
#endif
#ifdef CONFIG_CHELSIO_T1_NAPI
		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
#endif

		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			CH_WARN("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		CH_ERR("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
	       bi->desc, adapter->params.chip_revision,
	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
	       adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
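
/*
 * Serially shift 'nbits' of 'bitdata' out on the ELMER0 GPO data line,
 * MSB first, toggling the clock line for each bit.
 */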
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
	int data;
	int i;
	u32 val;

	enum {
		S_CLOCK = 1 << 3,
		S_DATA = 1 << 4
	};

	for (i = (nbits - 1); i > -1; i--) {

		udelay(50);

		data = ((bitdata >> i) & 0x1);
		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);

		if (data)
			val |= S_DATA;
		else
			val &= ~S_DATA;

		udelay(50);

		/* Set SCLOCK low */
		val &= ~S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

		udelay(50);

		/* Write SCLOCK high */
		val |= S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	}
}
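
/*
 * Switch a T1B between its normal and power-saving (reduced) clocks by
 * reprogramming the core and memory clock synthesizers through ELMER0.
 */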
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
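
/*
 * Reset the chip by cycling it through PCI power states via the PM
 * control/status register.
 */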
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	t1_sw_reset(pdev);
}

static struct pci_driver driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};

static int __init t1_init_module(void)
{
	return pci_register_driver(&driver);
}

static void __exit t1_cleanup_module(void)
{
	pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);