/*****************************************************************************
 *                                                                           *
 * File: cxgb2.c                                                             *
 * $Revision: 1.25 $                                                         *
 * $Date: 2005/06/22 00:43:25 $                                              *
 * Description:                                                              *
 *  Chelsio 10Gb Ethernet Driver.                                            *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                  *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
#include "elmer0.h"

#include <linux/workqueue.h>

static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
        schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
        cancel_delayed_work(&ap->stats_update_task);
}

#define MAX_CMDQ_ENTRIES        16384
#define MAX_CMDQ1_ENTRIES       1024
#define MAX_RX_BUFFERS          16384
#define MAX_RX_JUMBO_BUFFERS    16384
#define MAX_TX_BUFFERS_HIGH     16384U
#define MAX_TX_BUFFERS_LOW      1536U
#define MAX_TX_BUFFERS          1460U
#define MIN_FL_ENTRIES          32

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;     /* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

static const char pci_speed[][4] = {
        "33", "66", "100", "133"
};

/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        struct t1_rx_mode rm;

        rm.dev = dev;
        mac->ops->set_rx_mode(mac, &rm);
}

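/*
 * Report a port's link state via printk: down, or up with the negotiated
 * speed and duplex.
 */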
static void link_report(struct port_info *p)
{
        if (!netif_carrier_ok(p->dev))
                printk(KERN_INFO "%s: link down\n", p->dev->name);
        else {
                const char *s = "10Mbps";

                switch (p->link_config.speed) {
                case SPEED_10000: s = "10Gbps"; break;
                case SPEED_1000:  s = "1000Mbps"; break;
                case SPEED_100:   s = "100Mbps"; break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
                       p->dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

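/*
 * Called on a PHY link state change.  Updates the carrier state, logs the
 * transition, and, on multi-port adapters, passes the new speed on to the
 * SGE Tx scheduler.
 */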
void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct port_info *p = &adapter->port[port_id];

        if (link_stat != netif_carrier_ok(p->dev)) {
                if (link_stat)
                        netif_carrier_on(p->dev);
                else
                        netif_carrier_off(p->dev);
                link_report(p);

                /* multi-ports: inform toe */
                if ((speed > 0) && (adapter->params.nports > 1)) {
                        unsigned int sched_speed = 10;
                        switch (speed) {
                        case SPEED_1000:
                                sched_speed = 1000;
                                break;
                        case SPEED_100:
                                sched_speed = 100;
                                break;
                        case SPEED_10:
                                sched_speed = 10;
                                break;
                        }
                        t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
                }
        }
}

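/*
 * Bring up the link on a port: reset the MAC, program its address and Rx
 * mode, (re)start link negotiation, and enable the datapath in both
 * directions.
 */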
static void link_start(struct port_info *p)
{
        struct cmac *mac = p->mac;

        mac->ops->reset(mac);
        if (mac->ops->macaddress_set)
                mac->ops->macaddress_set(mac, p->dev->dev_addr);
        t1_set_rxmode(p->dev);
        t1_link_start(p->phy, mac, &p->link_config);
        mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

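/*
 * Enable TCP checksum offload in the TP; IP header checksum generation is
 * enabled only when TSO is available, since (as the comment below notes)
 * the hardware appears to need it only for segmented frames.
 */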
static void enable_hw_csum(struct adapter *adapter)
{
        if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
                t1_tp_set_ip_checksum_offload(adapter->tp, 1);  /* for TSO only */
        t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
        int err = 0;

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = t1_init_hw_modules(adapter);
                if (err)
                        goto out_err;

                enable_hw_csum(adapter);
                adapter->flags |= FULL_INIT_DONE;
        }

        t1_interrupts_clear(adapter);

        adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
        err = request_irq(adapter->pdev->irq, t1_interrupt,
                          adapter->params.has_msi ? 0 : IRQF_SHARED,
                          adapter->name, adapter);
        if (err) {
                if (adapter->params.has_msi)
                        pci_disable_msi(adapter->pdev);

                goto out_err;
        }

        t1_sge_start(adapter->sge);
        t1_interrupts_enable(adapter);
out_err:
        return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t1_sge_stop(adapter->sge);
        t1_interrupts_disable(adapter);
        free_irq(adapter->pdev->irq, adapter);
        if (adapter->params.has_msi)
                pci_disable_msi(adapter->pdev);
}

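/*
 * net_device open handler.  Brings the adapter up on first open, marks the
 * port active, starts its Tx queue, and kicks off the periodic MAC
 * statistics update if this is the first active port.
 */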
static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->ml_priv;
        int other_ports = adapter->open_device_map & PORT_MASK;

        napi_enable(&adapter->napi);
        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
                napi_disable(&adapter->napi);
                return err;
        }

        __set_bit(dev->if_port, &adapter->open_device_map);
        link_start(&adapter->port[dev->if_port]);
        netif_start_queue(dev);
        if (!other_ports && adapter->params.stats_update_period)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);

        t1_vlan_mode(adapter, dev->features);
        return 0;
}

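/*
 * net_device stop handler.  Quiesces the port, synchronizes with and then
 * cancels the statistics task when the last port goes down, and releases
 * adapter resources once no ports remain open.
 */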
static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct cmac *mac = p->mac;

        netif_stop_queue(dev);
        napi_disable(&adapter->napi);
        mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
        netif_carrier_off(dev);

        clear_bit(dev->if_port, &adapter->open_device_map);
        if (adapter->params.stats_update_period &&
            !(adapter->open_device_map & PORT_MASK)) {
                /* Stop statistics accumulation. */
                smp_mb__after_clear_bit();
                spin_lock(&adapter->work_lock);   /* sync with update task */
                spin_unlock(&adapter->work_lock);
                cancel_mac_stats_update(adapter);
        }

        if (!adapter->open_device_map)
                cxgb_down(adapter);
        return 0;
}

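/*
 * Fill in net_device_stats from a full MAC statistics update, mapping the
 * hardware counters onto the standard netdev counters.
 */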
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct net_device_stats *ns = &p->netstats;
        const struct cmac_statistics *pstats;

        /* Do a full update of the MAC stats */
        pstats = p->mac->ops->statistics_update(p->mac,
                                                MAC_STATS_UPDATE_FULL);

        ns->tx_packets = pstats->TxUnicastFramesOK +
                pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

        ns->rx_packets = pstats->RxUnicastFramesOK +
                pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

        ns->tx_bytes = pstats->TxOctetsOK;
        ns->rx_bytes = pstats->RxOctetsOK;

        ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
                pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
        ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
                pstats->RxFCSErrors + pstats->RxAlignErrors +
                pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
                pstats->RxSymbolErrors + pstats->RxRuntErrors;

        ns->multicast = pstats->RxMulticastFramesOK;
        ns->collisions = pstats->TxTotalCollisions;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->RxFrameTooLongErrors +
                pstats->RxJabberErrors;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->RxFCSErrors;
        ns->rx_frame_errors = pstats->RxAlignErrors;
        ns->rx_fifo_errors = 0;
        ns->rx_missed_errors = 0;

        /* detailed tx_errors */
        ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->TxUnderrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = pstats->TxLateCollisions;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct adapter *adapter = dev->ml_priv;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK",
        "TxOctetsBad",
        "TxUnicastFramesOK",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames",
        "TxFramesWithDeferredXmissions",
        "TxLateCollisions",
        "TxTotalCollisions",
        "TxFramesAbortedDueToXSCollisions",
        "TxUnderrun",
        "TxLengthErrors",
        "TxInternalMACXmitError",
        "TxFramesWithExcessiveDeferral",
        "TxFCSErrors",
        "TxJumboFramesOk",
        "TxJumboOctetsOk",

        "RxOctetsOK",
        "RxOctetsBad",
        "RxUnicastFramesOK",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames",
        "RxFCSErrors",
        "RxAlignErrors",
        "RxSymbolErrors",
        "RxDataErrors",
        "RxSequenceErrors",
        "RxRuntErrors",
        "RxJabberErrors",
        "RxInternalMACRcvError",
        "RxInRangeLengthErrors",
        "RxOutOfRangeLengthField",
        "RxFrameTooLongErrors",
        "RxJumboFramesOk",
        "RxJumboOctetsOk",

        /* Port stats */
        "RxCsumGood",
        "TxCsumOffload",
        "TxTso",
        "RxVlan",
        "TxVlan",
        "TxNeedHeadroom",

        /* Interrupt stats */
        "rx drops",
        "pure_rsps",
        "unhandled irqs",
        "respQ_empty",
        "respQ_overflow",
        "freelistQ_empty",
        "pkt_too_big",
        "pkt_mismatch",
        "cmdQ_full0",
        "cmdQ_full1",

        "espi_DIP2ParityErr",
        "espi_DIP4Err",
        "espi_RxDrops",
        "espi_TxDrops",
        "espi_RxOvfl",
        "espi_ParityErr"
};

#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = dev->ml_priv;

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
}

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        const struct cmac_statistics *s;
        const struct sge_intr_counts *t;
        struct sge_port_stats ss;

        s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
        t = t1_sge_get_intr_counts(adapter->sge);
        t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

        *data++ = s->TxOctetsOK;
        *data++ = s->TxOctetsBad;
        *data++ = s->TxUnicastFramesOK;
        *data++ = s->TxMulticastFramesOK;
        *data++ = s->TxBroadcastFramesOK;
        *data++ = s->TxPauseFrames;
        *data++ = s->TxFramesWithDeferredXmissions;
        *data++ = s->TxLateCollisions;
        *data++ = s->TxTotalCollisions;
        *data++ = s->TxFramesAbortedDueToXSCollisions;
        *data++ = s->TxUnderrun;
        *data++ = s->TxLengthErrors;
        *data++ = s->TxInternalMACXmitError;
        *data++ = s->TxFramesWithExcessiveDeferral;
        *data++ = s->TxFCSErrors;
        *data++ = s->TxJumboFramesOK;
        *data++ = s->TxJumboOctetsOK;

        *data++ = s->RxOctetsOK;
        *data++ = s->RxOctetsBad;
        *data++ = s->RxUnicastFramesOK;
        *data++ = s->RxMulticastFramesOK;
        *data++ = s->RxBroadcastFramesOK;
        *data++ = s->RxPauseFrames;
        *data++ = s->RxFCSErrors;
        *data++ = s->RxAlignErrors;
        *data++ = s->RxSymbolErrors;
        *data++ = s->RxDataErrors;
        *data++ = s->RxSequenceErrors;
        *data++ = s->RxRuntErrors;
        *data++ = s->RxJabberErrors;
        *data++ = s->RxInternalMACRcvError;
        *data++ = s->RxInRangeLengthErrors;
        *data++ = s->RxOutOfRangeLengthField;
        *data++ = s->RxFrameTooLongErrors;
        *data++ = s->RxJumboFramesOK;
        *data++ = s->RxJumboOctetsOK;

        *data++ = ss.rx_cso_good;
        *data++ = ss.tx_cso;
        *data++ = ss.tx_tso;
        *data++ = ss.vlan_xtract;
        *data++ = ss.vlan_insert;
        *data++ = ss.tx_need_hdrroom;

        *data++ = t->rx_drops;
        *data++ = t->pure_rsps;
        *data++ = t->unhandled_irqs;
        *data++ = t->respQ_empty;
        *data++ = t->respQ_overflow;
        *data++ = t->freelistQ_empty;
        *data++ = t->pkt_too_big;
        *data++ = t->pkt_mismatch;
        *data++ = t->cmdQ_full[0];
        *data++ = t->cmdQ_full[1];

        if (adapter->espi) {
                const struct espi_intr_counts *e;

                e = t1_espi_get_intr_counts(adapter->espi);
                *data++ = e->DIP2_parity_err;
                *data++ = e->DIP4_err;
                *data++ = e->rx_drops;
                *data++ = e->tx_drops;
                *data++ = e->rx_ovflw;
                *data++ = e->parity_err;
        }
}

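/*
 * Copy a contiguous block of 32-bit registers [start, end] into the regs
 * dump buffer at the offset matching their address.
 */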
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = readl(ap->regs + start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->ml_priv;

        /*
         * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
         */
        regs->version = 2;

        memset(buf, 0, T2_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
        reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
        reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
        reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
        reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
        reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
        reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
        reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
        reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
        reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                ethtool_cmd_speed_set(cmd, p->link_config.speed);
                cmd->duplex = p->link_config.duplex;
        } else {
                ethtool_cmd_speed_set(cmd, -1);
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy->mdio.prtad;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

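/*
 * Translate a speed/duplex pair into the corresponding ethtool SUPPORTED_*
 * capability bit.
 */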
static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                         ADVERTISED_10000baseT_Full)

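/*
 * ethtool set_settings handler.  With autonegotiation disabled, a single
 * supported speed/duplex may be forced, except 1Gbps, which cannot be
 * forced here; otherwise the advertising mask is sanitized against what
 * the port actually supports.
 */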
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;     /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                u32 speed = ethtool_cmd_speed(cmd);
                int cap = speed_duplex_to_caps(speed, cmd->duplex);

                if (!(lc->supported & cap) || (speed == SPEED_1000))
                        return -EINVAL;
                lc->requested_speed = speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                if (cmd->advertising & (cmd->advertising - 1))
                        cmd->advertising = lc->supported;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t1_link_start(p->phy, p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t1_link_start(p->phy, p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
                                                         lc->fc);
        }
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->ml_priv;
        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_CMDQ_ENTRIES;

        e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
        e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
        e->tx_pending = adapter->params.sge.cmdQ_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->ml_priv;
        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_CMDQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
        adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
        adapter->params.sge.cmdQ_size[0] = e->tx_pending;
        adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
                MAX_CMDQ1_ENTRIES : e->tx_pending;
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->ml_priv;

        adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
        adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
        adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
        t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->ml_priv;

        c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
        c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
        c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
        return 0;
}

static int get_eeprom_len(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;

        return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}

#define EEPROM_MAGIC(ap) \
        (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i;
        u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
        struct adapter *adapter = dev->ml_priv;

        e->magic = EEPROM_MAGIC(adapter);
        for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
                t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
        memcpy(data, buf + e->offset, e->len);
        return 0;
}

static const struct ethtool_ops t1_ethtool_ops = {
        .get_settings      = get_settings,
        .set_settings      = set_settings,
        .get_drvinfo       = get_drvinfo,
        .get_msglevel      = get_msglevel,
        .set_msglevel      = set_msglevel,
        .get_ringparam     = get_sge_param,
        .set_ringparam     = set_sge_param,
        .get_coalesce      = get_coalesce,
        .set_coalesce      = set_coalesce,
        .get_eeprom_len    = get_eeprom_len,
        .get_eeprom        = get_eeprom,
        .get_pauseparam    = get_pauseparam,
        .set_pauseparam    = set_pauseparam,
        .get_link          = ethtool_op_get_link,
        .get_strings       = get_strings,
        .get_sset_count    = get_sset_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len      = get_regs_len,
        .get_regs          = get_regs,
};

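/* Forward MII ioctls to the PHY via its MDIO interface. */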
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        struct adapter *adapter = dev->ml_priv;
        struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;

        return mdio_mii_ioctl(mdio, if_mii(req), cmd);
}

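/*
 * Change the port MTU.  Values below 68 (the minimum MTU an IPv4 link must
 * support) are rejected, and the MAC decides whether the new size is
 * otherwise acceptable.
 */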
static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
        int ret;
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;

        if (!mac->ops->set_mtu)
                return -EOPNOTSUPP;
        if (new_mtu < 68)
                return -EINVAL;
        if ((ret = mac->ops->set_mtu(mac, new_mtu)))
                return ret;
        dev->mtu = new_mtu;
        return 0;
}

static int t1_set_mac_addr(struct net_device *dev, void *p)
{
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        struct sockaddr *addr = p;

        if (!mac->ops->macaddress_set)
                return -EOPNOTSUPP;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        mac->ops->macaddress_set(mac, dev->dev_addr);
        return 0;
}

static netdev_features_t t1_fix_features(struct net_device *dev,
                                         netdev_features_t features)
{
        /*
         * Since there is no support for separate rx/tx vlan accel
         * enable/disable make sure tx flag is always in same state as rx.
         */
        if (features & NETIF_F_HW_VLAN_RX)
                features |= NETIF_F_HW_VLAN_TX;
        else
                features &= ~NETIF_F_HW_VLAN_TX;

        return features;
}

static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
        netdev_features_t changed = dev->features ^ features;
        struct adapter *adapter = dev->ml_priv;

        if (changed & NETIF_F_HW_VLAN_RX)
                t1_vlan_mode(adapter, features);

        return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
        unsigned long flags;
        struct adapter *adapter = dev->ml_priv;

        local_irq_save(flags);
        t1_interrupt(adapter->pdev->irq, adapter);
        local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics. This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(struct work_struct *work)
{
        int i;
        struct adapter *adapter =
                container_of(work, struct adapter, stats_update_task.work);

        for_each_port(adapter, i) {
                struct port_info *p = &adapter->port[i];

                if (netif_running(p->dev))
                        p->mac->ops->statistics_update(p->mac,
                                                       MAC_STATS_UPDATE_FAST);
        }

        /* Schedule the next statistics update if any port is active. */
        spin_lock(&adapter->work_lock);
        if (adapter->open_device_map & PORT_MASK)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);
        spin_unlock(&adapter->work_lock);
}

/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
        struct adapter *adapter =
                container_of(work, struct adapter, ext_intr_handler_task);

        t1_elmer0_ext_intr_handler(adapter);

        /* Now reenable external interrupts */
        spin_lock_irq(&adapter->async_lock);
        adapter->slow_intr_mask |= F_PL_INTR_EXT;
        writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
               adapter->regs + A_PL_ENABLE);
        spin_unlock_irq(&adapter->async_lock);
}

/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
        /*
         * Schedule a task to handle external interrupts as we require
         * a process context. We disable EXT interrupts in the interim
         * and let the task reenable them when it's done.
         */
        adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
               adapter->regs + A_PL_ENABLE);
        schedule_work(&adapter->ext_intr_handler_task);
}

void t1_fatal_err(struct adapter *adapter)
{
        if (adapter->flags & FULL_INIT_DONE) {
                t1_sge_stop(adapter->sge);
                t1_interrupts_disable(adapter);
        }
        pr_alert("%s: encountered fatal error, operation suspended\n",
                 adapter->name);
}

static const struct net_device_ops cxgb_netdev_ops = {
        .ndo_open            = cxgb_open,
        .ndo_stop            = cxgb_close,
        .ndo_start_xmit      = t1_start_xmit,
        .ndo_get_stats       = t1_get_stats,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_rx_mode     = t1_set_rxmode,
        .ndo_do_ioctl        = t1_ioctl,
        .ndo_change_mtu      = t1_change_mtu,
        .ndo_set_mac_address = t1_set_mac_addr,
        .ndo_fix_features    = t1_fix_features,
        .ndo_set_features    = t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = t1_netpoll,
#endif
};

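/*
 * PCI probe routine.  Enables the device, sets up the DMA masks and the
 * BAR0 mapping, allocates one net_device per port (the first also embeds
 * the adapter structure), and registers whichever ports come up
 * successfully.
 */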
static int __devinit init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
        static int version_printed;

        int i, err, pci_using_dac = 0;
        unsigned long mmio_start, mmio_len;
        const struct board_info *bi;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        if (!version_printed) {
                printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
                       DRV_VERSION);
                ++version_printed;
        }

        err = pci_enable_device(pdev);
        if (err)
                return err;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                pr_err("%s: cannot find PCI device memory base address\n",
                       pci_name(pdev));
                err = -ENODEV;
                goto out_disable_pdev;
        }

        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;

                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
                        pr_err("%s: unable to obtain 64-bit DMA for "
                               "consistent allocations\n", pci_name(pdev));
                        err = -ENODEV;
                        goto out_disable_pdev;
                }

        } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
                pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        pci_set_master(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        bi = t1_get_board_info(ent->driver_data);

        for (i = 0; i < bi->port_number; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                if (!adapter) {
                        adapter = netdev_priv(netdev);
                        adapter->pdev = pdev;
                        adapter->port[0].dev = netdev;  /* so we don't leak it */

                        adapter->regs = ioremap(mmio_start, mmio_len);
                        if (!adapter->regs) {
                                pr_err("%s: cannot map device registers\n",
                                       pci_name(pdev));
                                err = -ENOMEM;
                                goto out_free_dev;
                        }

                        if (t1_get_board_rev(adapter, bi, &adapter->params)) {
                                err = -ENODEV;  /* Can't handle this chip rev */
                                goto out_free_dev;
                        }

                        adapter->name = pci_name(pdev);
                        adapter->msg_enable = dflt_msg_enable;
                        adapter->mmio_len = mmio_len;

                        spin_lock_init(&adapter->tpi_lock);
                        spin_lock_init(&adapter->work_lock);
                        spin_lock_init(&adapter->async_lock);
                        spin_lock_init(&adapter->mac_lock);

                        INIT_WORK(&adapter->ext_intr_handler_task,
                                  ext_intr_task);
                        INIT_DELAYED_WORK(&adapter->stats_update_task,
                                          mac_stats_task);

                        pci_set_drvdata(pdev, netdev);
                }

                pi = &adapter->port[i];
                pi->dev = netdev;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->if_port = i;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->ml_priv = adapter;
                netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_RXCSUM;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_LLTX;

                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;
                if (vlan_tso_capable(adapter)) {
                        netdev->features |=
                                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                        netdev->hw_features |= NETIF_F_HW_VLAN_RX;

                        /* T204: disable TSO */
                        if (!(is_T2(adapter)) || bi->port_number != 4) {
                                netdev->hw_features |= NETIF_F_TSO;
                                netdev->features |= NETIF_F_TSO;
                        }
                }

                netdev->netdev_ops = &cxgb_netdev_ops;
                netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
                        sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

                netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

                SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
        }

        if (t1_init_sw_modules(adapter, bi) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go. If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully. However we must
         * register at least one net device.
         */
        for (i = 0; i < bi->port_number; ++i) {
                err = register_netdev(adapter->port[i].dev);
                if (err)
                        pr_warning("%s: cannot register net device %s, skipping\n",
                                   pci_name(pdev), adapter->port[i].dev->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i].dev->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                pr_err("%s: could not register any net devices\n",
                       pci_name(pdev));
                goto out_release_adapter_res;
        }

        printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
               bi->desc, adapter->params.chip_revision,
               adapter->params.pci.is_pcix ? "PCIX" : "PCI",
               adapter->params.pci.speed, adapter->params.pci.width);

        /*
         * Set the T1B ASIC and memory clocks.
         */
        if (t1powersave)
                adapter->t1powersave = LCLOCK;  /* HW default is powersave mode. */
        else
                adapter->t1powersave = HCLOCK;
        if (t1_is_T1B(adapter))
                t1_clock(adapter, t1powersave);

        return 0;

out_release_adapter_res:
        t1_free_sw_modules(adapter);
out_free_dev:
        if (adapter) {
                if (adapter->regs)
                        iounmap(adapter->regs);
                for (i = bi->port_number - 1; i >= 0; --i)
                        if (adapter->port[i].dev)
                                free_netdev(adapter->port[i].dev);
        }
        pci_release_regions(pdev);
out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

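/*
 * Shift @nbits of @bitdata out on the ELMER0 GPO data pin, MSB first,
 * toggling the serial clock pin low then high for each bit.
 */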
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
        int data;
        int i;
        u32 val;

        enum {
                S_CLOCK = 1 << 3,
                S_DATA = 1 << 4
        };

        for (i = (nbits - 1); i > -1; i--) {

                udelay(50);

                data = ((bitdata >> i) & 0x1);
                __t1_tpi_read(adapter, A_ELMER0_GPO, &val);

                if (data)
                        val |= S_DATA;
                else
                        val &= ~S_DATA;

                udelay(50);

                /* Set SCLOCK low */
                val &= ~S_CLOCK;
                __t1_tpi_write(adapter, A_ELMER0_GPO, val);

                udelay(50);

                /* Write SCLOCK high */
                val |= S_CLOCK;
                __t1_tpi_write(adapter, A_ELMER0_GPO, val);

        }
}

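/*
 * Switch a T1B between its normal and power-save clocks by bit-banging new
 * synthesizer parameters for the ASIC core and memory clocks through the
 * ELMER0 GPO pins.  Returns -EALREADY if the requested mode is already in
 * effect.
 */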
static int t1_clock(struct adapter *adapter, int mode)
{
        u32 val;
        int M_CORE_VAL;
        int M_MEM_VAL;

        enum {
                M_CORE_BITS = 9,
                T_CORE_VAL = 0,
                T_CORE_BITS = 2,
                N_CORE_VAL = 0,
                N_CORE_BITS = 2,
                M_MEM_BITS = 9,
                T_MEM_VAL = 0,
                T_MEM_BITS = 2,
                N_MEM_VAL = 0,
                N_MEM_BITS = 2,
                NP_LOAD = 1 << 17,
                S_LOAD_MEM = 1 << 5,
                S_LOAD_CORE = 1 << 6,
                S_CLOCK = 1 << 3
        };

        if (!t1_is_T1B(adapter))
                return -ENODEV; /* Can't re-clock this chip. */

        if (mode & 2)
                return 0;       /* show current mode. */

        if ((adapter->t1powersave & 1) == (mode & 1))
                return -EALREADY;       /* ASIC already running in mode. */

        if ((mode & 1) == HCLOCK) {
                M_CORE_VAL = 0x14;
                M_MEM_VAL = 0x18;
                adapter->t1powersave = HCLOCK;  /* overclock */
        } else {
                M_CORE_VAL = 0xe;
                M_MEM_VAL = 0x10;
                adapter->t1powersave = LCLOCK;  /* underclock */
        }

        /* Don't interrupt this serial stream! */
        spin_lock(&adapter->tpi_lock);

        /* Initialize for ASIC core */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= NP_LOAD;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_CORE;
        val &= ~S_CLOCK;
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Serial program the ASIC clock synthesizer */
        bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
        bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
        bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
        udelay(50);

        /* Finish ASIC core */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= S_LOAD_CORE;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_CORE;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Initialize for memory */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= NP_LOAD;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_MEM;
        val &= ~S_CLOCK;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Serial program the memory clock synthesizer */
        bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
        bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
        bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
        udelay(50);

        /* Finish memory */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= S_LOAD_MEM;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_MEM;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);

        spin_unlock(&adapter->tpi_lock);

        return 0;
}

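/*
 * Reset the chip by bouncing it through the D3hot power state via the PCI
 * power-management control/status register.
 */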
static inline void t1_sw_reset(struct pci_dev *pdev)
{
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

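/*
 * PCI remove routine.  Unregisters the ports, tears down the software
 * modules and register mapping, releases the PCI resources, and finally
 * resets the chip.
 */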
static void __devexit remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct adapter *adapter = dev->ml_priv;
        int i;

        for_each_port(adapter, i) {
                if (test_bit(i, &adapter->registered_device_map))
                        unregister_netdev(adapter->port[i].dev);
        }

        t1_free_sw_modules(adapter);
        iounmap(adapter->regs);

        while (--i >= 0) {
                if (adapter->port[i].dev)
                        free_netdev(adapter->port[i].dev);
        }

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        t1_sw_reset(pdev);
}

static struct pci_driver driver = {
        .name     = DRV_NAME,
        .id_table = t1_pci_tbl,
        .probe    = init_one,
        .remove   = __devexit_p(remove_one),
};

static int __init t1_init_module(void)
{
        return pci_register_driver(&driver);
}

static void __exit t1_cleanup_module(void)
{
        pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);