/*****************************************************************************
 *                                                                           *
 * File: cxgb2.c                                                             *
 * $Revision: 1.25 $                                                         *
 * $Date: 2005/06/22 00:43:25 $                                              *
 * Description:                                                              *
 *  Chelsio 10Gb Ethernet Driver.                                            *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                               *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, see <http://www.gnu.org/licenses/>.           *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                    *
 *                                                                           *
 * http://www.chelsio.com                                                   *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                   *
 * All rights reserved.                                                     *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                     *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                        *
 *          Tina Yang               <tainay@chelsio.com>                    *
 *          Felix Marti             <felix@chelsio.com>                     *
 *          Scott Bardone           <sbardone@chelsio.com>                  *
 *          Kurt Ottaway            <kottaway@chelsio.com>                  *
 *          Frank DiMambro          <frank@chelsio.com>                     *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/dma-mapping.h>
#include <linux/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
#include "elmer0.h"

#include <linux/workqueue.h>

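/*
 * Helpers to arm and cancel the periodic MAC statistics refresh that runs
 * from the shared workqueue (see mac_stats_task() below).
 */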
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}

#define MAX_CMDQ_ENTRIES	16384
#define MAX_CMDQ1_ENTRIES	1024
#define MAX_RX_BUFFERS		16384
#define MAX_RX_JUMBO_BUFFERS	16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MAX_TX_BUFFERS		1460U
#define MIN_FL_ENTRIES		32

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

static const char pci_speed[][4] = {
	"33", "66", "100", "133"
};

/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct t1_rx_mode rm;

	rm.dev = dev;
	mac->ops->set_rx_mode(mac, &rm);
}

static void link_report(struct port_info *p)
{
	if (!netif_carrier_ok(p->dev))
		netdev_info(p->dev, "link down\n");
	else {
		const char *s = "10Mbps";

		switch (p->link_config.speed) {
		case SPEED_10000: s = "10Gbps"; break;
		case SPEED_1000:  s = "1000Mbps"; break;
		case SPEED_100:   s = "100Mbps"; break;
		}

		netdev_info(p->dev, "link up, %s, %s-duplex\n",
			    s, p->link_config.duplex == DUPLEX_FULL
			    ? "full" : "half");
	}
}

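/*
 * Called on PHY link state changes.  Propagates the new carrier state to
 * the net device, logs it, and on multi-port adapters feeds the negotiated
 * speed to the TX scheduler.
 */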
void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct port_info *p = &adapter->port[port_id];

	if (link_stat != netif_carrier_ok(p->dev)) {
		if (link_stat)
			netif_carrier_on(p->dev);
		else
			netif_carrier_off(p->dev);
		link_report(p);

		/* multi-ports: inform toe */
		if ((speed > 0) && (adapter->params.nports > 1)) {
			unsigned int sched_speed = 10;

			switch (speed) {
			case SPEED_1000:
				sched_speed = 1000;
				break;
			case SPEED_100:
				sched_speed = 100;
				break;
			case SPEED_10:
				sched_speed = 10;
				break;
			}
			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
		}
	}
}

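/*
 * Bring up the MAC for a port: reset it, program the station address and
 * RX mode, kick off PHY link negotiation, and enable both directions.
 */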
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

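/* Enable TCP (and, when TSO is available, IP) checksum offload in TP. */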
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}

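/*
 * net_device open handler.  Brings the adapter up on first open, marks the
 * port active, and starts the periodic statistics update if configured.
 */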
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->ml_priv;
	int other_ports = adapter->open_device_map & PORT_MASK;

	napi_enable(&adapter->napi);
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		napi_disable(&adapter->napi);
		return err;
	}

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);

	t1_vlan_mode(adapter, dev->features);
	return 0;
}

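/*
 * net_device stop handler.  Quiesces the port, stops the statistics task
 * when no port remains active, and tears the adapter down on last close.
 */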
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_atomic();
		spin_lock(&adapter->work_lock);	/* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}

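/* Fold the MAC hardware counters into the generic net_device_stats. */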
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &dev->stats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->RxFCSErrors;
	ns->rx_frame_errors = pstats->RxAlignErrors;
	ns->rx_fifo_errors = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = pstats->TxLateCollisions;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* Port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};

#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = dev->ml_priv;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

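/*
 * ethtool statistics handler.  Values are emitted in the same order as
 * stats_strings[] above: MAC counters, SGE per-port counters, interrupt
 * counters, and finally the ESPI counters where an ESPI block exists.
 */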
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}

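/* Copy the register range [start, end] into buf at the matching offset. */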
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	u32 supported, advertising;

	supported = p->link_config.supported;
	advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->base.speed = p->link_config.speed;
		cmd->base.duplex = p->link_config.duplex;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->base.phy_address = p->phy->mdio.prtad;
	cmd->base.autoneg = p->link_config.autoneg;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

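/* Translate a speed/duplex pair into the corresponding SUPPORTED_* bit. */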
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)

static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		u32 speed = cmd->base.speed;
		int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->base.duplex;
		lc->advertising = 0;
	} else {
		advertising &= ADVERTISED_MASK;
		if (advertising & (advertising - 1))
			advertising = lc->supported;
		advertising &= lc->supported;
		if (!advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->base.autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_CMDQ_ENTRIES;

	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
	e->tx_pending = adapter->params.sge.cmdQ_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
	return 0;
}

static int get_eeprom_len(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}

#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}

static const struct ethtool_ops t1_ethtool_ops = {
	.get_drvinfo        = get_drvinfo,
	.get_msglevel       = get_msglevel,
	.set_msglevel       = set_msglevel,
	.get_ringparam      = get_sge_param,
	.set_ringparam      = set_sge_param,
	.get_coalesce       = get_coalesce,
	.set_coalesce       = set_coalesce,
	.get_eeprom_len     = get_eeprom_len,
	.get_eeprom         = get_eeprom,
	.get_pauseparam     = get_pauseparam,
	.set_pauseparam     = set_pauseparam,
	.get_link           = ethtool_op_get_link,
	.get_strings        = get_strings,
	.get_sset_count     = get_sset_count,
	.get_ethtool_stats  = get_stats,
	.get_regs_len       = get_regs_len,
	.get_regs           = get_regs,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};

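/* Pass MII ioctls through to the port's PHY via the mdio library. */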
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;

	return mdio_mii_ioctl(mdio, if_mii(req), cmd);
}

static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;

	if (!mac->ops->set_mtu)
		return -EOPNOTSUPP;
	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	return 0;
}

static int t1_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct sockaddr *addr = p;

	if (!mac->ops->macaddress_set)
		return -EOPNOTSUPP;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mac->ops->macaddress_set(mac, dev->dev_addr);
	return 0;
}

static netdev_features_t t1_fix_features(struct net_device *dev,
					 netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct adapter *adapter = dev->ml_priv;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		t1_vlan_mode(adapter, features);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->ml_priv;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics. This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(struct work_struct *work)
{
	int i;
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}

/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}

/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}

void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	pr_alert("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open            = cxgb_open,
	.ndo_stop            = cxgb_close,
	.ndo_start_xmit      = t1_start_xmit,
	.ndo_get_stats       = t1_get_stats,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_rx_mode     = t1_set_rxmode,
	.ndo_do_ioctl        = t1_ioctl,
	.ndo_change_mtu      = t1_change_mtu,
	.ndo_set_mac_address = t1_set_mac_addr,
	.ndo_fix_features    = t1_fix_features,
	.ndo_set_features    = t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = t1_netpoll,
#endif
};

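/*
 * PCI probe.  Enables the device, sets up the DMA masks, maps the register
 * BAR, allocates one net_device per port (the first also carries the shared
 * adapter state), and registers whichever ports come up successfully.
 */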
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("%s: unable to obtain 64-bit DMA for "
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		if (!adapter) {
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;	/* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	/* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM | NETIF_F_LLTX;

		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
			netdev->features |=
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX;
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				netdev->hw_features |= NETIF_F_TSO;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		netdev->ethtool_ops = &t1_ethtool_ops;

		switch (bi->board) {
		case CHBT_BOARD_CHT110:
		case CHBT_BOARD_N110:
		case CHBT_BOARD_N210:
		case CHBT_BOARD_CHT210:
			netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
					  (ETH_HLEN + ETH_FCS_LEN);
			break;
		case CHBT_BOARD_CHN204:
			netdev->max_mtu = VSC7326_MAX_MTU;
			break;
		default:
			netdev->max_mtu = ETH_DATA_LEN;
			break;
		}
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			pr_warn("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		pr_err("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
		adapter->name, bi->desc, adapter->params.chip_revision,
		adapter->params.pci.is_pcix ? "PCIX" : "PCI",
		adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	return err;
}

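/*
 * Shift the low nbits of bitdata, MSB first, out of the ELMER0 GPO data
 * bit, toggling the serial clock bit around each data bit.
 */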
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
	int data;
	int i;
	u32 val;

	enum {
		S_CLOCK = 1 << 3,
		S_DATA = 1 << 4
	};

	for (i = (nbits - 1); i > -1; i--) {

		udelay(50);

		data = ((bitdata >> i) & 0x1);
		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);

		if (data)
			val |= S_DATA;
		else
			val &= ~S_DATA;

		udelay(50);

		/* Set SCLOCK low */
		val &= ~S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

		udelay(50);

		/* Write SCLOCK high */
		val |= S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	}
}

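/*
 * Reprogram the T1B core and memory clock synthesizers through the ELMER0
 * GPO serial interface, selecting either full-speed (HCLOCK) or powersave
 * (LCLOCK) operation.
 */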
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}

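/* Soft reset: bounce the chip through PCI power state D3hot and back. */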
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

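/*
 * PCI remove.  Unregisters the ports, frees the software state, unmaps the
 * registers, releases the net devices, and soft-resets the chip.
 */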
static void remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	t1_sw_reset(pdev);
}

static struct pci_driver cxgb_pci_driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
};

module_pci_driver(cxgb_pci_driver);