]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/chelsio/cxgb2.c
[PATCH] A new 10GB Ethernet Driver by Chelsio Communications
[mirror_ubuntu-artful-kernel.git] / drivers / net / chelsio / cxgb2.c
CommitLineData
8199d3a7
CL
1/*****************************************************************************
2 * *
3 * File: cxgb2.c *
4 * $Revision: 1.11 $ *
5 * $Date: 2005/03/23 07:41:27 $ *
6 * Description: *
7 * Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#include "common.h"
40
41#include <linux/config.h>
42#include <linux/module.h>
43#include <linux/init.h>
44#include <linux/pci.h>
45#include <linux/netdevice.h>
46#include <linux/etherdevice.h>
47#include <linux/if_vlan.h>
48#include <linux/mii.h>
49#include <linux/sockios.h>
50#include <linux/proc_fs.h>
51#include <linux/version.h>
52#include <linux/workqueue.h>
53#include <asm/uaccess.h>
54
55#include "ch_ethtool.h"
56#include "cpl5_cmd.h"
57#include "regs.h"
58#include "gmac.h"
59#include "cphy.h"
60#include "sge.h"
61#include "tp.h"
62#include "espi.h"
63
64static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
65{
66 schedule_delayed_work(&ap->stats_update_task, secs * HZ);
67}
68
/* Cancel a pending run of the periodic MAC statistics task. */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}
73
74#if BITS_PER_LONG == 64 && !defined(CONFIG_X86_64)
75# define FMT64 "l"
76#else
77# define FMT64 "ll"
78#endif
79
80# define DRV_TYPE ""
81# define MODULE_DESC "Chelsio Network Driver"
82
83static char driver_name[] = DRV_NAME;
84static char driver_string[] = "Chelsio " DRV_TYPE "Network Driver";
85static char driver_version[] = "2.1.0";
86
87#define PCI_DMA_64BIT ~0ULL
88#define PCI_DMA_32BIT 0xffffffffULL
89
90#define MAX_CMDQ_ENTRIES 16384
91#define MAX_CMDQ1_ENTRIES 1024
92#define MAX_RX_BUFFERS 16384
93#define MAX_RX_JUMBO_BUFFERS 16384
94#define MAX_TX_BUFFERS_HIGH 16384U
95#define MAX_TX_BUFFERS_LOW 1536U
96#define MIN_FL_ENTRIES 32
97
98#define PORT_MASK ((1 << MAX_NPORTS) - 1)
99
100#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
101 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
102 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
103
104/*
105 * The EEPROM is actually bigger but only the first few bytes are used so we
106 * only report those.
107 */
108#define EEPROM_SIZE 32
109
110MODULE_DESCRIPTION(MODULE_DESC);
111MODULE_AUTHOR("Chelsio Communications");
112MODULE_LICENSE("GPL");
113MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
114
115static int dflt_msg_enable = DFLT_MSG_ENABLE;
116
117MODULE_PARM(dflt_msg_enable, "i");
118MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
119
120
121static const char pci_speed[][4] = {
122 "33", "66", "100", "133"
123};
124
125/*
126 * Setup MAC to receive the types of packets we want.
127 */
128static void t1_set_rxmode(struct net_device *dev)
129{
130 struct adapter *adapter = dev->priv;
131 struct cmac *mac = adapter->port[dev->if_port].mac;
132 struct t1_rx_mode rm;
133
134 rm.dev = dev;
135 rm.idx = 0;
136 rm.list = dev->mc_list;
137 mac->ops->set_rx_mode(mac, &rm);
138}
139
140static void link_report(struct port_info *p)
141{
142 if (!netif_carrier_ok(p->dev))
143 printk(KERN_INFO "%s: link is down\n", p->dev->name);
144 else {
145 const char *s = "10 Mbps";
146
147 switch (p->link_config.speed) {
148 case SPEED_10000: s = "10 Gbps"; break;
149 case SPEED_1000: s = "1000 Mbps"; break;
150 case SPEED_100: s = "100 Mbps"; break;
151 }
152
153 printk(KERN_INFO "%s: link is up at %s, %s duplex\n",
154 p->dev->name, s,
155 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
156 }
157}
158
159void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
160 int speed, int duplex, int pause)
161{
162 struct port_info *p = &adapter->port[port_id];
163
164 if (link_stat != netif_carrier_ok(p->dev)) {
165 if (link_stat)
166 netif_carrier_on(p->dev);
167 else
168 netif_carrier_off(p->dev);
169 link_report(p);
170
171 }
172}
173
/*
 * Bring a port's link up: reset the MAC, reload its address and RX
 * filter, kick off link negotiation, and enable both traffic directions.
 * The order matters -- the MAC must be reset before reprogramming it.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	/* Not all MACs support setting the address; skip when absent. */
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
185
/*
 * Enable the checksum offloads the adapter is capable of.  TCP checksum
 * offload is always on; IP and UDP offloads are gated on capability flags.
 */
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->flags & TSO_CAPABLE)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	if (adapter->flags & UDP_CSUM_CAPABLE)
		t1_tp_set_udp_checksum_offload(adapter->tp, 1);
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
194
195/*
196 * Things to do upon first use of a card.
197 * This must run with the rtnl lock held.
198 */
199static int cxgb_up(struct adapter *adapter)
200{
201 int err = 0;
202
203 if (!(adapter->flags & FULL_INIT_DONE)) {
204 err = t1_init_hw_modules(adapter);
205 if (err)
206 goto out_err;
207
208 enable_hw_csum(adapter);
209 adapter->flags |= FULL_INIT_DONE;
210 }
211
212 t1_interrupts_clear(adapter);
213
214 if ((err = request_irq(adapter->pdev->irq, &t1_interrupt, SA_SHIRQ,
215 adapter->name, adapter)))
216 goto out_err;
217
218 t1_sge_start(adapter->sge);
219 t1_interrupts_enable(adapter);
220
221 err = 0;
222 out_err:
223 return err;
224}
225
226/*
227 * Release resources when all the ports have been stopped.
228 */
/*
 * Release resources when all the ports have been stopped.
 * Order matters: stop the SGE and mask interrupts before freeing the IRQ.
 */
static void cxgb_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
}
235
236static int cxgb_open(struct net_device *dev)
237{
238 int err;
239 struct adapter *adapter = dev->priv;
240 int other_ports = adapter->open_device_map & PORT_MASK;
241
242 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
243 return err;
244
245 __set_bit(dev->if_port, &adapter->open_device_map);
246 link_start(&adapter->port[dev->if_port]);
247 netif_start_queue(dev);
248 if (!other_ports && adapter->params.stats_update_period)
249 schedule_mac_stats_update(adapter,
250 adapter->params.stats_update_period);
251 return 0;
252}
253
/*
 * net_device stop handler.  Quiesces this port; when it is the last
 * active port, also stops the stats task and tears down the adapter.
 */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_clear_bit();
		/*
		 * Empty lock/unlock pair: waits out a stats task that may
		 * already be running so it observes the cleared port bit
		 * and does not re-arm itself after the cancel below.
		 */
		spin_lock(&adapter->work_lock);	/* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
278
/*
 * net_device get_stats handler: pull a full statistics update from the
 * MAC and translate the hardware counters into struct net_device_stats.
 */
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &p->netstats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors = 0;		/* not reported by this hardware */
	ns->rx_crc_errors = pstats->RxFCSErrors;
	ns->rx_frame_errors = pstats->RxAlignErrors;
	ns->rx_fifo_errors = 0;		/* not reported by this hardware */
	ns->rx_missed_errors = 0;	/* not reported by this hardware */

	/* detailed tx_errors */
	ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors = 0;	/* not reported by this hardware */
	ns->tx_fifo_errors = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;	/* not reported by this hardware */
	ns->tx_window_errors = pstats->TxLateCollisions;
	return ns;
}
326
327static u32 get_msglevel(struct net_device *dev)
328{
329 struct adapter *adapter = dev->priv;
330
331 return adapter->msg_enable;
332}
333
334static void set_msglevel(struct net_device *dev, u32 val)
335{
336 struct adapter *adapter = dev->priv;
337
338 adapter->msg_enable = val;
339}
340
/*
 * ethtool statistics labels.  The order of these strings must match the
 * order in which get_stats() writes the corresponding counter values.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors"
};
376
377static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
378{
379 struct adapter *adapter = dev->priv;
380
381 strcpy(info->driver, driver_name);
382 strcpy(info->version, driver_version);
383 strcpy(info->fw_version, "N/A");
384 strcpy(info->bus_info, pci_name(adapter->pdev));
385}
386
/* ethtool get_stats_count handler: number of exported statistics. */
static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}
391
392static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
393{
394 if (stringset == ETH_SS_STATS)
395 memcpy(data, stats_strings, sizeof(stats_strings));
396}
397
/*
 * ethtool get_ethtool_stats handler: fetch a full MAC statistics update
 * and write the counters out in the exact order of stats_strings[].
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);

	/* TX counters -- order must match stats_strings[]. */
	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;

	/* RX counters -- order must match stats_strings[]. */
	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
}
441
442static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
443{
444 struct adapter *adapter = dev->priv;
445 struct port_info *p = &adapter->port[dev->if_port];
446
447 cmd->supported = p->link_config.supported;
448 cmd->advertising = p->link_config.advertising;
449
450 if (netif_carrier_ok(dev)) {
451 cmd->speed = p->link_config.speed;
452 cmd->duplex = p->link_config.duplex;
453 } else {
454 cmd->speed = -1;
455 cmd->duplex = -1;
456 }
457
458 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
459 cmd->phy_address = p->phy->addr;
460 cmd->transceiver = XCVR_EXTERNAL;
461 cmd->autoneg = p->link_config.autoneg;
462 cmd->maxtxpkt = 0;
463 cmd->maxrxpkt = 0;
464 return 0;
465}
466
467static int speed_duplex_to_caps(int speed, int duplex)
468{
469 int cap = 0;
470
471 switch (speed) {
472 case SPEED_10:
473 if (duplex == DUPLEX_FULL)
474 cap = SUPPORTED_10baseT_Full;
475 else
476 cap = SUPPORTED_10baseT_Half;
477 break;
478 case SPEED_100:
479 if (duplex == DUPLEX_FULL)
480 cap = SUPPORTED_100baseT_Full;
481 else
482 cap = SUPPORTED_100baseT_Half;
483 break;
484 case SPEED_1000:
485 if (duplex == DUPLEX_FULL)
486 cap = SUPPORTED_1000baseT_Full;
487 else
488 cap = SUPPORTED_1000baseT_Half;
489 break;
490 case SPEED_10000:
491 if (duplex == DUPLEX_FULL)
492 cap = SUPPORTED_10000baseT_Full;
493 }
494 return cap;
495}
496
497#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
498 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
499 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
500 ADVERTISED_10000baseT_Full)
501
/*
 * ethtool set_settings handler: change autoneg, or force speed/duplex.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;             /* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		/*
		 * NOTE(review): forcing 1000 Mbps is rejected even when the
		 * capability bit is set -- presumably gigabit requires
		 * autonegotiation on this hardware; confirm against the PHY
		 * documentation.
		 */
		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		/* More than one bit set: fall back to everything we support. */
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	/* Apply immediately if the interface is up. */
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
535
536static void get_pauseparam(struct net_device *dev,
537 struct ethtool_pauseparam *epause)
538{
539 struct adapter *adapter = dev->priv;
540 struct port_info *p = &adapter->port[dev->if_port];
541
542 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
543 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
544 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
545}
546
/*
 * ethtool set_pauseparam handler: update the requested flow-control
 * settings and apply them, either via autonegotiation or directly.
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;		/* autoneg pause not supported */

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		/* Renegotiate so the peer learns the new pause settings. */
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		/* No autoneg: program the MAC directly (-1 keeps speed/duplex). */
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}
576
577static u32 get_rx_csum(struct net_device *dev)
578{
579 struct adapter *adapter = dev->priv;
580
581 return (adapter->flags & RX_CSUM_ENABLED) != 0;
582}
583
584static int set_rx_csum(struct net_device *dev, u32 data)
585{
586 struct adapter *adapter = dev->priv;
587
588 if (data)
589 adapter->flags |= RX_CSUM_ENABLED;
590 else
591 adapter->flags &= ~RX_CSUM_ENABLED;
592 return 0;
593}
594
595static int set_tso(struct net_device *dev, u32 value)
596{
597 struct adapter *adapter = dev->priv;
598
599 if (!(adapter->flags & TSO_CAPABLE))
600 return value ? -EOPNOTSUPP : 0;
601 return ethtool_op_set_tso(dev, value);
602}
603
604static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
605{
606 struct adapter *adapter = dev->priv;
607 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
608
609 e->rx_max_pending = MAX_RX_BUFFERS;
610 e->rx_mini_max_pending = 0;
611 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
612 e->tx_max_pending = MAX_CMDQ_ENTRIES;
613
614 e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
615 e->rx_mini_pending = 0;
616 e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
617 e->tx_pending = adapter->params.sge.cmdQ_size[0];
618}
619
/*
 * ethtool set_ringparam handler: validate and store new SGE ring sizes.
 * Only allowed before the HW modules are initialized (rings cannot be
 * resized on a live adapter).
 */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	/*
	 * Reject out-of-range sizes; the TX queue must hold at least one
	 * maximally-fragmented packet per port plus one.
	 */
	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;		/* rings already allocated */

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	/* cmdQ1 tracks cmdQ0 but is capped at its own maximum. */
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}
643
/*
 * ethtool set_coalesce handler.  Stores the RX coalescing delay (in
 * usecs and in core-clock ticks) and the adaptive-coalescing settings,
 * then pushes them to the SGE.
 */
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;

	unsigned int sge_coalesce_usecs = 0;

	/* Convert the cached raw (clock-tick) value back to usecs. */
	sge_coalesce_usecs = adapter->params.sge.last_rx_coalesce_raw;
	sge_coalesce_usecs /= board_info(adapter)->clock_core / 1000000;
	/*
	 * When adaptive coalescing is being switched off and the caller did
	 * not change rx_coalesce_usecs, restore the default delay; otherwise
	 * adopt the caller's value verbatim.
	 */
	if ( (adapter->params.sge.coalesce_enable && !c->use_adaptive_rx_coalesce) &&
	     (c->rx_coalesce_usecs == sge_coalesce_usecs) ) {
		adapter->params.sge.rx_coalesce_usecs =
			adapter->params.sge.default_rx_coalesce_usecs;
	} else {
		adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	}

	/* Re-derive and cache the raw tick count for the next conversion. */
	adapter->params.sge.last_rx_coalesce_raw = adapter->params.sge.rx_coalesce_usecs;
	adapter->params.sge.last_rx_coalesce_raw *= (board_info(adapter)->clock_core / 1000000);
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}
667
/*
 * ethtool get_coalesce handler.  With the adaptive algorithm on, the
 * effective delay is derived from the cached raw clock-tick value;
 * otherwise the stored usecs setting is reported directly.
 */
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;

	if (adapter->params.sge.coalesce_enable) {	/* Adaptive algorithm on */
		c->rx_coalesce_usecs = adapter->params.sge.last_rx_coalesce_raw;
		c->rx_coalesce_usecs /= board_info(adapter)->clock_core / 1000000;
	} else {
		c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
	}
	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
	return 0;
}
682
683static int get_eeprom_len(struct net_device *dev)
684{
685 struct adapter *adapter = dev->priv;
686
687 return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
688}
689
690#define EEPROM_MAGIC(ap) \
691 (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
692
693static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
694 u8 *data)
695{
696 int i;
697 u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
698 struct adapter *adapter = dev->priv;
699
700 e->magic = EEPROM_MAGIC(adapter);
701 for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
702 t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
703 memcpy(data, buf + e->offset, e->len);
704 return 0;
705}
706
/* ethtool operations exported by this driver. */
static struct ethtool_ops t1_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_rx_csum       = get_rx_csum,
	.set_rx_csum       = set_rx_csum,
	.get_tx_csum       = ethtool_op_get_tx_csum,
	.set_tx_csum       = ethtool_op_set_tx_csum,
	.get_sg            = ethtool_op_get_sg,
	.set_sg            = ethtool_op_set_sg,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_stats_count   = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_tso           = ethtool_op_get_tso,
	.set_tso           = set_tso,
};
734
/*
 * Driver-private ethtool-style ioctls for raw register and TPI access.
 * Write paths require CAP_NET_ADMIN; read paths do not.
 */
static int ethtool_ioctl(struct net_device *dev, void *useraddr)
{
	u32 cmd;
	struct adapter *adapter = dev->priv;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case ETHTOOL_SETREG: {
		struct ethtool_reg edata;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		/* Addresses must be word aligned and inside the BAR. */
		if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
			return -EINVAL;
		/*
		 * A couple of registers need side effects beyond a plain
		 * MMIO write, so they are routed to helpers.  0x950 is a
		 * magic SGE packet-timeout address -- TODO confirm against
		 * the register map.
		 */
		if (edata.addr == A_ESPI_MISC_CONTROL)
			t1_espi_set_misc_ctrl(adapter, edata.val);
		else {
			if (edata.addr == 0x950)
				t1_sge_set_ptimeout(adapter, edata.val);
			else
				writel(edata.val, adapter->regs + edata.addr);
		}
		break;
	}
	case ETHTOOL_GETREG: {
		struct ethtool_reg edata;

		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
			return -EINVAL;
		/* 0x900-0x93c reads go through the ESPI monitor helper. */
		if (edata.addr >= 0x900 && edata.addr <= 0x93c)
			edata.val = t1_espi_get_mon(adapter, edata.addr, 1);
		else {
			if (edata.addr == 0x950)
				edata.val = t1_sge_get_ptimeout(adapter);
			else
				edata.val = readl(adapter->regs + edata.addr);
		}
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case ETHTOOL_SETTPI: {
		struct ethtool_reg edata;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if ((edata.addr & 3) != 0)
			return -EINVAL;
		t1_tpi_write(adapter, edata.addr, edata.val);
		break;
	}
	case ETHTOOL_GETTPI: {
		struct ethtool_reg edata;

		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if ((edata.addr & 3) != 0)
			return -EINVAL;
		t1_tpi_read(adapter, edata.addr, &edata.val);
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
811
/*
 * net_device do_ioctl handler: MII register access plus the driver's
 * private SIOCCHETHTOOL escape into ethtool_ioctl().
 */
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct adapter *adapter = dev->priv;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->port[dev->if_port].phy->addr;
		/* FALLTHRU -- SIOCGMIIPHY also returns the register value */
	case SIOCGMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;
		u32 val;

		if (!phy->mdio_read) return -EOPNOTSUPP;
		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
			       &val);
		data->val_out = val;
		break;
	}
	case SIOCSMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;

		/* Writes are privileged. */
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (!phy->mdio_write) return -EOPNOTSUPP;
		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
				data->val_in);
		break;
	}

	case SIOCCHETHTOOL:
		return ethtool_ioctl(dev, (void *)req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
848
849static int t1_change_mtu(struct net_device *dev, int new_mtu)
850{
851 int ret;
852 struct adapter *adapter = dev->priv;
853 struct cmac *mac = adapter->port[dev->if_port].mac;
854
855 if (!mac->ops->set_mtu)
856 return -EOPNOTSUPP;
857 if (new_mtu < 68)
858 return -EINVAL;
859 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
860 return ret;
861 dev->mtu = new_mtu;
862 return 0;
863}
864
865static int t1_set_mac_addr(struct net_device *dev, void *p)
866{
867 struct adapter *adapter = dev->priv;
868 struct cmac *mac = adapter->port[dev->if_port].mac;
869 struct sockaddr *addr = p;
870
871 if (!mac->ops->macaddress_set)
872 return -EOPNOTSUPP;
873
874 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
875 mac->ops->macaddress_set(mac, dev->dev_addr);
876 return 0;
877}
878
879#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/*
 * VLAN layer callback: attach/detach a VLAN group and toggle hardware
 * VLAN acceleration accordingly.  async_lock serializes against the
 * interrupt path that consults vlan_grp.
 */
static void vlan_rx_register(struct net_device *dev,
			     struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	adapter->vlan_grp = grp;
	t1_set_vlan_accel(adapter, grp != NULL);
	spin_unlock_irq(&adapter->async_lock);
}
890
/*
 * VLAN layer callback: remove one VLAN id from the group's device table
 * so RX for that id is no longer delivered to a VLAN device.
 */
static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	if (adapter->vlan_grp)
		adapter->vlan_grp->vlan_devices[vid] = NULL;
	spin_unlock_irq(&adapter->async_lock);
}
900#endif
901
902#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll hook: invoke the interrupt handler directly so netconsole and
 * friends can make progress with interrupts disabled.
 */
static void t1_netpoll(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	t1_interrupt(adapter->pdev->irq, adapter, NULL);
}
909#endif
910
911/*
912 * Periodic accumulation of MAC statistics. This is used only if the MAC
913 * does not have any other way to prevent stats counter overflow.
914 */
/*
 * Periodic accumulation of MAC statistics. This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(void *data)
{
	int i;
	struct adapter *adapter = data;

	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		/* Only poll counters for ports that are actually up. */
		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	/* work_lock keeps the re-arm atomic w.r.t. cxgb_close's cancel. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}
935
936/*
937 * Processes elmer0 external interrupts in process context.
938 */
/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(void *data)
{
	u32 enable;
	struct adapter *adapter = data;

	elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	/* Ack the cause, then unmask both in HW and in the SW mask. */
	t1_write_reg_4(adapter, A_PL_CAUSE, F_PL_INTR_EXT);
	enable = t1_read_reg_4(adapter, A_PL_ENABLE);
	t1_write_reg_4(adapter, A_PL_ENABLE, enable | F_PL_INTR_EXT);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
}
952
953/*
954 * Interrupt-context handler for elmer0 external interrupts.
955 */
/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	u32 enable = t1_read_reg_4(adapter, A_PL_ENABLE);

	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	t1_write_reg_4(adapter, A_PL_ENABLE, enable & ~F_PL_INTR_EXT);
	schedule_work(&adapter->ext_intr_handler_task);
}
969
/*
 * Handle an unrecoverable adapter error: quiesce the SGE and mask all
 * interrupts (if the HW was ever initialized), then log an alert.  The
 * adapter stays suspended until a reset/reload.
 */
void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	CH_ALERT("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}
979
980
981static int __devinit init_one(struct pci_dev *pdev,
982 const struct pci_device_id *ent)
983{
984 static int version_printed;
985
986 int i, err, pci_using_dac = 0;
987 unsigned long mmio_start, mmio_len;
988 const struct board_info *bi;
989 struct adapter *adapter = NULL;
990 struct port_info *pi;
991
992 if (!version_printed) {
993 printk(KERN_INFO "%s - version %s\n", driver_string,
994 driver_version);
995 ++version_printed;
996 }
997
998 err = pci_enable_device(pdev);
999 if (err)
1000 return err;
1001
1002 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1003 CH_ERR("%s: cannot find PCI device memory base address\n",
1004 pci_name(pdev));
1005 err = -ENODEV;
1006 goto out_disable_pdev;
1007 }
1008
1009 if (!pci_set_dma_mask(pdev, PCI_DMA_64BIT)) {
1010 pci_using_dac = 1;
1011 if (pci_set_consistent_dma_mask(pdev, PCI_DMA_64BIT)) {
1012 CH_ERR("%s: unable to obtain 64-bit DMA for"
1013 "consistent allocations\n", pci_name(pdev));
1014 err = -ENODEV;
1015 goto out_disable_pdev;
1016 }
1017 } else if ((err = pci_set_dma_mask(pdev, PCI_DMA_32BIT)) != 0) {
1018 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1019 goto out_disable_pdev;
1020 }
1021
1022 err = pci_request_regions(pdev, driver_name);
1023 if (err) {
1024 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1025 goto out_disable_pdev;
1026 }
1027
1028 pci_set_master(pdev);
1029
1030 mmio_start = pci_resource_start(pdev, 0);
1031 mmio_len = pci_resource_len(pdev, 0);
1032 bi = t1_get_board_info(ent->driver_data);
1033
1034 for (i = 0; i < bi->port_number; ++i) {
1035 struct net_device *netdev;
1036
1037 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1038 if (!netdev) {
1039 err = -ENOMEM;
1040 goto out_free_dev;
1041 }
1042
1043 SET_MODULE_OWNER(netdev);
1044 SET_NETDEV_DEV(netdev, &pdev->dev);
1045
1046 if (!adapter) {
1047 adapter = netdev->priv;
1048 adapter->pdev = pdev;
1049 adapter->port[0].dev = netdev; /* so we don't leak it */
1050
1051 adapter->regs = ioremap(mmio_start, mmio_len);
1052 if (!adapter->regs) {
1053 CH_ERR("%s: cannot map device registers\n",
1054 pci_name(pdev));
1055 err = -ENOMEM;
1056 goto out_free_dev;
1057 }
1058
1059 if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1060 err = -ENODEV; /* Can't handle this chip rev */
1061 goto out_free_dev;
1062 }
1063
1064 adapter->name = pci_name(pdev);
1065 adapter->msg_enable = dflt_msg_enable;
1066 adapter->mmio_len = mmio_len;
1067
1068 init_MUTEX(&adapter->mib_mutex);
1069 spin_lock_init(&adapter->tpi_lock);
1070 spin_lock_init(&adapter->work_lock);
1071 spin_lock_init(&adapter->async_lock);
1072
1073 INIT_WORK(&adapter->ext_intr_handler_task,
1074 ext_intr_task, adapter);
1075 INIT_WORK(&adapter->stats_update_task, mac_stats_task,
1076 adapter);
1077
1078 pci_set_drvdata(pdev, netdev);
1079
1080 }
1081
1082 pi = &adapter->port[i];
1083 pi->dev = netdev;
1084 netif_carrier_off(netdev);
1085 netdev->irq = pdev->irq;
1086 netdev->if_port = i;
1087 netdev->mem_start = mmio_start;
1088 netdev->mem_end = mmio_start + mmio_len - 1;
1089 netdev->priv = adapter;
1090 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1091 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1092 if (pci_using_dac)
1093 netdev->features |= NETIF_F_HIGHDMA;
1094 if (vlan_tso_capable(adapter)) {
1095 adapter->flags |= UDP_CSUM_CAPABLE;
1096#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1097 adapter->flags |= VLAN_ACCEL_CAPABLE;
1098 netdev->features |=
1099 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1100 netdev->vlan_rx_register = vlan_rx_register;
1101 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
1102#endif
1103 adapter->flags |= TSO_CAPABLE;
1104 netdev->features |= NETIF_F_TSO;
1105 }
1106
1107 netdev->open = cxgb_open;
1108 netdev->stop = cxgb_close;
1109 netdev->hard_start_xmit = t1_start_xmit;
1110 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1111 sizeof(struct cpl_tx_pkt_lso) :
1112 sizeof(struct cpl_tx_pkt);
1113 netdev->get_stats = t1_get_stats;
1114 netdev->set_multicast_list = t1_set_rxmode;
1115 netdev->do_ioctl = t1_ioctl;
1116 netdev->change_mtu = t1_change_mtu;
1117 netdev->set_mac_address = t1_set_mac_addr;
1118#ifdef CONFIG_NET_POLL_CONTROLLER
1119 netdev->poll_controller = t1_netpoll;
1120#endif
1121 netdev->weight = 64;
1122
1123 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1124 }
1125
1126 if (t1_init_sw_modules(adapter, bi) < 0) {
1127 err = -ENODEV;
1128 goto out_free_dev;
1129 }
1130
1131 /*
1132 * The card is now ready to go. If any errors occur during device
1133 * registration we do not fail the whole card but rather proceed only
1134 * with the ports we manage to register successfully. However we must
1135 * register at least one net device.
1136 */
1137 for (i = 0; i < bi->port_number; ++i) {
1138 err = register_netdev(adapter->port[i].dev);
1139 if (err)
1140 CH_WARN("%s: cannot register net device %s, skipping\n",
1141 pci_name(pdev), adapter->port[i].dev->name);
1142 else {
1143 /*
1144 * Change the name we use for messages to the name of
1145 * the first successfully registered interface.
1146 */
1147 if (!adapter->registered_device_map)
1148 adapter->name = adapter->port[i].dev->name;
1149
1150 __set_bit(i, &adapter->registered_device_map);
1151 }
1152 }
1153 if (!adapter->registered_device_map) {
1154 CH_ERR("%s: could not register any net devices\n",
1155 pci_name(pdev));
1156 goto out_release_adapter_res;
1157 }
1158
1159 printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1160 bi->desc, adapter->params.chip_revision,
1161 adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1162 adapter->params.pci.speed, adapter->params.pci.width);
1163 return 0;
1164
1165 out_release_adapter_res:
1166 t1_free_sw_modules(adapter);
1167 out_free_dev:
1168 if (adapter) {
1169 if (adapter->regs)
1170 iounmap(adapter->regs);
1171 for (i = bi->port_number - 1; i >= 0; --i)
1172 if (adapter->port[i].dev)
1173 free_netdev(adapter->port[i].dev);
1174 }
1175 pci_release_regions(pdev);
1176 out_disable_pdev:
1177 pci_disable_device(pdev);
1178 pci_set_drvdata(pdev, NULL);
1179 return err;
1180}
1181
1182static inline void t1_sw_reset(struct pci_dev *pdev)
1183{
1184 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1185 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1186}
1187
/*
 * PCI detach callback: undo everything the probe routine set up, in
 * reverse order.  Note the adapter structure itself is allocated as
 * part of port 0's net_device (alloc_etherdev in the probe path), so
 * the netdevs are freed in descending index order to keep the adapter
 * alive while the loop still dereferences it.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		int i;
		struct adapter *adapter = dev->priv;

		/* Only ports that registered successfully have their bit
		 * set in registered_device_map. */
		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i].dev);

		t1_free_sw_modules(adapter);
		iounmap(adapter->regs);
		/* NOTE(review): relies on for_each_port leaving i equal to
		 * the port count — presumably true; confirm against the
		 * macro definition in common.h. */
		while (--i >= 0)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		/* Cycle the PM state so the chip comes up clean on re-probe. */
		t1_sw_reset(pdev);
	}
}
1211
/* PCI driver glue: match table plus probe/remove entry points for the
 * Chelsio T1 family. */
static struct pci_driver driver = {
	.name = driver_name,
	.id_table = t1_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};
1218
1219static int __init t1_init_module(void)
1220{
1221 return pci_module_init(&driver);
1222}
1223
/* Module exit point: detach from the PCI core; the core invokes
 * remove_one for every device still bound to this driver. */
static void __exit t1_cleanup_module(void)
{
	pci_unregister_driver(&driver);
}
1228
1229module_init(t1_init_module);
1230module_exit(t1_cleanup_module);
1231