1 /*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
10
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
17
18 See the file COPYING in this distribution for more information.
19
20 */
21
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/irq.h>
34
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
36 printk(arg)
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
38 printk(arg)
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
40 printk(arg)
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
42 printk(arg)
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
44 printk(arg)
45
46 #define PHY_MAX_ADDR 32
47 #define PHY_ID_ANY 0x1f
48 #define MII_REG_ANY 0x1f
49
50 #ifdef CONFIG_SIS190_NAPI
51 #define NAPI_SUFFIX "-NAPI"
52 #else
53 #define NAPI_SUFFIX ""
54 #endif
55
56 #define DRV_VERSION "1.2" NAPI_SUFFIX
57 #define DRV_NAME "sis190"
58 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
59 #define PFX DRV_NAME ": "
60
61 #ifdef CONFIG_SIS190_NAPI
62 #define sis190_rx_skb netif_receive_skb
63 #define sis190_rx_quota(count, quota) min(count, quota)
64 #else
65 #define sis190_rx_skb netif_rx
66 #define sis190_rx_quota(count, quota) count
67 #endif
68
69 #define MAC_ADDR_LEN 6
70
71 #define NUM_TX_DESC 64 /* [8..1024] */
72 #define NUM_RX_DESC 64 /* [8..8192] */
73 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
74 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
75 #define RX_BUF_SIZE 1536
76 #define RX_BUF_MASK 0xfff8
77
78 #define SIS190_REGS_SIZE 0x80
79 #define SIS190_TX_TIMEOUT (6*HZ)
80 #define SIS190_PHY_TIMEOUT (10*HZ)
81 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
82 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
83 NETIF_MSG_IFDOWN)
84
85 /* Enhanced PHY access register bit definitions */
86 #define EhnMIIread 0x0000
87 #define EhnMIIwrite 0x0020
88 #define EhnMIIdataShift 16
89 #define EhnMIIpmdShift 6 /* 7016 only */
90 #define EhnMIIregShift 11
91 #define EhnMIIreq 0x0010
92 #define EhnMIInotDone 0x0010
93
94 /* Write/read MMIO register */
95 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
96 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
97 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
98 #define SIS_R8(reg) readb (ioaddr + (reg))
99 #define SIS_R16(reg) readw (ioaddr + (reg))
100 #define SIS_R32(reg) readl (ioaddr + (reg))
101
102 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
103
104 enum sis190_registers {
105 TxControl = 0x00,
106 TxDescStartAddr = 0x04,
107 rsv0 = 0x08, // reserved
108 TxSts = 0x0c, // unused (Control/Status)
109 RxControl = 0x10,
110 RxDescStartAddr = 0x14,
111 rsv1 = 0x18, // reserved
112 RxSts = 0x1c, // unused
113 IntrStatus = 0x20,
114 IntrMask = 0x24,
115 IntrControl = 0x28,
116 IntrTimer = 0x2c, // unused (Interrupt Timer)
117 PMControl = 0x30, // unused (Power Mgmt Control/Status)
118 rsv2 = 0x34, // reserved
119 ROMControl = 0x38,
120 ROMInterface = 0x3c,
121 StationControl = 0x40,
122 GMIIControl = 0x44,
123 GIoCR = 0x48, // unused (GMAC IO Compensation)
124 GIoCtrl = 0x4c, // unused (GMAC IO Control)
125 TxMacControl = 0x50,
126 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
127 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
128 rsv3 = 0x5c, // reserved
129 RxMacControl = 0x60,
130 RxMacAddr = 0x62,
131 RxHashTable = 0x68,
132 // Undocumented = 0x6c,
133 RxWolCtrl = 0x70,
134 RxWolData = 0x74, // unused (Rx WOL Data Access)
135 RxMPSControl = 0x78, // unused (Rx MPS Control)
136 rsv4 = 0x7c, // reserved
137 };
138
139 enum sis190_register_content {
140 /* IntrStatus */
141 SoftInt = 0x40000000, // unused
142 Timeup = 0x20000000, // unused
143 PauseFrame = 0x00080000, // unused
144 MagicPacket = 0x00040000, // unused
145 WakeupFrame = 0x00020000, // unused
146 LinkChange = 0x00010000,
147 RxQEmpty = 0x00000080,
148 RxQInt = 0x00000040,
149 TxQ1Empty = 0x00000020, // unused
150 TxQ1Int = 0x00000010,
151 TxQ0Empty = 0x00000008, // unused
152 TxQ0Int = 0x00000004,
153 RxHalt = 0x00000002,
154 TxHalt = 0x00000001,
155
156 /* {Rx/Tx}CmdBits */
157 CmdReset = 0x10,
158 CmdRxEnb = 0x08, // unused
159 CmdTxEnb = 0x01,
160 RxBufEmpty = 0x01, // unused
161
162 /* Cfg9346Bits */
163 Cfg9346_Lock = 0x00, // unused
164 Cfg9346_Unlock = 0xc0, // unused
165
166 /* RxMacControl */
167 AcceptErr = 0x20, // unused
168 AcceptRunt = 0x10, // unused
169 AcceptBroadcast = 0x0800,
170 AcceptMulticast = 0x0400,
171 AcceptMyPhys = 0x0200,
172 AcceptAllPhys = 0x0100,
173
174 /* RxConfigBits */
175 RxCfgFIFOShift = 13,
176 RxCfgDMAShift = 8, // 0x1a in RxControl ?
177
178 /* TxConfigBits */
179 TxInterFrameGapShift = 24,
180 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
181
182 LinkStatus = 0x02, // unused
183 FullDup = 0x01, // unused
184
185 /* TBICSRBit */
186 TBILinkOK = 0x02000000, // unused
187 };
188
189 struct TxDesc {
190 __le32 PSize;
191 __le32 status;
192 __le32 addr;
193 __le32 size;
194 };
195
196 struct RxDesc {
197 __le32 PSize;
198 __le32 status;
199 __le32 addr;
200 __le32 size;
201 };
202
203 enum _DescStatusBit {
204 /* _Desc.status */
205 OWNbit = 0x80000000, // RXOWN/TXOWN
206 INTbit = 0x40000000, // RXINT/TXINT
207 CRCbit = 0x00020000, // CRCOFF/CRCEN
208 PADbit = 0x00010000, // PREADD/PADEN
209 /* _Desc.size */
210 RingEnd = 0x80000000,
211 /* TxDesc.status */
212 LSEN = 0x08000000, // TSO ? -- FR
213 IPCS = 0x04000000,
214 TCPCS = 0x02000000,
215 UDPCS = 0x01000000,
216 BSTEN = 0x00800000,
217 EXTEN = 0x00400000,
218 DEFEN = 0x00200000,
219 BKFEN = 0x00100000,
220 CRSEN = 0x00080000,
221 COLEN = 0x00040000,
222 THOL3 = 0x30000000,
223 THOL2 = 0x20000000,
224 THOL1 = 0x10000000,
225 THOL0 = 0x00000000,
226 /* RxDesc.status */
227 IPON = 0x20000000,
228 TCPON = 0x10000000,
229 UDPON = 0x08000000,
230 Wakup = 0x00400000,
231 Magic = 0x00200000,
232 Pause = 0x00100000,
233 DEFbit = 0x00200000,
234 BCAST = 0x000c0000,
235 MCAST = 0x00080000,
236 UCAST = 0x00040000,
237 /* RxDesc.PSize */
238 TAGON = 0x80000000,
239 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
240 ABORT = 0x00800000,
241 SHORT = 0x00400000,
242 LIMIT = 0x00200000,
243 MIIER = 0x00100000,
244 OVRUN = 0x00080000,
245 NIBON = 0x00040000,
246 COLON = 0x00020000,
247 CRCOK = 0x00010000,
248 RxSizeMask = 0x0000ffff
249 /*
250 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
251 * provide two (unused with Linux) Tx queues. No publically
252 * available documentation alas.
253 */
254 };
255
256 enum sis190_eeprom_access_register_bits {
257 EECS = 0x00000001, // unused
258 EECLK = 0x00000002, // unused
259 EEDO = 0x00000008, // unused
260 EEDI = 0x00000004, // unused
261 EEREQ = 0x00000080,
262 EEROP = 0x00000200,
263 EEWOP = 0x00000100 // unused
264 };
265
266 /* EEPROM Addresses */
267 enum sis190_eeprom_address {
268 EEPROMSignature = 0x00,
269 EEPROMCLK = 0x01, // unused
270 EEPROMInfo = 0x02,
271 EEPROMMACAddr = 0x03
272 };
273
274 enum sis190_feature {
275 F_HAS_RGMII = 1,
276 F_PHY_88E1111 = 2,
277 F_PHY_BCM5461 = 4
278 };
279
280 struct sis190_private {
281 void __iomem *mmio_addr;
282 struct pci_dev *pci_dev;
283 struct net_device *dev;
284 struct net_device_stats stats;
285 spinlock_t lock;
286 u32 rx_buf_sz;
287 u32 cur_rx;
288 u32 cur_tx;
289 u32 dirty_rx;
290 u32 dirty_tx;
291 dma_addr_t rx_dma;
292 dma_addr_t tx_dma;
293 struct RxDesc *RxDescRing;
294 struct TxDesc *TxDescRing;
295 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
296 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
297 struct work_struct phy_task;
298 struct timer_list timer;
299 u32 msg_enable;
300 struct mii_if_info mii_if;
301 struct list_head first_phy;
302 u32 features;
303 };
304
305 struct sis190_phy {
306 struct list_head list;
307 int phy_id;
308 u16 id[2];
309 u16 status;
310 u8 type;
311 };
312
313 enum sis190_phy_type {
314 UNKNOWN = 0x00,
315 HOME = 0x01,
316 LAN = 0x02,
317 MIX = 0x03
318 };
319
320 static struct mii_chip_info {
321 const char *name;
322 u16 id[2];
323 unsigned int type;
324 u32 feature;
325 } mii_chip_table[] = {
326 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
327 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
328 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
329 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
330 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
331 { NULL, }
332 };
333
334 static const struct {
335 const char *name;
336 } sis_chip_info[] = {
337 { "SiS 190 PCI Fast Ethernet adapter" },
338 { "SiS 191 PCI Gigabit Ethernet adapter" },
339 };
340
341 static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
342 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
343 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
344 { 0, },
345 };
346
347 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
348
349 static int rx_copybreak = 200;
350
351 static struct {
352 u32 msg_enable;
353 } debug = { -1 };
354
355 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
356 module_param(rx_copybreak, int, 0);
357 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
358 module_param_named(debug, debug.msg_enable, int, 0);
359 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
360 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
361 MODULE_VERSION(DRV_VERSION);
362 MODULE_LICENSE("GPL");
363
364 static const u32 sis190_intr_mask =
365 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
366
367 /*
368 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
369 * The chips use a 64 element hash table based on the Ethernet CRC.
370 */
371 static const int multicast_filter_limit = 32;
372
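/*
 * MDIO access goes through the GMIIControl register: request bit,
 * read/write opcode, register number (<< EhnMIIregShift), PHY address
 * (<< EhnMIIpmdShift) and, for writes, the data (<< EhnMIIdataShift)
 * are packed into a single command word.  Completion is signalled by
 * the chip clearing EhnMIInotDone.
 */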
373 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
374 {
375 unsigned int i;
376
377 SIS_W32(GMIIControl, ctl);
378
379 msleep(1);
380
381 for (i = 0; i < 100; i++) {
382 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
383 break;
384 msleep(1);
385 }
386
387 if (i > 99)
388 printk(KERN_ERR PFX "PHY command failed!\n");
389 }
390
391 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
392 {
393 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
394 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
395 (((u32) val) << EhnMIIdataShift));
396 }
397
398 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
399 {
400 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
401 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
402
403 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
404 }
405
406 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
407 {
408 struct sis190_private *tp = netdev_priv(dev);
409
410 mdio_write(tp->mmio_addr, phy_id, reg, val);
411 }
412
413 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
414 {
415 struct sis190_private *tp = netdev_priv(dev);
416
417 return mdio_read(tp->mmio_addr, phy_id, reg);
418 }
419
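/*
 * Some MII status bits (e.g. BMSR link status) are latched: read the
 * register twice and return the second, current value.
 */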
420 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
421 {
422 mdio_read(ioaddr, phy_id, reg);
423 return mdio_read(ioaddr, phy_id, reg);
424 }
425
426 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
427 {
428 u16 data = 0xffff;
429 unsigned int i;
430
431 if (!(SIS_R32(ROMControl) & 0x0002))
432 return 0;
433
434 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
435
436 for (i = 0; i < 200; i++) {
437 if (!(SIS_R32(ROMInterface) & EEREQ)) {
438 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
439 break;
440 }
441 msleep(1);
442 }
443
444 return data;
445 }
446
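/*
 * Mask every interrupt source and acknowledge anything pending.  The
 * SIS_PCI_COMMIT() read of IntrControl flushes the posted writes.
 */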
447 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
448 {
449 SIS_W32(IntrMask, 0x00);
450 SIS_W32(IntrStatus, 0xffffffff);
451 SIS_PCI_COMMIT();
452 }
453
454 static void sis190_asic_down(void __iomem *ioaddr)
455 {
456 /* Stop the chip's Tx and Rx DMA processes. */
457
458 SIS_W32(TxControl, 0x1a00);
459 SIS_W32(RxControl, 0x1a00);
460
461 sis190_irq_mask_and_ack(ioaddr);
462 }
463
464 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
465 {
466 desc->size |= cpu_to_le32(RingEnd);
467 }
468
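/*
 * Return a descriptor to the NIC: clear PSize, restore the buffer size
 * (preserving the RingEnd marker) and, after a write barrier, hand
 * ownership back by setting OWNbit | INTbit.
 */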
469 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
470 {
471 u32 eor = le32_to_cpu(desc->size) & RingEnd;
472
473 desc->PSize = 0x0;
474 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
475 wmb();
476 desc->status = cpu_to_le32(OWNbit | INTbit);
477 }
478
479 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
480 u32 rx_buf_sz)
481 {
482 desc->addr = cpu_to_le32(mapping);
483 sis190_give_to_asic(desc, rx_buf_sz);
484 }
485
486 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
487 {
488 desc->PSize = 0x0;
489 desc->addr = 0xdeadbeef;
490 desc->size &= cpu_to_le32(RingEnd);
491 wmb();
492 desc->status = 0x0;
493 }
494
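/*
 * Allocate and DMA-map an Rx buffer for the given descriptor.  On
 * allocation failure the descriptor is left without ownership so the
 * chip cannot use it.
 */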
495 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
496 struct RxDesc *desc, u32 rx_buf_sz)
497 {
498 struct sk_buff *skb;
499 dma_addr_t mapping;
500 int ret = 0;
501
502 skb = dev_alloc_skb(rx_buf_sz);
503 if (!skb)
504 goto err_out;
505
506 *sk_buff = skb;
507
508 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
509 PCI_DMA_FROMDEVICE);
510
511 sis190_map_to_asic(desc, mapping, rx_buf_sz);
512 out:
513 return ret;
514
515 err_out:
516 ret = -ENOMEM;
517 sis190_make_unusable_by_asic(desc);
518 goto out;
519 }
520
521 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
522 u32 start, u32 end)
523 {
524 u32 cur;
525
526 for (cur = start; cur < end; cur++) {
527 int ret, i = cur % NUM_RX_DESC;
528
529 if (tp->Rx_skbuff[i])
530 continue;
531
532 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
533 tp->RxDescRing + i, tp->rx_buf_sz);
534 if (ret < 0)
535 break;
536 }
537 return cur - start;
538 }
539
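/*
 * For frames smaller than rx_copybreak, copy the data into a freshly
 * allocated skb so the original DMA buffer can be recycled; larger
 * frames are passed up directly and the buffer is replaced later by
 * sis190_rx_fill().
 */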
540 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
541 struct RxDesc *desc, int rx_buf_sz)
542 {
543 int ret = -1;
544
545 if (pkt_size < rx_copybreak) {
546 struct sk_buff *skb;
547
548 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
549 if (skb) {
550 skb_reserve(skb, NET_IP_ALIGN);
551 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
552 *sk_buff = skb;
553 sis190_give_to_asic(desc, rx_buf_sz);
554 ret = 0;
555 }
556 }
557 return ret;
558 }
559
560 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
561 {
562 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
563
564 if ((status & CRCOK) && !(status & ErrMask))
565 return 0;
566
567 if (!(status & CRCOK))
568 stats->rx_crc_errors++;
569 else if (status & OVRUN)
570 stats->rx_over_errors++;
571 else if (status & (SHORT | LIMIT))
572 stats->rx_length_errors++;
573 else if (status & (MIIER | NIBON | COLON))
574 stats->rx_frame_errors++;
575
576 stats->rx_errors++;
577 return -1;
578 }
579
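/*
 * Rx handler: walk the descriptors released by the chip (OWNbit
 * cleared), drop erroneous frames, hand good ones to the stack and
 * refill the ring with fresh buffers.
 */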
580 static int sis190_rx_interrupt(struct net_device *dev,
581 struct sis190_private *tp, void __iomem *ioaddr)
582 {
583 struct net_device_stats *stats = &tp->stats;
584 u32 rx_left, cur_rx = tp->cur_rx;
585 u32 delta, count;
586
587 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
588 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
589
590 for (; rx_left > 0; rx_left--, cur_rx++) {
591 unsigned int entry = cur_rx % NUM_RX_DESC;
592 struct RxDesc *desc = tp->RxDescRing + entry;
593 u32 status;
594
595 if (le32_to_cpu(desc->status) & OWNbit)
596 break;
597
598 status = le32_to_cpu(desc->PSize);
599
600 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
601 // status);
602
603 if (sis190_rx_pkt_err(status, stats) < 0)
604 sis190_give_to_asic(desc, tp->rx_buf_sz);
605 else {
606 struct sk_buff *skb = tp->Rx_skbuff[entry];
607 int pkt_size = (status & RxSizeMask) - 4;
608 void (*pci_action)(struct pci_dev *, dma_addr_t,
609 size_t, int) = pci_dma_sync_single_for_device;
610
611 if (unlikely(pkt_size > tp->rx_buf_sz)) {
612 net_intr(tp, KERN_INFO
613 "%s: (frag) status = %08x.\n",
614 dev->name, status);
615 stats->rx_dropped++;
616 stats->rx_length_errors++;
617 sis190_give_to_asic(desc, tp->rx_buf_sz);
618 continue;
619 }
620
621 pci_dma_sync_single_for_cpu(tp->pci_dev,
622 le32_to_cpu(desc->addr), tp->rx_buf_sz,
623 PCI_DMA_FROMDEVICE);
624
625 if (sis190_try_rx_copy(&skb, pkt_size, desc,
626 tp->rx_buf_sz)) {
627 pci_action = pci_unmap_single;
628 tp->Rx_skbuff[entry] = NULL;
629 sis190_make_unusable_by_asic(desc);
630 }
631
632 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
633 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
634
635 skb_put(skb, pkt_size);
636 skb->protocol = eth_type_trans(skb, dev);
637
638 sis190_rx_skb(skb);
639
640 dev->last_rx = jiffies;
641 stats->rx_packets++;
642 stats->rx_bytes += pkt_size;
643 if ((status & BCAST) == MCAST)
644 stats->multicast++;
645 }
646 }
647 count = cur_rx - tp->cur_rx;
648 tp->cur_rx = cur_rx;
649
650 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
651 if (!delta && count && netif_msg_intr(tp))
652 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
653 tp->dirty_rx += delta;
654
655 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
656 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
657
658 return count;
659 }
660
661 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
662 struct TxDesc *desc)
663 {
664 unsigned int len;
665
666 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
667
668 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
669
670 memset(desc, 0x00, sizeof(*desc));
671 }
672
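/*
 * Tx completion handler: reclaim the descriptors the chip has finished
 * with, unmap and free their skbs, and wake the queue if it had been
 * stopped because the ring was full.
 */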
673 static void sis190_tx_interrupt(struct net_device *dev,
674 struct sis190_private *tp, void __iomem *ioaddr)
675 {
676 u32 pending, dirty_tx = tp->dirty_tx;
677 /*
678 * It would not be needed if queueing was allowed to be enabled
679 * again too early (hint: think preempt and unclocked smp systems).
680 */
681 unsigned int queue_stopped;
682
683 smp_rmb();
684 pending = tp->cur_tx - dirty_tx;
685 queue_stopped = (pending == NUM_TX_DESC);
686
687 for (; pending; pending--, dirty_tx++) {
688 unsigned int entry = dirty_tx % NUM_TX_DESC;
689 struct TxDesc *txd = tp->TxDescRing + entry;
690 struct sk_buff *skb;
691
692 if (le32_to_cpu(txd->status) & OWNbit)
693 break;
694
695 skb = tp->Tx_skbuff[entry];
696
697 tp->stats.tx_packets++;
698 tp->stats.tx_bytes += skb->len;
699
700 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
701 tp->Tx_skbuff[entry] = NULL;
702 dev_kfree_skb_irq(skb);
703 }
704
705 if (tp->dirty_tx != dirty_tx) {
706 tp->dirty_tx = dirty_tx;
707 smp_wmb();
708 if (queue_stopped)
709 netif_wake_queue(dev);
710 }
711 }
712
713 /*
714 * The interrupt handler does all of the Rx thread work and cleans up after
715 * the Tx thread.
716 */
717 static irqreturn_t sis190_interrupt(int irq, void *__dev)
718 {
719 struct net_device *dev = __dev;
720 struct sis190_private *tp = netdev_priv(dev);
721 void __iomem *ioaddr = tp->mmio_addr;
722 unsigned int handled = 0;
723 u32 status;
724
725 status = SIS_R32(IntrStatus);
726
727 if ((status == 0xffffffff) || !status)
728 goto out;
729
730 handled = 1;
731
732 if (unlikely(!netif_running(dev))) {
733 sis190_asic_down(ioaddr);
734 goto out;
735 }
736
737 SIS_W32(IntrStatus, status);
738
739 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
740
741 if (status & LinkChange) {
742 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
743 schedule_work(&tp->phy_task);
744 }
745
746 if (status & RxQInt)
747 sis190_rx_interrupt(dev, tp, ioaddr);
748
749 if (status & TxQ0Int)
750 sis190_tx_interrupt(dev, tp, ioaddr);
751 out:
752 return IRQ_RETVAL(handled);
753 }
754
755 #ifdef CONFIG_NET_POLL_CONTROLLER
756 static void sis190_netpoll(struct net_device *dev)
757 {
758 struct sis190_private *tp = netdev_priv(dev);
759 struct pci_dev *pdev = tp->pci_dev;
760
761 disable_irq(pdev->irq);
762 sis190_interrupt(pdev->irq, dev);
763 enable_irq(pdev->irq);
764 }
765 #endif
766
767 static void sis190_free_rx_skb(struct sis190_private *tp,
768 struct sk_buff **sk_buff, struct RxDesc *desc)
769 {
770 struct pci_dev *pdev = tp->pci_dev;
771
772 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
773 PCI_DMA_FROMDEVICE);
774 dev_kfree_skb(*sk_buff);
775 *sk_buff = NULL;
776 sis190_make_unusable_by_asic(desc);
777 }
778
779 static void sis190_rx_clear(struct sis190_private *tp)
780 {
781 unsigned int i;
782
783 for (i = 0; i < NUM_RX_DESC; i++) {
784 if (!tp->Rx_skbuff[i])
785 continue;
786 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
787 }
788 }
789
790 static void sis190_init_ring_indexes(struct sis190_private *tp)
791 {
792 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
793 }
794
795 static int sis190_init_ring(struct net_device *dev)
796 {
797 struct sis190_private *tp = netdev_priv(dev);
798
799 sis190_init_ring_indexes(tp);
800
801 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
802 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
803
804 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
805 goto err_rx_clear;
806
807 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
808
809 return 0;
810
811 err_rx_clear:
812 sis190_rx_clear(tp);
813 return -ENOMEM;
814 }
815
816 static void sis190_set_rx_mode(struct net_device *dev)
817 {
818 struct sis190_private *tp = netdev_priv(dev);
819 void __iomem *ioaddr = tp->mmio_addr;
820 unsigned long flags;
821 u32 mc_filter[2]; /* Multicast hash filter */
822 u16 rx_mode;
823
824 if (dev->flags & IFF_PROMISC) {
825 rx_mode =
826 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
827 AcceptAllPhys;
828 mc_filter[1] = mc_filter[0] = 0xffffffff;
829 } else if ((dev->mc_count > multicast_filter_limit) ||
830 (dev->flags & IFF_ALLMULTI)) {
831 /* Too many to filter perfectly -- accept all multicasts. */
832 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
833 mc_filter[1] = mc_filter[0] = 0xffffffff;
834 } else {
835 struct dev_mc_list *mclist;
836 unsigned int i;
837
838 rx_mode = AcceptBroadcast | AcceptMyPhys;
839 mc_filter[1] = mc_filter[0] = 0;
840 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
841 i++, mclist = mclist->next) {
842 int bit_nr =
843 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
844 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
845 rx_mode |= AcceptMulticast;
846 }
847 }
848
849 spin_lock_irqsave(&tp->lock, flags);
850
851 SIS_W16(RxMacControl, rx_mode | 0x2);
852 SIS_W32(RxHashTable, mc_filter[0]);
853 SIS_W32(RxHashTable + 4, mc_filter[1]);
854
855 spin_unlock_irqrestore(&tp->lock, flags);
856 }
857
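/* Request a soft reset via IntrControl, then stop the DMA engines. */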
858 static void sis190_soft_reset(void __iomem *ioaddr)
859 {
860 SIS_W32(IntrControl, 0x8000);
861 SIS_PCI_COMMIT();
862 msleep(1);
863 SIS_W32(IntrControl, 0x0);
864 sis190_asic_down(ioaddr);
865 msleep(1);
866 }
867
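/*
 * (Re)start the hardware: soft reset, program the descriptor ring base
 * addresses, reset MAC/WoL state, apply the Rx filter, unmask the
 * interrupts and enable the Tx/Rx engines.
 */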
868 static void sis190_hw_start(struct net_device *dev)
869 {
870 struct sis190_private *tp = netdev_priv(dev);
871 void __iomem *ioaddr = tp->mmio_addr;
872
873 sis190_soft_reset(ioaddr);
874
875 SIS_W32(TxDescStartAddr, tp->tx_dma);
876 SIS_W32(RxDescStartAddr, tp->rx_dma);
877
878 SIS_W32(IntrStatus, 0xffffffff);
879 SIS_W32(IntrMask, 0x0);
880 SIS_W32(GMIIControl, 0x0);
881 SIS_W32(TxMacControl, 0x60);
882 SIS_W16(RxMacControl, 0x02);
883 SIS_W32(RxHashTable, 0x0);
884 SIS_W32(0x6c, 0x0);
885 SIS_W32(RxWolCtrl, 0x0);
886 SIS_W32(RxWolData, 0x0);
887
888 SIS_PCI_COMMIT();
889
890 sis190_set_rx_mode(dev);
891
892 /* Enable all known interrupts by setting the interrupt mask. */
893 SIS_W32(IntrMask, sis190_intr_mask);
894
895 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
896 SIS_W32(RxControl, 0x1a1d);
897
898 netif_start_queue(dev);
899 }
900
901 static void sis190_phy_task(struct work_struct *work)
902 {
903 struct sis190_private *tp =
904 container_of(work, struct sis190_private, phy_task);
905 struct net_device *dev = tp->dev;
906 void __iomem *ioaddr = tp->mmio_addr;
907 int phy_id = tp->mii_if.phy_id;
908 u16 val;
909
910 rtnl_lock();
911
912 if (!netif_running(dev))
913 goto out_unlock;
914
915 val = mdio_read(ioaddr, phy_id, MII_BMCR);
916 if (val & BMCR_RESET) {
917 // FIXME: needlessly high ? -- FR 02/07/2005
918 mod_timer(&tp->timer, jiffies + HZ/10);
919 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
920 BMSR_ANEGCOMPLETE)) {
921 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
922 dev->name);
923 netif_carrier_off(dev);
924 mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
925 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
926 } else {
927 /* Rejoice ! */
928 struct {
929 int val;
930 u32 ctl;
931 const char *msg;
932 } reg31[] = {
933 { LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
934 "1000 Mbps Full Duplex" },
935 { LPA_1000XHALF | LPA_SLCT, 0x07000c00,
936 "1000 Mbps Half Duplex" },
937 { LPA_100FULL, 0x04000800 | 0x00001000,
938 "100 Mbps Full Duplex" },
939 { LPA_100HALF, 0x04000800,
940 "100 Mbps Half Duplex" },
941 { LPA_10FULL, 0x04000400 | 0x00001000,
942 "10 Mbps Full Duplex" },
943 { LPA_10HALF, 0x04000400,
944 "10 Mbps Half Duplex" },
945 { 0, 0x04000400, "unknown" }
946 }, *p;
947 u16 adv;
948
949 val = mdio_read(ioaddr, phy_id, 0x1f);
950 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
951
952 val = mdio_read(ioaddr, phy_id, MII_LPA);
953 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
954 net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
955 dev->name, val, adv);
956
957 val &= adv;
958
959 for (p = reg31; p->val; p++) {
960 if ((val & p->val) == p->val)
961 break;
962 }
963
964 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
965
966 if ((tp->features & F_HAS_RGMII) &&
967 (tp->features & F_PHY_BCM5461)) {
968 // Set Tx Delay in RGMII mode.
969 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
970 udelay(200);
971 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
972 p->ctl |= 0x03000000;
973 }
974
975 SIS_W32(StationControl, p->ctl);
976
977 if (tp->features & F_HAS_RGMII) {
978 SIS_W32(RGDelay, 0x0441);
979 SIS_W32(RGDelay, 0x0440);
980 }
981
982 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
983 p->msg);
984 netif_carrier_on(dev);
985 }
986
987 out_unlock:
988 rtnl_unlock();
989 }
990
991 static void sis190_phy_timer(unsigned long __opaque)
992 {
993 struct net_device *dev = (struct net_device *)__opaque;
994 struct sis190_private *tp = netdev_priv(dev);
995
996 if (likely(netif_running(dev)))
997 schedule_work(&tp->phy_task);
998 }
999
1000 static inline void sis190_delete_timer(struct net_device *dev)
1001 {
1002 struct sis190_private *tp = netdev_priv(dev);
1003
1004 del_timer_sync(&tp->timer);
1005 }
1006
1007 static inline void sis190_request_timer(struct net_device *dev)
1008 {
1009 struct sis190_private *tp = netdev_priv(dev);
1010 struct timer_list *timer = &tp->timer;
1011
1012 init_timer(timer);
1013 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1014 timer->data = (unsigned long)dev;
1015 timer->function = sis190_phy_timer;
1016 add_timer(timer);
1017 }
1018
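/*
 * The Rx buffer must cover the MTU and be a multiple of 8 bytes, since
 * the chip ignores the low bits of RxDesc->size (see RX_BUF_MASK).
 */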
1019 static void sis190_set_rxbufsize(struct sis190_private *tp,
1020 struct net_device *dev)
1021 {
1022 unsigned int mtu = dev->mtu;
1023
1024 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1025 /* RxDesc->size has a licence to kill the lower bits */
1026 if (tp->rx_buf_sz & 0x07) {
1027 tp->rx_buf_sz += 8;
1028 tp->rx_buf_sz &= RX_BUF_MASK;
1029 }
1030 }
1031
1032 static int sis190_open(struct net_device *dev)
1033 {
1034 struct sis190_private *tp = netdev_priv(dev);
1035 struct pci_dev *pdev = tp->pci_dev;
1036 int rc = -ENOMEM;
1037
1038 sis190_set_rxbufsize(tp, dev);
1039
1040 /*
1041 * Rx and Tx descriptors need 256 bytes alignment.
1042 * pci_alloc_consistent() guarantees a stronger alignment.
1043 */
1044 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1045 if (!tp->TxDescRing)
1046 goto out;
1047
1048 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1049 if (!tp->RxDescRing)
1050 goto err_free_tx_0;
1051
1052 rc = sis190_init_ring(dev);
1053 if (rc < 0)
1054 goto err_free_rx_1;
1055
1056 INIT_WORK(&tp->phy_task, sis190_phy_task);
1057
1058 sis190_request_timer(dev);
1059
1060 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
1061 if (rc < 0)
1062 goto err_release_timer_2;
1063
1064 sis190_hw_start(dev);
1065 out:
1066 return rc;
1067
1068 err_release_timer_2:
1069 sis190_delete_timer(dev);
1070 sis190_rx_clear(tp);
1071 err_free_rx_1:
1072 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1073 tp->rx_dma);
1074 err_free_tx_0:
1075 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1076 tp->tx_dma);
1077 goto out;
1078 }
1079
1080 static void sis190_tx_clear(struct sis190_private *tp)
1081 {
1082 unsigned int i;
1083
1084 for (i = 0; i < NUM_TX_DESC; i++) {
1085 struct sk_buff *skb = tp->Tx_skbuff[i];
1086
1087 if (!skb)
1088 continue;
1089
1090 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1091 tp->Tx_skbuff[i] = NULL;
1092 dev_kfree_skb(skb);
1093
1094 tp->stats.tx_dropped++;
1095 }
1096 tp->cur_tx = tp->dirty_tx = 0;
1097 }
1098
1099 static void sis190_down(struct net_device *dev)
1100 {
1101 struct sis190_private *tp = netdev_priv(dev);
1102 void __iomem *ioaddr = tp->mmio_addr;
1103 unsigned int poll_locked = 0;
1104
1105 sis190_delete_timer(dev);
1106
1107 netif_stop_queue(dev);
1108
1109 do {
1110 spin_lock_irq(&tp->lock);
1111
1112 sis190_asic_down(ioaddr);
1113
1114 spin_unlock_irq(&tp->lock);
1115
1116 synchronize_irq(dev->irq);
1117
1118 if (!poll_locked) {
1119 netif_poll_disable(dev);
1120 poll_locked++;
1121 }
1122
1123 synchronize_sched();
1124
1125 } while (SIS_R32(IntrMask));
1126
1127 sis190_tx_clear(tp);
1128 sis190_rx_clear(tp);
1129 }
1130
1131 static int sis190_close(struct net_device *dev)
1132 {
1133 struct sis190_private *tp = netdev_priv(dev);
1134 struct pci_dev *pdev = tp->pci_dev;
1135
1136 sis190_down(dev);
1137
1138 free_irq(dev->irq, dev);
1139
1140 netif_poll_enable(dev);
1141
1142 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1143 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1144
1145 tp->TxDescRing = NULL;
1146 tp->RxDescRing = NULL;
1147
1148 return 0;
1149 }
1150
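/*
 * Queue one skb for transmission: pad short frames to ETH_ZLEN, map the
 * data, fill the descriptor, hand ownership to the chip and kick the Tx
 * engine through TxControl.  The queue is stopped while the ring is full.
 */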
1151 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1152 {
1153 struct sis190_private *tp = netdev_priv(dev);
1154 void __iomem *ioaddr = tp->mmio_addr;
1155 u32 len, entry, dirty_tx;
1156 struct TxDesc *desc;
1157 dma_addr_t mapping;
1158
1159 if (unlikely(skb->len < ETH_ZLEN)) {
1160 if (skb_padto(skb, ETH_ZLEN)) {
1161 tp->stats.tx_dropped++;
1162 goto out;
1163 }
1164 len = ETH_ZLEN;
1165 } else {
1166 len = skb->len;
1167 }
1168
1169 entry = tp->cur_tx % NUM_TX_DESC;
1170 desc = tp->TxDescRing + entry;
1171
1172 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1173 netif_stop_queue(dev);
1174 net_tx_err(tp, KERN_ERR PFX
1175 "%s: BUG! Tx Ring full when queue awake!\n",
1176 dev->name);
1177 return NETDEV_TX_BUSY;
1178 }
1179
1180 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1181
1182 tp->Tx_skbuff[entry] = skb;
1183
1184 desc->PSize = cpu_to_le32(len);
1185 desc->addr = cpu_to_le32(mapping);
1186
1187 desc->size = cpu_to_le32(len);
1188 if (entry == (NUM_TX_DESC - 1))
1189 desc->size |= cpu_to_le32(RingEnd);
1190
1191 wmb();
1192
1193 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1194
1195 tp->cur_tx++;
1196
1197 smp_wmb();
1198
1199 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1200
1201 dev->trans_start = jiffies;
1202
1203 dirty_tx = tp->dirty_tx;
1204 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1205 netif_stop_queue(dev);
1206 smp_rmb();
1207 if (dirty_tx != tp->dirty_tx)
1208 netif_wake_queue(dev);
1209 }
1210 out:
1211 return NETDEV_TX_OK;
1212 }
1213
1214 static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1215 {
1216 struct sis190_private *tp = netdev_priv(dev);
1217
1218 return &tp->stats;
1219 }
1220
1221 static void sis190_free_phy(struct list_head *first_phy)
1222 {
1223 struct sis190_phy *cur, *next;
1224
1225 list_for_each_entry_safe(cur, next, first_phy, list) {
1226 kfree(cur);
1227 }
1228 }
1229
1230 /**
1231 * sis190_default_phy - Select default PHY for sis190 mac.
1232 * @dev: the net device to probe for
1233 *
1234 * Select the first detected PHY with link as the default.
1235 * If none has link, select the PHY whose type is HOME as the default.
1236 * If no HOME PHY exists, select a LAN PHY.
1237 */
1238 static u16 sis190_default_phy(struct net_device *dev)
1239 {
1240 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1241 struct sis190_private *tp = netdev_priv(dev);
1242 struct mii_if_info *mii_if = &tp->mii_if;
1243 void __iomem *ioaddr = tp->mmio_addr;
1244 u16 status;
1245
1246 phy_home = phy_default = phy_lan = NULL;
1247
1248 list_for_each_entry(phy, &tp->first_phy, list) {
1249 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1250
1251 // Link is up, no default PHY selected yet, and not a ghost PHY.
1252 if ((status & BMSR_LSTATUS) &&
1253 !phy_default &&
1254 (phy->type != UNKNOWN)) {
1255 phy_default = phy;
1256 } else {
1257 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1258 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1259 status | BMCR_ANENABLE | BMCR_ISOLATE);
1260 if (phy->type == HOME)
1261 phy_home = phy;
1262 else if (phy->type == LAN)
1263 phy_lan = phy;
1264 }
1265 }
1266
1267 if (!phy_default) {
1268 if (phy_home)
1269 phy_default = phy_home;
1270 else if (phy_lan)
1271 phy_default = phy_lan;
1272 else
1273 phy_default = list_entry(&tp->first_phy,
1274 struct sis190_phy, list);
1275 }
1276
1277 if (mii_if->phy_id != phy_default->phy_id) {
1278 mii_if->phy_id = phy_default->phy_id;
1279 net_probe(tp, KERN_INFO
1280 "%s: Using transceiver at address %d as default.\n",
1281 pci_name(tp->pci_dev), mii_if->phy_id);
1282 }
1283
1284 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1285 status &= (~BMCR_ISOLATE);
1286
1287 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1288 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1289
1290 return status;
1291 }
1292
1293 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1294 struct sis190_phy *phy, unsigned int phy_id,
1295 u16 mii_status)
1296 {
1297 void __iomem *ioaddr = tp->mmio_addr;
1298 struct mii_chip_info *p;
1299
1300 INIT_LIST_HEAD(&phy->list);
1301 phy->status = mii_status;
1302 phy->phy_id = phy_id;
1303
1304 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1305 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1306
1307 for (p = mii_chip_table; p->type; p++) {
1308 if ((p->id[0] == phy->id[0]) &&
1309 (p->id[1] == (phy->id[1] & 0xfff0))) {
1310 break;
1311 }
1312 }
1313
1314 if (p->id[1]) {
1315 phy->type = (p->type == MIX) ?
1316 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1317 LAN : HOME) : p->type;
1318 tp->features |= p->feature;
1319 } else
1320 phy->type = UNKNOWN;
1321
1322 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1323 pci_name(tp->pci_dev),
1324 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
1325 }
1326
1327 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1328 {
1329 if (tp->features & F_PHY_88E1111) {
1330 void __iomem *ioaddr = tp->mmio_addr;
1331 int phy_id = tp->mii_if.phy_id;
1332 u16 reg[2][2] = {
1333 { 0x808b, 0x0ce1 },
1334 { 0x808f, 0x0c60 }
1335 }, *p;
1336
1337 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1338
1339 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1340 udelay(200);
1341 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1342 udelay(200);
1343 }
1344 }
1345
1346 /**
1347 * sis190_mii_probe - Probe MII PHY for sis190
1348 * @dev: the net device to probe for
1349 *
1350 * Search all 32 possible MII PHY addresses.
1351 * Identify and set the current PHY if one is found,
1352 * return an error if none is found.
1353 */
1354 static int __devinit sis190_mii_probe(struct net_device *dev)
1355 {
1356 struct sis190_private *tp = netdev_priv(dev);
1357 struct mii_if_info *mii_if = &tp->mii_if;
1358 void __iomem *ioaddr = tp->mmio_addr;
1359 int phy_id;
1360 int rc = 0;
1361
1362 INIT_LIST_HEAD(&tp->first_phy);
1363
1364 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1365 struct sis190_phy *phy;
1366 u16 status;
1367
1368 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1369
1370 // Try next mii if the current one is not accessible.
1371 if (status == 0xffff || status == 0x0000)
1372 continue;
1373
1374 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1375 if (!phy) {
1376 sis190_free_phy(&tp->first_phy);
1377 rc = -ENOMEM;
1378 goto out;
1379 }
1380
1381 sis190_init_phy(dev, tp, phy, phy_id, status);
1382
1383 list_add(&tp->first_phy, &phy->list);
1384 }
1385
1386 if (list_empty(&tp->first_phy)) {
1387 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1388 pci_name(tp->pci_dev));
1389 rc = -EIO;
1390 goto out;
1391 }
1392
1393 /* Select default PHY for mac */
1394 sis190_default_phy(dev);
1395
1396 sis190_mii_probe_88e1111_fixup(tp);
1397
1398 mii_if->dev = dev;
1399 mii_if->mdio_read = __mdio_read;
1400 mii_if->mdio_write = __mdio_write;
1401 mii_if->phy_id_mask = PHY_ID_ANY;
1402 mii_if->reg_num_mask = MII_REG_ANY;
1403 out:
1404 return rc;
1405 }
1406
1407 static void __devexit sis190_mii_remove(struct net_device *dev)
1408 {
1409 struct sis190_private *tp = netdev_priv(dev);
1410
1411 sis190_free_phy(&tp->first_phy);
1412 }
1413
1414 static void sis190_release_board(struct pci_dev *pdev)
1415 {
1416 struct net_device *dev = pci_get_drvdata(pdev);
1417 struct sis190_private *tp = netdev_priv(dev);
1418
1419 iounmap(tp->mmio_addr);
1420 pci_release_regions(pdev);
1421 pci_disable_device(pdev);
1422 free_netdev(dev);
1423 }
1424
1425 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1426 {
1427 struct sis190_private *tp;
1428 struct net_device *dev;
1429 void __iomem *ioaddr;
1430 int rc;
1431
1432 dev = alloc_etherdev(sizeof(*tp));
1433 if (!dev) {
1434 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1435 rc = -ENOMEM;
1436 goto err_out_0;
1437 }
1438
1439 SET_MODULE_OWNER(dev);
1440 SET_NETDEV_DEV(dev, &pdev->dev);
1441
1442 tp = netdev_priv(dev);
1443 tp->dev = dev;
1444 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1445
1446 rc = pci_enable_device(pdev);
1447 if (rc < 0) {
1448 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1449 goto err_free_dev_1;
1450 }
1451
1452 rc = -ENODEV;
1453
1454 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1455 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1456 pci_name(pdev));
1457 goto err_pci_disable_2;
1458 }
1459 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1460 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1461 pci_name(pdev));
1462 goto err_pci_disable_2;
1463 }
1464
1465 rc = pci_request_regions(pdev, DRV_NAME);
1466 if (rc < 0) {
1467 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1468 pci_name(pdev));
1469 goto err_pci_disable_2;
1470 }
1471
1472 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1473 if (rc < 0) {
1474 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1475 pci_name(pdev));
1476 goto err_free_res_3;
1477 }
1478
1479 pci_set_master(pdev);
1480
1481 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1482 if (!ioaddr) {
1483 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1484 pci_name(pdev));
1485 rc = -EIO;
1486 goto err_free_res_3;
1487 }
1488
1489 tp->pci_dev = pdev;
1490 tp->mmio_addr = ioaddr;
1491
1492 sis190_irq_mask_and_ack(ioaddr);
1493
1494 sis190_soft_reset(ioaddr);
1495 out:
1496 return dev;
1497
1498 err_free_res_3:
1499 pci_release_regions(pdev);
1500 err_pci_disable_2:
1501 pci_disable_device(pdev);
1502 err_free_dev_1:
1503 free_netdev(dev);
1504 err_out_0:
1505 dev = ERR_PTR(rc);
1506 goto out;
1507 }
1508
1509 static void sis190_tx_timeout(struct net_device *dev)
1510 {
1511 struct sis190_private *tp = netdev_priv(dev);
1512 void __iomem *ioaddr = tp->mmio_addr;
1513 u8 tmp8;
1514
1515 /* Disable Tx, if not already */
1516 tmp8 = SIS_R8(TxControl);
1517 if (tmp8 & CmdTxEnb)
1518 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1519
1520
1521 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1522 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1523
1524 /* Disable interrupts by clearing the interrupt mask. */
1525 SIS_W32(IntrMask, 0x0000);
1526
1527 /* Stop a shared interrupt from scavenging while we are. */
1528 spin_lock_irq(&tp->lock);
1529 sis190_tx_clear(tp);
1530 spin_unlock_irq(&tp->lock);
1531
1532 /* ...and finally, reset everything. */
1533 sis190_hw_start(dev);
1534
1535 netif_wake_queue(dev);
1536 }
1537
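/* Bit 7 of the EEPROM/APC info byte indicates an RGMII connected PHY. */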
1538 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1539 {
1540 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1541 }
1542
1543 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1544 struct net_device *dev)
1545 {
1546 struct sis190_private *tp = netdev_priv(dev);
1547 void __iomem *ioaddr = tp->mmio_addr;
1548 u16 sig;
1549 int i;
1550
1551 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1552 pci_name(pdev));
1553
1554 /* Check to see if there is a sane EEPROM */
1555 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1556
1557 if ((sig == 0xffff) || (sig == 0x0000)) {
1558 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1559 pci_name(pdev), sig);
1560 return -EIO;
1561 }
1562
1563 /* Get MAC address from EEPROM */
1564 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1565 __le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1566
1567 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(w);
1568 }
1569
1570 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1571
1572 return 0;
1573 }
1574
1575 /**
1576 * sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
1577 * @pdev: PCI device
1578 * @dev: network device to get address for
1579 *
1580 * The SiS965 model stores the MAC address in APC CMOS RAM, which is
1581 * accessed through the ISA bridge.
1582 * The MAC address is read into @dev->dev_addr.
1583 */
1584 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1585 struct net_device *dev)
1586 {
1587 struct sis190_private *tp = netdev_priv(dev);
1588 struct pci_dev *isa_bridge;
1589 u8 reg, tmp8;
1590 int i;
1591
1592 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1593 pci_name(pdev));
1594
1595 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
1596 if (!isa_bridge) {
1597 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1598 pci_name(pdev));
1599 return -EIO;
1600 }
1601
1602 /* Enable port 78h & 79h to access APC Registers. */
1603 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1604 reg = (tmp8 & ~0x02);
1605 pci_write_config_byte(isa_bridge, 0x48, reg);
1606 udelay(50);
1607 pci_read_config_byte(isa_bridge, 0x48, &reg);
1608
1609 for (i = 0; i < MAC_ADDR_LEN; i++) {
1610 outb(0x9 + i, 0x78);
1611 dev->dev_addr[i] = inb(0x79);
1612 }
1613
1614 outb(0x12, 0x78);
1615 reg = inb(0x79);
1616
1617 sis190_set_rgmii(tp, reg);
1618
1619 /* Restore the value to ISA Bridge */
1620 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1621 pci_dev_put(isa_bridge);
1622
1623 return 0;
1624 }
1625
1626 /**
1627 * sis190_init_rxfilter - Initialize the Rx filter
1628 * @dev: network device to initialize
1629 *
1630 * Set receive filter address to our MAC address
1631 * and enable packet filtering.
1632 */
1633 static inline void sis190_init_rxfilter(struct net_device *dev)
1634 {
1635 struct sis190_private *tp = netdev_priv(dev);
1636 void __iomem *ioaddr = tp->mmio_addr;
1637 u16 ctl;
1638 int i;
1639
1640 ctl = SIS_R16(RxMacControl);
1641 /*
1642 * Disable packet filtering before setting filter.
1643 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1644 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1645 */
1646 SIS_W16(RxMacControl, ctl & ~0x0f00);
1647
1648 for (i = 0; i < MAC_ADDR_LEN; i++)
1649 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1650
1651 SIS_W16(RxMacControl, ctl);
1652 SIS_PCI_COMMIT();
1653 }
1654
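/*
 * Bit 0 of PCI config register 0x73 tells whether the MAC address is
 * stored in the APC CMOS RAM (SiS965 bridge) or in the EEPROM.
 */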
1655 static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1656 {
1657 u8 from;
1658
1659 pci_read_config_byte(pdev, 0x73, &from);
1660
1661 return (from & 0x00000001) ?
1662 sis190_get_mac_addr_from_apc(pdev, dev) :
1663 sis190_get_mac_addr_from_eeprom(pdev, dev);
1664 }
1665
1666 static void sis190_set_speed_auto(struct net_device *dev)
1667 {
1668 struct sis190_private *tp = netdev_priv(dev);
1669 void __iomem *ioaddr = tp->mmio_addr;
1670 int phy_id = tp->mii_if.phy_id;
1671 int val;
1672
1673 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1674
1675 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1676
1677 // Enable 10/100 Full/Half modes, leave MII_ADVERTISE bits 4:0
1678 // unchanged.
1679 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1680 ADVERTISE_100FULL | ADVERTISE_10FULL |
1681 ADVERTISE_100HALF | ADVERTISE_10HALF);
1682
1683 // Enable 1000 Full Mode.
1684 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1685
1686 // Enable auto-negotiation and restart auto-negotiation.
1687 mdio_write(ioaddr, phy_id, MII_BMCR,
1688 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1689 }
1690
1691 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1692 {
1693 struct sis190_private *tp = netdev_priv(dev);
1694
1695 return mii_ethtool_gset(&tp->mii_if, cmd);
1696 }
1697
1698 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1699 {
1700 struct sis190_private *tp = netdev_priv(dev);
1701
1702 return mii_ethtool_sset(&tp->mii_if, cmd);
1703 }
1704
1705 static void sis190_get_drvinfo(struct net_device *dev,
1706 struct ethtool_drvinfo *info)
1707 {
1708 struct sis190_private *tp = netdev_priv(dev);
1709
1710 strcpy(info->driver, DRV_NAME);
1711 strcpy(info->version, DRV_VERSION);
1712 strcpy(info->bus_info, pci_name(tp->pci_dev));
1713 }
1714
1715 static int sis190_get_regs_len(struct net_device *dev)
1716 {
1717 return SIS190_REGS_SIZE;
1718 }
1719
1720 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1721 void *p)
1722 {
1723 struct sis190_private *tp = netdev_priv(dev);
1724 unsigned long flags;
1725
1726 if (regs->len > SIS190_REGS_SIZE)
1727 regs->len = SIS190_REGS_SIZE;
1728
1729 spin_lock_irqsave(&tp->lock, flags);
1730 memcpy_fromio(p, tp->mmio_addr, regs->len);
1731 spin_unlock_irqrestore(&tp->lock, flags);
1732 }
1733
1734 static int sis190_nway_reset(struct net_device *dev)
1735 {
1736 struct sis190_private *tp = netdev_priv(dev);
1737
1738 return mii_nway_restart(&tp->mii_if);
1739 }
1740
1741 static u32 sis190_get_msglevel(struct net_device *dev)
1742 {
1743 struct sis190_private *tp = netdev_priv(dev);
1744
1745 return tp->msg_enable;
1746 }
1747
1748 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1749 {
1750 struct sis190_private *tp = netdev_priv(dev);
1751
1752 tp->msg_enable = value;
1753 }
1754
1755 static const struct ethtool_ops sis190_ethtool_ops = {
1756 .get_settings = sis190_get_settings,
1757 .set_settings = sis190_set_settings,
1758 .get_drvinfo = sis190_get_drvinfo,
1759 .get_regs_len = sis190_get_regs_len,
1760 .get_regs = sis190_get_regs,
1761 .get_link = ethtool_op_get_link,
1762 .get_msglevel = sis190_get_msglevel,
1763 .set_msglevel = sis190_set_msglevel,
1764 .nway_reset = sis190_nway_reset,
1765 };
1766
1767 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1768 {
1769 struct sis190_private *tp = netdev_priv(dev);
1770
1771 return !netif_running(dev) ? -EINVAL :
1772 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1773 }
1774
1775 static int __devinit sis190_init_one(struct pci_dev *pdev,
1776 const struct pci_device_id *ent)
1777 {
1778 static int printed_version = 0;
1779 struct sis190_private *tp;
1780 struct net_device *dev;
1781 void __iomem *ioaddr;
1782 int rc;
1783
1784 if (!printed_version) {
1785 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1786 printed_version = 1;
1787 }
1788
1789 dev = sis190_init_board(pdev);
1790 if (IS_ERR(dev)) {
1791 rc = PTR_ERR(dev);
1792 goto out;
1793 }
1794
1795 pci_set_drvdata(pdev, dev);
1796
1797 tp = netdev_priv(dev);
1798 ioaddr = tp->mmio_addr;
1799
1800 rc = sis190_get_mac_addr(pdev, dev);
1801 if (rc < 0)
1802 goto err_release_board;
1803
1804 sis190_init_rxfilter(dev);
1805
1806 INIT_WORK(&tp->phy_task, sis190_phy_task);
1807
1808 dev->open = sis190_open;
1809 dev->stop = sis190_close;
1810 dev->do_ioctl = sis190_ioctl;
1811 dev->get_stats = sis190_get_stats;
1812 dev->tx_timeout = sis190_tx_timeout;
1813 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1814 dev->hard_start_xmit = sis190_start_xmit;
1815 #ifdef CONFIG_NET_POLL_CONTROLLER
1816 dev->poll_controller = sis190_netpoll;
1817 #endif
1818 dev->set_multicast_list = sis190_set_rx_mode;
1819 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1820 dev->irq = pdev->irq;
1821 dev->base_addr = (unsigned long) 0xdead;
1822
1823 spin_lock_init(&tp->lock);
1824
1825 rc = sis190_mii_probe(dev);
1826 if (rc < 0)
1827 goto err_release_board;
1828
1829 rc = register_netdev(dev);
1830 if (rc < 0)
1831 goto err_remove_mii;
1832
1833 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1834 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1835 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1836 ioaddr, dev->irq,
1837 dev->dev_addr[0], dev->dev_addr[1],
1838 dev->dev_addr[2], dev->dev_addr[3],
1839 dev->dev_addr[4], dev->dev_addr[5]);
1840
1841 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1842 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1843
1844 netif_carrier_off(dev);
1845
1846 sis190_set_speed_auto(dev);
1847 out:
1848 return rc;
1849
1850 err_remove_mii:
1851 sis190_mii_remove(dev);
1852 err_release_board:
1853 sis190_release_board(pdev);
1854 goto out;
1855 }
1856
1857 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1858 {
1859 struct net_device *dev = pci_get_drvdata(pdev);
1860
1861 sis190_mii_remove(dev);
1862 flush_scheduled_work();
1863 unregister_netdev(dev);
1864 sis190_release_board(pdev);
1865 pci_set_drvdata(pdev, NULL);
1866 }
1867
1868 static struct pci_driver sis190_pci_driver = {
1869 .name = DRV_NAME,
1870 .id_table = sis190_pci_tbl,
1871 .probe = sis190_init_one,
1872 .remove = __devexit_p(sis190_remove_one),
1873 };
1874
1875 static int __init sis190_init_module(void)
1876 {
1877 return pci_register_driver(&sis190_pci_driver);
1878 }
1879
1880 static void __exit sis190_cleanup_module(void)
1881 {
1882 pci_unregister_driver(&sis190_pci_driver);
1883 }
1884
1885 module_init(sis190_init_module);
1886 module_exit(sis190_cleanup_module);