/*
 * Imported from mirror_ubuntu-hirsute-kernel:
 * drivers/net/ethernet/rdc/r6040.c
 */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * RDC R6040 Fast Ethernet MAC support
4 *
5 * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
6 * Copyright (C) 2007
7 * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
8 * Copyright (C) 2007-2012 Florian Fainelli <f.fainelli@gmail.com>
9 */
10
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/string.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/interrupt.h>
19 #include <linux/pci.h>
20 #include <linux/netdevice.h>
21 #include <linux/etherdevice.h>
22 #include <linux/skbuff.h>
23 #include <linux/delay.h>
24 #include <linux/mii.h>
25 #include <linux/ethtool.h>
26 #include <linux/crc32.h>
27 #include <linux/spinlock.h>
28 #include <linux/bitops.h>
29 #include <linux/io.h>
30 #include <linux/irq.h>
31 #include <linux/uaccess.h>
32 #include <linux/phy.h>
33
34 #include <asm/processor.h>
35
36 #define DRV_NAME "r6040"
37 #define DRV_VERSION "0.29"
38 #define DRV_RELDATE "04Jul2016"
39
40 /* Time in jiffies before concluding the transmitter is hung. */
41 #define TX_TIMEOUT (6000 * HZ / 1000)
42
43 /* RDC MAC I/O Size */
44 #define R6040_IO_SIZE 256
45
46 /* MAX RDC MAC */
47 #define MAX_MAC 2
48
49 /* MAC registers */
50 #define MCR0 0x00 /* Control register 0 */
51 #define MCR0_RCVEN 0x0002 /* Receive enable */
52 #define MCR0_PROMISC 0x0020 /* Promiscuous mode */
53 #define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
54 #define MCR0_XMTEN 0x1000 /* Transmission enable */
55 #define MCR0_FD 0x8000 /* Full/Half duplex */
56 #define MCR1 0x04 /* Control register 1 */
57 #define MAC_RST 0x0001 /* Reset the MAC */
58 #define MBCR 0x08 /* Bus control */
59 #define MT_ICR 0x0C /* TX interrupt control */
60 #define MR_ICR 0x10 /* RX interrupt control */
61 #define MTPR 0x14 /* TX poll command register */
62 #define TM2TX 0x0001 /* Trigger MAC to transmit */
63 #define MR_BSR 0x18 /* RX buffer size */
64 #define MR_DCR 0x1A /* RX descriptor control */
65 #define MLSR 0x1C /* Last status */
66 #define TX_FIFO_UNDR 0x0200 /* TX FIFO under-run */
67 #define TX_EXCEEDC 0x2000 /* Transmit exceed collision */
68 #define TX_LATEC 0x4000 /* Transmit late collision */
69 #define MMDIO 0x20 /* MDIO control register */
70 #define MDIO_WRITE 0x4000 /* MDIO write */
71 #define MDIO_READ 0x2000 /* MDIO read */
72 #define MMRD 0x24 /* MDIO read data register */
73 #define MMWD 0x28 /* MDIO write data register */
74 #define MTD_SA0 0x2C /* TX descriptor start address 0 */
75 #define MTD_SA1 0x30 /* TX descriptor start address 1 */
76 #define MRD_SA0 0x34 /* RX descriptor start address 0 */
77 #define MRD_SA1 0x38 /* RX descriptor start address 1 */
78 #define MISR 0x3C /* Status register */
79 #define MIER 0x40 /* INT enable register */
80 #define MSK_INT 0x0000 /* Mask off interrupts */
81 #define RX_FINISH 0x0001 /* RX finished */
82 #define RX_NO_DESC 0x0002 /* No RX descriptor available */
83 #define RX_FIFO_FULL 0x0004 /* RX FIFO full */
84 #define RX_EARLY 0x0008 /* RX early */
85 #define TX_FINISH 0x0010 /* TX finished */
86 #define TX_EARLY 0x0080 /* TX early */
87 #define EVENT_OVRFL 0x0100 /* Event counter overflow */
88 #define LINK_CHANGED 0x0200 /* PHY link changed */
89 #define ME_CISR 0x44 /* Event counter INT status */
90 #define ME_CIER 0x48 /* Event counter INT enable */
91 #define MR_CNT 0x50 /* Successfully received packet counter */
92 #define ME_CNT0 0x52 /* Event counter 0 */
93 #define ME_CNT1 0x54 /* Event counter 1 */
94 #define ME_CNT2 0x56 /* Event counter 2 */
95 #define ME_CNT3 0x58 /* Event counter 3 */
96 #define MT_CNT 0x5A /* Successfully transmit packet counter */
97 #define ME_CNT4 0x5C /* Event counter 4 */
98 #define MP_CNT 0x5E /* Pause frame counter register */
99 #define MAR0 0x60 /* Hash table 0 */
100 #define MAR1 0x62 /* Hash table 1 */
101 #define MAR2 0x64 /* Hash table 2 */
102 #define MAR3 0x66 /* Hash table 3 */
103 #define MID_0L 0x68 /* Multicast address MID0 Low */
104 #define MID_0M 0x6A /* Multicast address MID0 Medium */
105 #define MID_0H 0x6C /* Multicast address MID0 High */
106 #define MID_1L 0x70 /* MID1 Low */
107 #define MID_1M 0x72 /* MID1 Medium */
108 #define MID_1H 0x74 /* MID1 High */
109 #define MID_2L 0x78 /* MID2 Low */
110 #define MID_2M 0x7A /* MID2 Medium */
111 #define MID_2H 0x7C /* MID2 High */
112 #define MID_3L 0x80 /* MID3 Low */
113 #define MID_3M 0x82 /* MID3 Medium */
114 #define MID_3H 0x84 /* MID3 High */
115 #define PHY_CC 0x88 /* PHY status change configuration register */
116 #define SCEN 0x8000 /* PHY status change enable */
117 #define PHYAD_SHIFT 8 /* PHY address shift */
118 #define TMRDIV_SHIFT 0 /* Timer divider shift */
119 #define PHY_ST 0x8A /* PHY status register */
120 #define MAC_SM 0xAC /* MAC status machine */
121 #define MAC_SM_RST 0x0002 /* MAC status machine reset */
122 #define MAC_ID 0xBE /* Identifier register */
123
124 #define TX_DCNT 0x80 /* TX descriptor count */
125 #define RX_DCNT 0x80 /* RX descriptor count */
126 #define MAX_BUF_SIZE 0x600
127 #define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
128 #define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
129 #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
130 #define MCAST_MAX 3 /* Max number multicast addresses to filter */
131
132 #define MAC_DEF_TIMEOUT 2048 /* Default MAC read/write operation timeout */
133
134 /* Descriptor status */
135 #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
136 #define DSC_RX_OK 0x4000 /* RX was successful */
137 #define DSC_RX_ERR 0x0800 /* RX PHY error */
138 #define DSC_RX_ERR_DRI 0x0400 /* RX dribble packet */
139 #define DSC_RX_ERR_BUF 0x0200 /* RX length exceeds buffer size */
140 #define DSC_RX_ERR_LONG 0x0100 /* RX length > maximum packet length */
141 #define DSC_RX_ERR_RUNT 0x0080 /* RX packet length < 64 byte */
142 #define DSC_RX_ERR_CRC 0x0040 /* RX CRC error */
143 #define DSC_RX_BCAST 0x0020 /* RX broadcast (no error) */
144 #define DSC_RX_MCAST 0x0010 /* RX multicast (no error) */
145 #define DSC_RX_MCH_HIT 0x0008 /* RX multicast hit in hash table (no error) */
146 #define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */
147 #define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */
148
149 MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
150 "Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
151 "Florian Fainelli <f.fainelli@gmail.com>");
152 MODULE_LICENSE("GPL");
153 MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
154 MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);
155
156 /* RX and TX interrupts that we handle */
157 #define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH)
158 #define TX_INTS (TX_FINISH)
159 #define INT_MASK (RX_INTS | TX_INTS)
160
/* One ring descriptor.  The first four fields (status/len, buf, ndesc,
 * rev1) are what the MAC consumes via DMA: 'buf' and 'ndesc' hold bus
 * addresses of the data buffer and the next descriptor, and ownership is
 * handed to the hardware by setting DSC_OWNER_MAC in 'status'.  The
 * trailing fields are driver-private bookkeeping (virtual next pointer
 * and the skb backing 'buf').
 * NOTE(review): the byte-offset comments assume 32-bit pointers — they
 * do not hold on 64-bit builds, but only the first 16 bytes matter to
 * the hardware.
 */
struct r6040_descriptor {
	u16 status, len;		/* 0-3 */
	__le32 buf;			/* 4-7 */
	__le32 ndesc;			/* 8-B */
	u32 rev1;			/* C-F */
	char *vbufp;			/* 10-13 */
	struct r6040_descriptor *vndescp;	/* 14-17 */
	struct sk_buff *skb_ptr;	/* 18-1B */
	u32 rev2;			/* 1C-1F */
} __aligned(32);
171
/* Per-adapter state.  'lock' serializes register access and descriptor
 * bookkeeping between the xmit path, the NAPI poll loop and the ndo
 * callbacks. */
struct r6040_private {
	spinlock_t lock;		/* driver lock */
	struct pci_dev *pdev;
	struct r6040_descriptor *rx_insert_ptr;	/* next RX slot to (re)fill */
	struct r6040_descriptor *rx_remove_ptr;	/* next RX slot to reap */
	struct r6040_descriptor *tx_insert_ptr;	/* next TX slot to queue on */
	struct r6040_descriptor *tx_remove_ptr;	/* next TX slot to complete */
	struct r6040_descriptor *rx_ring;	/* RX ring, CPU view */
	struct r6040_descriptor *tx_ring;	/* TX ring, CPU view */
	dma_addr_t rx_ring_dma;		/* RX ring, bus address */
	dma_addr_t tx_ring_dma;		/* TX ring, bus address */
	u16 tx_free_desc;		/* TX descriptors still available */
	u16 mcr0;			/* shadow of the MCR0 control register */
	struct net_device *dev;
	struct mii_bus *mii_bus;
	struct napi_struct napi;
	void __iomem *base;		/* mapped register window */
	int old_link;			/* last link state seen in adjust_link */
	int old_duplex;			/* last duplex seen in adjust_link */
};
192
193 static char version[] = DRV_NAME
194 ": RDC R6040 NAPI net driver,"
195 "version "DRV_VERSION " (" DRV_RELDATE ")";
196
197 /* Read a word data from PHY Chip */
198 static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
199 {
200 int limit = MAC_DEF_TIMEOUT;
201 u16 cmd;
202
203 iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO);
204 /* Wait for the read bit to be cleared */
205 while (limit--) {
206 cmd = ioread16(ioaddr + MMDIO);
207 if (!(cmd & MDIO_READ))
208 break;
209 udelay(1);
210 }
211
212 if (limit < 0)
213 return -ETIMEDOUT;
214
215 return ioread16(ioaddr + MMRD);
216 }
217
218 /* Write a word data from PHY Chip */
219 static int r6040_phy_write(void __iomem *ioaddr,
220 int phy_addr, int reg, u16 val)
221 {
222 int limit = MAC_DEF_TIMEOUT;
223 u16 cmd;
224
225 iowrite16(val, ioaddr + MMWD);
226 /* Write the command to the MDIO bus */
227 iowrite16(MDIO_WRITE + reg + (phy_addr << 8), ioaddr + MMDIO);
228 /* Wait for the write bit to be cleared */
229 while (limit--) {
230 cmd = ioread16(ioaddr + MMDIO);
231 if (!(cmd & MDIO_WRITE))
232 break;
233 udelay(1);
234 }
235
236 return (limit < 0) ? -ETIMEDOUT : 0;
237 }
238
239 static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
240 {
241 struct net_device *dev = bus->priv;
242 struct r6040_private *lp = netdev_priv(dev);
243 void __iomem *ioaddr = lp->base;
244
245 return r6040_phy_read(ioaddr, phy_addr, reg);
246 }
247
248 static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
249 int reg, u16 value)
250 {
251 struct net_device *dev = bus->priv;
252 struct r6040_private *lp = netdev_priv(dev);
253 void __iomem *ioaddr = lp->base;
254
255 return r6040_phy_write(ioaddr, phy_addr, reg, value);
256 }
257
258 static void r6040_free_txbufs(struct net_device *dev)
259 {
260 struct r6040_private *lp = netdev_priv(dev);
261 int i;
262
263 for (i = 0; i < TX_DCNT; i++) {
264 if (lp->tx_insert_ptr->skb_ptr) {
265 pci_unmap_single(lp->pdev,
266 le32_to_cpu(lp->tx_insert_ptr->buf),
267 MAX_BUF_SIZE, PCI_DMA_TODEVICE);
268 dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
269 lp->tx_insert_ptr->skb_ptr = NULL;
270 }
271 lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
272 }
273 }
274
275 static void r6040_free_rxbufs(struct net_device *dev)
276 {
277 struct r6040_private *lp = netdev_priv(dev);
278 int i;
279
280 for (i = 0; i < RX_DCNT; i++) {
281 if (lp->rx_insert_ptr->skb_ptr) {
282 pci_unmap_single(lp->pdev,
283 le32_to_cpu(lp->rx_insert_ptr->buf),
284 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
285 dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
286 lp->rx_insert_ptr->skb_ptr = NULL;
287 }
288 lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
289 }
290 }
291
292 static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
293 dma_addr_t desc_dma, int size)
294 {
295 struct r6040_descriptor *desc = desc_ring;
296 dma_addr_t mapping = desc_dma;
297
298 while (size-- > 0) {
299 mapping += sizeof(*desc);
300 desc->ndesc = cpu_to_le32(mapping);
301 desc->vndescp = desc + 1;
302 desc++;
303 }
304 desc--;
305 desc->ndesc = cpu_to_le32(desc_dma);
306 desc->vndescp = desc_ring;
307 }
308
309 static void r6040_init_txbufs(struct net_device *dev)
310 {
311 struct r6040_private *lp = netdev_priv(dev);
312
313 lp->tx_free_desc = TX_DCNT;
314
315 lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
316 r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
317 }
318
/* Build the RX ring and give every descriptor a freshly mapped skb,
 * handing ownership of each to the MAC.  Returns 0, or -ENOMEM after
 * rolling back all allocations done so far.
 * NOTE(review): the pci_map_single() result is not checked with
 * pci_dma_mapping_error() — confirm whether that matters on the
 * platforms this chip ships on.
 */
static int r6040_alloc_rxbufs(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct r6040_descriptor *desc;
	struct sk_buff *skb;
	int rc;

	lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
	r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);

	/* Allocate skbs for the rx descriptors */
	desc = lp->rx_ring;
	do {
		skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
		if (!skb) {
			rc = -ENOMEM;
			goto err_exit;
		}
		desc->skb_ptr = skb;
		desc->buf = cpu_to_le32(pci_map_single(lp->pdev,
					desc->skb_ptr->data,
					MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
		/* Hand the descriptor to the MAC only after buf is set */
		desc->status = DSC_OWNER_MAC;
		desc = desc->vndescp;
	} while (desc != lp->rx_ring);

	return 0;

err_exit:
	/* Deallocate all previously allocated skbs */
	r6040_free_rxbufs(dev);
	return rc;
}
352
/* Reset the MAC core and its internal state machine, then wait 5 ms
 * for things to settle.  Busy-waits, so callers must tolerate delay.
 */
static void r6040_reset_mac(struct r6040_private *lp)
{
	void __iomem *ioaddr = lp->base;
	int limit = MAC_DEF_TIMEOUT;
	u16 cmd;

	iowrite16(MAC_RST, ioaddr + MCR1);
	/* Poll MCR1 until the break condition hits or the budget runs out.
	 * NOTE(review): breaking when MAC_RST is *set* looks inverted for
	 * a self-clearing reset bit — confirm against the RDC datasheet
	 * before touching this. */
	while (limit--) {
		cmd = ioread16(ioaddr + MCR1);
		if (cmd & MAC_RST)
			break;
	}

	/* Reset internal state machine */
	iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
	iowrite16(0, ioaddr + MAC_SM);
	mdelay(5);
}
371
/* Program the full MAC register set from scratch: reset the core, set
 * bus/buffer parameters, point the MAC at both descriptor rings,
 * re-enable interrupts and RX/TX, and kick the TX poller.  Also used
 * as the recovery path from r6040_tx_timeout(), hence the final poke
 * of MTPR.  The write order below is hardware-facing — keep it.
 */
static void r6040_init_mac_regs(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	/* Mask Off Interrupt */
	iowrite16(MSK_INT, ioaddr + MIER);

	/* Reset RDC MAC */
	r6040_reset_mac(lp);

	/* MAC Bus Control Register */
	iowrite16(MBCR_DEFAULT, ioaddr + MBCR);

	/* Buffer Size Register */
	iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);

	/* Write TX ring start address (split across two 16-bit regs) */
	iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
	iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);

	/* Write RX ring start address (split across two 16-bit regs) */
	iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
	iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);

	/* Set interrupt waiting time and packet numbers */
	iowrite16(0, ioaddr + MT_ICR);
	iowrite16(0, ioaddr + MR_ICR);

	/* Enable interrupts */
	iowrite16(INT_MASK, ioaddr + MIER);

	/* Enable TX and RX */
	iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr);

	/* Let TX poll the descriptors
	 * we may got called by r6040_tx_timeout which has left
	 * some unsent tx buffers */
	iowrite16(TM2TX, ioaddr + MTPR);
}
412
413 static void r6040_tx_timeout(struct net_device *dev, unsigned int txqueue)
414 {
415 struct r6040_private *priv = netdev_priv(dev);
416 void __iomem *ioaddr = priv->base;
417
418 netdev_warn(dev, "transmit timed out, int enable %4.4x "
419 "status %4.4x\n",
420 ioread16(ioaddr + MIER),
421 ioread16(ioaddr + MISR));
422
423 dev->stats.tx_errors++;
424
425 /* Reset MAC and re-init all registers */
426 r6040_init_mac_regs(dev);
427 }
428
429 static struct net_device_stats *r6040_get_stats(struct net_device *dev)
430 {
431 struct r6040_private *priv = netdev_priv(dev);
432 void __iomem *ioaddr = priv->base;
433 unsigned long flags;
434
435 spin_lock_irqsave(&priv->lock, flags);
436 dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
437 dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
438 spin_unlock_irqrestore(&priv->lock, flags);
439
440 return &dev->stats;
441 }
442
443 /* Stop RDC MAC and Free the allocated resource */
444 static void r6040_down(struct net_device *dev)
445 {
446 struct r6040_private *lp = netdev_priv(dev);
447 void __iomem *ioaddr = lp->base;
448 u16 *adrp;
449
450 /* Stop MAC */
451 iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */
452
453 /* Reset RDC MAC */
454 r6040_reset_mac(lp);
455
456 /* Restore MAC Address to MIDx */
457 adrp = (u16 *) dev->dev_addr;
458 iowrite16(adrp[0], ioaddr + MID_0L);
459 iowrite16(adrp[1], ioaddr + MID_0M);
460 iowrite16(adrp[2], ioaddr + MID_0H);
461 }
462
/* ndo_stop: stop the PHY and NAPI first so no new work arrives, then
 * quiesce the MAC and free buffers under the lock, release the IRQ,
 * and finally return the descriptor rings to the DMA allocator.
 * Always returns 0.
 */
static int r6040_close(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct pci_dev *pdev = lp->pdev;

	phy_stop(dev->phydev);
	napi_disable(&lp->napi);
	netif_stop_queue(dev);

	spin_lock_irq(&lp->lock);
	r6040_down(dev);

	/* Free RX buffer */
	r6040_free_rxbufs(dev);

	/* Free TX buffer */
	r6040_free_txbufs(dev);

	spin_unlock_irq(&lp->lock);

	free_irq(dev->irq, dev);

	/* Free Descriptor memory */
	if (lp->rx_ring) {
		pci_free_consistent(pdev,
				RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
		lp->rx_ring = NULL;
	}

	if (lp->tx_ring) {
		pci_free_consistent(pdev,
				TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
		lp->tx_ring = NULL;
	}

	return 0;
}
500
/* Receive up to @limit packets from the RX ring (NAPI context).
 * Good frames are passed up the stack and their descriptor refilled
 * with a freshly mapped skb; errored frames only bump counters; if a
 * replacement skb cannot be allocated, the frame is dropped and the
 * old buffer recycled.  Every processed descriptor is handed back to
 * the MAC.  Returns the number of descriptors processed.
 */
static int r6040_rx(struct net_device *dev, int limit)
{
	struct r6040_private *priv = netdev_priv(dev);
	struct r6040_descriptor *descptr = priv->rx_remove_ptr;
	struct sk_buff *skb_ptr, *new_skb;
	int count = 0;
	u16 err;

	/* Limit not reached and the descriptor belongs to the CPU */
	while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
		/* Read the descriptor status */
		err = descptr->status;
		/* Global error status set */
		if (err & DSC_RX_ERR) {
			/* RX dribble */
			if (err & DSC_RX_ERR_DRI)
				dev->stats.rx_frame_errors++;
			/* Buffer length exceeded */
			if (err & DSC_RX_ERR_BUF)
				dev->stats.rx_length_errors++;
			/* Packet too long */
			if (err & DSC_RX_ERR_LONG)
				dev->stats.rx_length_errors++;
			/* Packet < 64 bytes */
			if (err & DSC_RX_ERR_RUNT)
				dev->stats.rx_length_errors++;
			/* CRC error: locked because r6040_get_stats()
			 * also updates this counter */
			if (err & DSC_RX_ERR_CRC) {
				spin_lock(&priv->lock);
				dev->stats.rx_crc_errors++;
				spin_unlock(&priv->lock);
			}
			goto next_descr;
		}

		/* Packet successfully received */
		new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
		if (!new_skb) {
			/* No replacement buffer: drop the frame, keep the
			 * old skb mapped for reuse */
			dev->stats.rx_dropped++;
			goto next_descr;
		}
		skb_ptr = descptr->skb_ptr;
		skb_ptr->dev = priv->dev;

		/* Do not count the CRC */
		skb_put(skb_ptr, descptr->len - 4);
		pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
					MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
		skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);

		/* Send to upper layer */
		netif_receive_skb(skb_ptr);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += descptr->len - 4;

		/* put new skb into descriptor */
		descptr->skb_ptr = new_skb;
		descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
						descptr->skb_ptr->data,
					MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));

next_descr:
		/* put the descriptor back to the MAC */
		descptr->status = DSC_OWNER_MAC;
		descptr = descptr->vndescp;
		count++;
	}
	priv->rx_remove_ptr = descptr;

	return count;
}
572
/* Reap completed TX descriptors (NAPI context): record error and byte
 * counters, unmap and free the transmitted skbs, and wake the queue
 * once at least one descriptor is free again.  Stops at the first
 * descriptor still owned by the MAC.
 */
static void r6040_tx(struct net_device *dev)
{
	struct r6040_private *priv = netdev_priv(dev);
	struct r6040_descriptor *descptr;
	void __iomem *ioaddr = priv->base;
	struct sk_buff *skb_ptr;
	u16 err;

	spin_lock(&priv->lock);
	descptr = priv->tx_remove_ptr;
	while (priv->tx_free_desc < TX_DCNT) {
		/* Check for errors (MLSR is sampled once per descriptor) */
		err = ioread16(ioaddr + MLSR);

		if (err & TX_FIFO_UNDR)
			dev->stats.tx_fifo_errors++;
		if (err & (TX_EXCEEDC | TX_LATEC))
			dev->stats.tx_carrier_errors++;

		if (descptr->status & DSC_OWNER_MAC)
			break; /* Not complete */
		skb_ptr = descptr->skb_ptr;

		/* Statistic Counter */
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb_ptr->len;

		pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
			skb_ptr->len, PCI_DMA_TODEVICE);
		/* Free buffer */
		dev_kfree_skb(skb_ptr);
		descptr->skb_ptr = NULL;
		/* To next descriptor */
		descptr = descptr->vndescp;
		priv->tx_free_desc++;
	}
	priv->tx_remove_ptr = descptr;

	if (priv->tx_free_desc)
		netif_wake_queue(dev);
	spin_unlock(&priv->lock);
}
615
616 static int r6040_poll(struct napi_struct *napi, int budget)
617 {
618 struct r6040_private *priv =
619 container_of(napi, struct r6040_private, napi);
620 struct net_device *dev = priv->dev;
621 void __iomem *ioaddr = priv->base;
622 int work_done;
623
624 r6040_tx(dev);
625
626 work_done = r6040_rx(dev, budget);
627
628 if (work_done < budget) {
629 napi_complete_done(napi, work_done);
630 /* Enable RX/TX interrupt */
631 iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS,
632 ioaddr + MIER);
633 }
634 return work_done;
635 }
636
/* The RDC interrupt handler (IRQ line may be shared).  Saves the
 * interrupt-enable mask, masks everything, reads MISR (which also
 * clears the pending status), defers RX/TX work to NAPI, and restores
 * the (possibly narrowed) enable mask on exit.
 */
static irqreturn_t r6040_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	u16 misr, status;

	/* Save MIER */
	misr = ioread16(ioaddr + MIER);
	/* Mask off RDC MAC interrupt */
	iowrite16(MSK_INT, ioaddr + MIER);
	/* Read MISR status and clear */
	status = ioread16(ioaddr + MISR);

	/* 0x0000: nothing pending (shared IRQ was not ours);
	 * 0xffff: reads as all-ones, device likely gone */
	if (status == 0x0000 || status == 0xffff) {
		/* Restore RDC MAC interrupt */
		iowrite16(misr, ioaddr + MIER);
		return IRQ_NONE;
	}

	/* RX interrupt request */
	if (status & (RX_INTS | TX_INTS)) {
		if (status & RX_NO_DESC) {
			/* RX descriptor unavailable */
			dev->stats.rx_dropped++;
			dev->stats.rx_missed_errors++;
		}
		if (status & RX_FIFO_FULL)
			dev->stats.rx_fifo_errors++;

		if (likely(napi_schedule_prep(&lp->napi))) {
			/* Mask off RX/TX interrupts until r6040_poll()
			 * re-enables them */
			misr &= ~(RX_INTS | TX_INTS);
			__napi_schedule_irqoff(&lp->napi);
		}
	}

	/* Restore RDC MAC interrupt */
	iowrite16(misr, ioaddr + MIER);

	return IRQ_HANDLED;
}
680
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with the IRQ line disabled
 * so it cannot re-enter. */
static void r6040_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	r6040_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
689
/* Bring the MAC up: build both descriptor rings, apply vendor PHY
 * tweaks, program the MAC register set, and start the PHY.  Returns 0
 * or the negative errno from RX buffer allocation.
 */
static int r6040_up(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int ret;

	/* Initialise and alloc RX/TX buffers */
	r6040_init_txbufs(dev);
	ret = r6040_alloc_rxbufs(dev);
	if (ret)
		return ret;

	/* Performance tuning recommended "by RDC guys".  The PHY
	 * address/register pairs and bit values are undocumented here —
	 * treat as opaque vendor magic. */
	r6040_phy_write(ioaddr, 30, 17,
			(r6040_phy_read(ioaddr, 30, 17) | 0x4000));
	r6040_phy_write(ioaddr, 30, 17,
			~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
	r6040_phy_write(ioaddr, 0, 19, 0x0000);
	r6040_phy_write(ioaddr, 0, 30, 0x01F0);

	/* Initialize all MAC registers */
	r6040_init_mac_regs(dev);

	phy_start(dev->phydev);

	return 0;
}
718
719
720 /* Read/set MAC address routines */
721 static void r6040_mac_address(struct net_device *dev)
722 {
723 struct r6040_private *lp = netdev_priv(dev);
724 void __iomem *ioaddr = lp->base;
725 u16 *adrp;
726
727 /* Reset MAC */
728 r6040_reset_mac(lp);
729
730 /* Restore MAC Address */
731 adrp = (u16 *) dev->dev_addr;
732 iowrite16(adrp[0], ioaddr + MID_0L);
733 iowrite16(adrp[1], ioaddr + MID_0M);
734 iowrite16(adrp[2], ioaddr + MID_0H);
735 }
736
/* ndo_open: grab the IRQ, program the MAC address, allocate both
 * descriptor rings from DMA-coherent memory, bring the MAC up, and
 * start NAPI and the TX queue.  Unwinds in reverse order via the goto
 * chain on any failure.  Returns 0 or a negative errno.
 */
static int r6040_open(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	int ret;

	/* Request IRQ and Register interrupt handler */
	ret = request_irq(dev->irq, r6040_interrupt,
		IRQF_SHARED, dev->name, dev);
	if (ret)
		goto out;

	/* Set MAC address */
	r6040_mac_address(dev);

	/* Allocate Descriptor memory */
	lp->rx_ring =
		pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma);
	if (!lp->rx_ring) {
		ret = -ENOMEM;
		goto err_free_irq;
	}

	lp->tx_ring =
		pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma);
	if (!lp->tx_ring) {
		ret = -ENOMEM;
		goto err_free_rx_ring;
	}

	ret = r6040_up(dev);
	if (ret)
		goto err_free_tx_ring;

	napi_enable(&lp->napi);
	netif_start_queue(dev);

	return 0;

err_free_tx_ring:
	pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
			lp->tx_ring_dma);
err_free_rx_ring:
	pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
			lp->rx_ring_dma);
err_free_irq:
	free_irq(dev->irq, dev);
out:
	return ret;
}
786
/* ndo_start_xmit: pad short frames, claim a TX descriptor under the
 * lock, map the skb for DMA, hand the descriptor to the MAC, and poke
 * the TX poller.  Stops the queue when the ring fills.  Returns
 * NETDEV_TX_OK, or NETDEV_TX_BUSY if no descriptor was free (which
 * should not normally happen since the queue is stopped at exhaustion).
 */
static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct r6040_descriptor *descptr;
	void __iomem *ioaddr = lp->base;
	unsigned long flags;

	/* Pad to the minimum Ethernet frame size; on failure the skb has
	 * already been consumed, so just report OK */
	if (skb_put_padto(skb, ETH_ZLEN) < 0)
		return NETDEV_TX_OK;

	/* Critical Section */
	spin_lock_irqsave(&lp->lock, flags);

	/* TX resource check */
	if (!lp->tx_free_desc) {
		spin_unlock_irqrestore(&lp->lock, flags);
		netif_stop_queue(dev);
		netdev_err(dev, ": no tx descriptor\n");
		return NETDEV_TX_BUSY;
	}

	/* Set TX descriptor & Transmit it */
	lp->tx_free_desc--;
	descptr = lp->tx_insert_ptr;
	descptr->len = skb->len;
	descptr->skb_ptr = skb;
	descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* Ownership transfer is the last descriptor write */
	descptr->status = DSC_OWNER_MAC;

	skb_tx_timestamp(skb);

	/* Trigger the MAC to check the TX descriptor */
	if (!netdev_xmit_more() || netif_queue_stopped(dev))
		iowrite16(TM2TX, ioaddr + MTPR);
	lp->tx_insert_ptr = descptr->vndescp;

	/* If no tx resource, stop */
	if (!lp->tx_free_desc)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
833
/* ndo_set_rx_mode: program the RX filter.  Four mutually exclusive
 * modes, in priority order: promiscuous (MCR0_PROMISC); all-multicast
 * (hash table forced to all-ones); up to MCAST_MAX exact-match MID
 * registers; or a CRC-based hash table for larger lists.  The station
 * address is rewritten first since filter changes share the MID block.
 */
static void r6040_multicast_list(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned long flags;
	struct netdev_hw_addr *ha;
	int i;
	u16 *adrp;
	u16 hash_table[4] = { 0 };

	spin_lock_irqsave(&lp->lock, flags);

	/* Keep our MAC Address */
	adrp = (u16 *)dev->dev_addr;
	iowrite16(adrp[0], ioaddr + MID_0L);
	iowrite16(adrp[1], ioaddr + MID_0M);
	iowrite16(adrp[2], ioaddr + MID_0H);

	/* Clear AMCP & PROM bits */
	lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);

	/* Promiscuous mode */
	if (dev->flags & IFF_PROMISC)
		lp->mcr0 |= MCR0_PROMISC;

	/* Enable multicast hash table function to
	 * receive all multicast packets. */
	else if (dev->flags & IFF_ALLMULTI) {
		lp->mcr0 |= MCR0_HASH_EN;

		/* Clear the exact-match MID1..MID3 slots */
		for (i = 0; i < MCAST_MAX ; i++) {
			iowrite16(0, ioaddr + MID_1L + 8 * i);
			iowrite16(0, ioaddr + MID_1M + 8 * i);
			iowrite16(0, ioaddr + MID_1H + 8 * i);
		}

		/* All-ones hash accepts every multicast frame */
		for (i = 0; i < 4; i++)
			hash_table[i] = 0xffff;
	}
	/* Use internal multicast address registers if the number of
	 * multicast addresses is not greater than MCAST_MAX. */
	else if (netdev_mc_count(dev) <= MCAST_MAX) {
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			u16 *adrp = (u16 *) ha->addr;
			iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
			iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
			iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
			i++;
		}
		/* Zero out the unused exact-match slots */
		while (i < MCAST_MAX) {
			iowrite16(0, ioaddr + MID_1L + 8 * i);
			iowrite16(0, ioaddr + MID_1M + 8 * i);
			iowrite16(0, ioaddr + MID_1H + 8 * i);
			i++;
		}
	}
	/* Otherwise, Enable multicast hash table function. */
	else {
		u32 crc;

		lp->mcr0 |= MCR0_HASH_EN;

		for (i = 0; i < MCAST_MAX ; i++) {
			iowrite16(0, ioaddr + MID_1L + 8 * i);
			iowrite16(0, ioaddr + MID_1M + 8 * i);
			iowrite16(0, ioaddr + MID_1H + 8 * i);
		}

		/* Build multicast hash table: top 6 CRC bits select one
		 * of 64 hash-table bits */
		netdev_for_each_mc_addr(ha, dev) {
			u8 *addrs = ha->addr;

			crc = ether_crc(ETH_ALEN, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
	}

	iowrite16(lp->mcr0, ioaddr + MCR0);

	/* Fill the MAC hash tables with their values */
	if (lp->mcr0 & MCR0_HASH_EN) {
		iowrite16(hash_table[0], ioaddr + MAR0);
		iowrite16(hash_table[1], ioaddr + MAR1);
		iowrite16(hash_table[2], ioaddr + MAR2);
		iowrite16(hash_table[3], ioaddr + MAR3);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}
925
926 static void netdev_get_drvinfo(struct net_device *dev,
927 struct ethtool_drvinfo *info)
928 {
929 struct r6040_private *rp = netdev_priv(dev);
930
931 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
932 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
933 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
934 }
935
/* ethtool operations: link state and settings are delegated to phylib. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
943
/* netdev operations; ioctl is routed to phylib's generic handler. */
static const struct net_device_ops r6040_netdev_ops = {
	.ndo_open = r6040_open,
	.ndo_stop = r6040_close,
	.ndo_start_xmit = r6040_start_xmit,
	.ndo_get_stats = r6040_get_stats,
	.ndo_set_rx_mode = r6040_multicast_list,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_do_ioctl = phy_do_ioctl,
	.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = r6040_poll_controller,
#endif
};
958
959 static void r6040_adjust_link(struct net_device *dev)
960 {
961 struct r6040_private *lp = netdev_priv(dev);
962 struct phy_device *phydev = dev->phydev;
963 int status_changed = 0;
964 void __iomem *ioaddr = lp->base;
965
966 BUG_ON(!phydev);
967
968 if (lp->old_link != phydev->link) {
969 status_changed = 1;
970 lp->old_link = phydev->link;
971 }
972
973 /* reflect duplex change */
974 if (phydev->link && (lp->old_duplex != phydev->duplex)) {
975 lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0);
976 iowrite16(lp->mcr0, ioaddr);
977
978 status_changed = 1;
979 lp->old_duplex = phydev->duplex;
980 }
981
982 if (status_changed)
983 phy_print_status(phydev);
984 }
985
/* Find the first PHY on the bus, connect it with our link-change
 * callback, and cap it at 100 Mbit/s.  Resets the cached link/duplex
 * state so the first adjust_link call reports a transition.  Returns 0
 * or a negative errno.
 */
static int r6040_mii_probe(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct phy_device *phydev = NULL;

	phydev = phy_find_first(lp->mii_bus);
	if (!phydev) {
		dev_err(&lp->pdev->dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(dev, phydev_name(phydev), &r6040_adjust_link,
			     PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		dev_err(&lp->pdev->dev, "could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* The MAC is Fast Ethernet only */
	phy_set_max_speed(phydev, SPEED_100);

	lp->old_link = 0;
	lp->old_duplex = -1;

	phy_attached_info(phydev);

	return 0;
}
1014
/* PCI probe: enable the device, verify 32-bit DMA and BAR0 size, map
 * the register window, read (or randomize) the MAC address, register
 * the MDIO bus, attach the PHY and finally register the netdev.  The
 * goto chain unwinds each step in reverse on failure.  Returns 0 or a
 * negative errno.
 */
static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct r6040_private *lp;
	void __iomem *ioaddr;
	int err, io_size = R6040_IO_SIZE;
	static int card_idx = -1;	/* monotonic per-probe index for the MII bus id */
	int bar = 0;
	u16 *adrp;

	pr_info("%s\n", version);

	err = pci_enable_device(pdev);
	if (err)
		goto err_out;

	/* this should always be supported */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
		goto err_out_disable_dev;
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
		goto err_out_disable_dev;
	}

	/* IO Size check */
	if (pci_resource_len(pdev, bar) < io_size) {
		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
		err = -EIO;
		goto err_out_disable_dev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct r6040_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_disable_dev;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);
	lp = netdev_priv(dev);

	err = pci_request_regions(pdev, DRV_NAME);

	if (err) {
		dev_err(&pdev->dev, "Failed to request PCI regions\n");
		goto err_out_free_dev;
	}

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed for device\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* If PHY status change register is still set to zero it means the
	 * bootloader didn't initialize it, so we set it to:
	 * - enable phy status change
	 * - enable all phy addresses
	 * - set to lowest timer divider */
	if (ioread16(ioaddr + PHY_CC) == 0)
		iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
				7 << TMRDIV_SHIFT, ioaddr + PHY_CC);

	/* Init system & device */
	lp->base = ioaddr;
	dev->irq = pdev->irq;

	spin_lock_init(&lp->lock);
	pci_set_drvdata(pdev, dev);

	/* Set MAC address */
	card_idx++;

	/* Read the station address out of the MID0 registers */
	adrp = (u16 *)dev->dev_addr;
	adrp[0] = ioread16(ioaddr + MID_0L);
	adrp[1] = ioread16(ioaddr + MID_0M);
	adrp[2] = ioread16(ioaddr + MID_0H);

	/* Some bootloader/BIOSes do not initialize
	 * MAC address, warn about that */
	if (!(adrp[0] || adrp[1] || adrp[2])) {
		netdev_warn(dev, "MAC address not initialized, "
					"generating random\n");
		eth_hw_addr_random(dev);
	}

	/* Link new device into r6040_root_dev */
	lp->pdev = pdev;
	lp->dev = dev;

	/* Init RDC private data */
	lp->mcr0 = MCR0_XMTEN | MCR0_RCVEN;

	/* The RDC-specific entries in the device structure. */
	dev->netdev_ops = &r6040_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &lp->napi, r6040_poll, 64);

	lp->mii_bus = mdiobus_alloc();
	if (!lp->mii_bus) {
		dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
		err = -ENOMEM;
		goto err_out_unmap;
	}

	lp->mii_bus->priv = dev;
	lp->mii_bus->read = r6040_mdiobus_read;
	lp->mii_bus->write = r6040_mdiobus_write;
	lp->mii_bus->name = "r6040_eth_mii";
	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		dev_name(&pdev->dev), card_idx);

	err = mdiobus_register(lp->mii_bus);
	if (err) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_out_mdio;
	}

	err = r6040_mii_probe(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_out_mdio_unregister;
	}

	/* Register net device. After this dev->name assign */
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register net device\n");
		goto err_out_mdio_unregister;
	}
	return 0;

err_out_mdio_unregister:
	mdiobus_unregister(lp->mii_bus);
err_out_mdio:
	mdiobus_free(lp->mii_bus);
err_out_unmap:
	netif_napi_del(&lp->napi);
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_dev:
	free_netdev(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out:
	return err;
}
1170
/* PCI remove: undo r6040_init_one() in reverse order — netdev first so
 * no new traffic arrives, then the MDIO bus, NAPI context, register
 * mapping, PCI regions and finally the device itself. */
static void r6040_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct r6040_private *lp = netdev_priv(dev);

	unregister_netdev(dev);
	mdiobus_unregister(lp->mii_bus);
	mdiobus_free(lp->mii_bus);
	netif_napi_del(&lp->napi);
	pci_iounmap(pdev, lp->base);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
}
1185
1186
/* PCI IDs handled by this driver: RDC vendor, device 0x6040. */
static const struct pci_device_id r6040_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, r6040_pci_tbl);

static struct pci_driver r6040_driver = {
	.name = DRV_NAME,
	.id_table = r6040_pci_tbl,
	.probe = r6040_init_one,
	.remove = r6040_remove_one,
};

/* Registers the driver at module init and unregisters it at exit */
module_pci_driver(r6040_driver);