/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>

#include "macb.h"
#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */

#define DEFAULT_RX_RING_SIZE	512	/* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512	/* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
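/* Note: waking the queue only once occupancy has dropped to 3/4 of the
 * ring (rather than on the first freed descriptor) adds hysteresis and
 * avoids a stop/wake storm under sustained load.
 */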
#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		(NETIF_F_TSO | NETIF_F_UFO)

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
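/* 1230 us is roughly one maximum-length frame time at 10 Mbit/s:
 * 1518 bytes * 8 bits / 10 Mbit/s ~= 1214 us, plus a small margin.
 */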
/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 sizeof(struct macb_dma_desc);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + bp->rx_buffer_size *
	       macb_rx_ring_wrap(bp, index);
}
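/* The wrap helpers above mask the index with (ring_size - 1), which is
 * only correct for power-of-two ring sizes: e.g. with tx_ring_size = 512,
 * index 512 maps back to slot 0. This is why macb_set_ringparam() below
 * rounds requested sizes up to the next power of two.
 */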
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}
/* Find the CPU endianness by using the loopback bit of NCR register. When the
 * CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}
static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}
static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}
static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address register for valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}
static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}
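/* The MAN register fields used above (SOF, RW, PHYA, REGA, CODE, DATA)
 * map directly onto an IEEE 802.3 Clause 22 MDIO management frame; the
 * controller serializes it on the MDIO bus and the NSR IDLE bit signals
 * completion of the transfer.
 */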
/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk:	Pointer to the clock to change
 * @speed:	New link speed (SPEED_10/SPEED_100/SPEED_1000)
 * @dev:	Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}
static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}
/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	phydev = phy_find_first(bp->mii_bus);
	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -ENXIO;
	}

	pdata = dev_get_platdata(&bp->pdev->dev);
	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
					"phy int");
		if (!ret) {
			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
		}
	} else {
		phydev->irq = PHY_POLL;
	}

	/* attach the mac to the phy */
	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
				 bp->phy_interface);
	if (ret) {
		netdev_err(dev, "Could not attach to PHY\n");
		return ret;
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
		phydev->supported &= ~SUPPORTED_1000baseT_Half;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}
static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np) {
		/* try dt phy registration */
		err = of_mdiobus_register(bp->mii_bus, np);

		/* fallback to standard phy registration if no phy were
		 * found during dt phy registration
		 */
		if (!err && !phy_find_first(bp->mii_bus)) {
			for (i = 0; i < PHY_MAX_ADDR; i++) {
				struct phy_device *phydev;

				phydev = mdiobus_scan(bp->mii_bus, i);
				if (IS_ERR(phydev) &&
				    PTR_ERR(phydev) != -ENODEV) {
					err = PTR_ERR(phydev);
					break;
				}
			}

			if (err)
				goto err_out_unregister_bus;
		}
	} else {
		for (i = 0; i < PHY_MAX_ADDR; i++)
			bp->mii_bus->irq[i] = PHY_POLL;

		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = mdiobus_register(bp->mii_bus);
	}

	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}
static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		usleep_range(10, 250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}
static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
{
	desc->addr = (u32)addr;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	desc->addrh = (u32)(addr >> 32);
#endif
}
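/* With CONFIG_ARCH_DMA_ADDR_T_64BIT the descriptor carries the upper 32
 * address bits in a separate addrh word; macb_set_addr() keeps both words
 * consistent whenever a full DMA address is programmed.
 */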
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue	*queue = container_of(work, struct macb_queue,
						      tx_error_task);
	struct macb		*bp = queue->bp;
	struct macb_tx_skb	*tx_skb;
	struct macb_dma_desc	*desc;
	struct sk_buff		*skb;
	unsigned int		tail;
	unsigned long		flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->stats.tx_packets++;
				bp->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}
static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb	*tx_skb;
		struct sk_buff		*skb;
		struct macb_dma_desc	*desc;
		u32			ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->stats.tx_packets++;
				bp->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}
static void gem_rx_refill(struct macb *bp)
{
	unsigned int		entry;
	struct sk_buff		*skb;
	dma_addr_t		paddr;

	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		bp->rx_prepared_head++;

		if (!bp->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			bp->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			macb_set_addr(&(bp->rx_ring[entry]), paddr);
			bp->rx_ring[entry].ctrl = 0;

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
			bp->rx_ring[entry].ctrl = 0;
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
		    bp->rx_prepared_head, bp->rx_tail);
}
/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this is updated, so we don't have to record
	 * anything.
	 */
}
static int gem_rx(struct macb *bp, int budget)
{
	unsigned int		len;
	unsigned int		entry;
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	int			count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
		desc = &bp->rx_ring[entry];

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		addr |= ((u64)(desc->addrh) << 32);
#endif
		ctrl = desc->ctrl;

		if (!rxused)
			break;

		bp->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->stats.rx_dropped++;
			break;
		}
		skb = bp->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		bp->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->stats.rx_packets++;
		bp->stats.rx_bytes += skb->len;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(bp);

	return count;
}
static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	desc = macb_rx_desc(bp, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(bp, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->stats.rx_packets++;
	bp->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}
static inline void macb_init_rx_ring(struct macb *bp)
{
	dma_addr_t addr;
	int i;

	addr = bp->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		bp->rx_ring[i].addr = addr;
		bp->rx_ring[i].ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
	bp->rx_tail = 0;
}
static int macb_rx(struct macb *bp, int budget)
{
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 addr, ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(bp);
		macb_writel(bp, RBQP, bp->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}
static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	work_done = 0;

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(bp, budget);
	if (work_done < budget) {
		napi_complete(napi);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&bp->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		/* There is a hardware issue under heavy load where DMA can
		 * stop, this causes endless "used buffer descriptor read"
		 * interrupts but it can be cleared by re-enabling RX. See
		 * the at91 manual, section 41.3.1 or the Zynq manual
		 * section 16.7.4 for details.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			/* TODO: Reset the hardware, and maybe move the
			 * netdev_err to a lower-priority context as well
			 * (work queue?)
			 */
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}

		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
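/* On controllers with MACB_CAPS_ISR_CLEAR_ON_WRITE, ISR bits are sticky
 * and must be acknowledged by writing them back, which is why each branch
 * above re-writes the bit it just handled; on other controllers reading
 * ISR clears the status by itself.
 */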
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif
static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = &queue->tx_ring[entry];
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
				  skb_transport_offset(skb) +
				  ETH_FCS_LEN;
		else /* TSO */ {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = &queue->tx_ring[entry];

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}
static netdev_features_t macb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	unsigned int nr_frags, f;
	unsigned int hdrlen;

	/* Validate LSO compatibility */

	/* there is only one buffer */
	if (!skb_is_nonlinear(skb))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);
	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
		hdrlen += tcp_hdrlen(skb);

	/* For LSO:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}
static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <= 2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso, is_udp = 0;

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);

		/* length of headers */
		if (is_udp)
			/* only queue eth + ip headers separately for UDP */
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else {
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);
	}

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}
static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	dma_addr_t		addr;
	int i;

	if (!bp->rx_skbuff)
		return;

	for (i = 0; i < bp->rx_ring_size; i++) {
		skb = bp->rx_skbuff[i];

		if (!skb)
			continue;

		desc = &bp->rx_ring[i];
		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		addr |= ((u64)(desc->addrh) << 32);
#endif
		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = NULL;
	}

	kfree(bp->rx_skbuff);
	bp->rx_skbuff = NULL;
}
static void macb_free_rx_buffers(struct macb *bp)
{
	if (bp->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  bp->rx_ring_size * bp->rx_buffer_size,
				  bp->rx_buffers, bp->rx_buffers_dma);
		bp->rx_buffers = NULL;
	}
}
static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	bp->macbgem_ops.mog_free_rx_buffers(bp);
	if (bp->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
				  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
	}
}
static int gem_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = bp->rx_ring_size * sizeof(struct sk_buff *);
	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_skbuff)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated %d RX struct sk_buff entries at %p\n",
		   bp->rx_ring_size, bp->rx_skbuff);
	return 0;
}
static int macb_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = bp->rx_ring_size * bp->rx_buffer_size;
	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &bp->rx_buffers_dma, GFP_KERNEL);
	if (!bp->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
	return 0;
}
static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES(bp);
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;
	}

	size = RX_RING_BYTES(bp);
	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->rx_ring_dma, GFP_KERNEL);
	if (!bp->rx_ring)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}
static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < bp->tx_ring_size; i++) {
			queue->tx_ring[i].addr = 0;
			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
		}
		queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;
	}

	bp->rx_tail = 0;
	bp->rx_prepared_head = 0;

	gem_rx_refill(bp);
}
static void macb_init_rings(struct macb *bp)
{
	int i;

	macb_init_rx_ring(bp);

	for (i = 0; i < bp->tx_ring_size; i++) {
		bp->queues[0].tx_ring[i].addr = 0;
		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
	bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
}
static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	/* Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
		queue_readl(queue, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
	}
}
static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}
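/* The divisor is chosen so that the resulting MDC frequency stays at or
 * below the 2.5 MHz maximum that IEEE 802.3 specifies for the MDIO
 * management interface (e.g. 40 MHz pclk / 16 = 2.5 MHz).
 */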
static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}
/* Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}
/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	u32 dmacfg;

	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		if (bp->native_io)
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		dmacfg |= GEM_BIT(ADDR64);
#endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}
static void macb_init_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	if (bp->caps & MACB_CAPS_JUMBO)
		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
	else
		config |= MACB_BIT(BIG);	/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		gem_writel(bp, JML, bp->jumbo_max_len);
	bp->speed = SPEED_10;
	bp->duplex = DUPLEX_HALF;
	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
	if (bp->caps & MACB_CAPS_JUMBO)
		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;

	macb_configure_dma(bp);

	/* Initialize TX and RX buffers */
	macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
#endif
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
#endif

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
}
/* The hash address register is 64 bits long and takes up two
 * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function. The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received. If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast. A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register. A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register. To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */
static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}
/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = 0;
	mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}
/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);

		/* Disable RX checksum offload */
		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

		/* Enable RX checksum offload only if requested */
		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Enable all multicast mode */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Enable specific multicasts */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable all multicast mode */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}
static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	int err;

	netdev_dbg(bp->dev, "open\n");

	/* carrier starts down */
	netif_carrier_off(dev);

	/* if the phy is not yet registered, retry later */
	if (!dev->phydev)
		return -EAGAIN;

	/* RX buffers initialization */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		return err;
	}

	napi_enable(&bp->napi);

	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_tx_start_all_queues(dev);

	return 0;
}
static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	netif_tx_stop_all_queues(dev);
	napi_disable(&bp->napi);

	if (dev->phydev)
		phy_stop(dev->phydev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	return 0;
}
static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;

	return 0;
}
static void gem_update_stats(struct macb *bp)
{
	unsigned int i;
	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = bp->macb_reg_readl(bp, offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			/* Add GEM_OCTTXH, GEM_OCTRXH */
			val = bp->macb_reg_readl(bp, offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}
}
static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}
static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct macb *bp;

	bp = netdev_priv(dev);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
}

static int gem_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return GEM_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
	unsigned int i;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
			memcpy(p, gem_statistics[i].stat_string,
			       ETH_GSTRING_LEN);
		break;
	}
}
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors +
			    hwstat->sqe_test_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}
static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}

static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);

	regs_buff[0]  = macb_readl(bp, NCR);
	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2]  = macb_readl(bp, NSR);
	regs_buff[3]  = macb_readl(bp, TSR);
	regs_buff[4]  = macb_readl(bp, RBQP);
	regs_buff[5]  = macb_readl(bp, TBQP);
	regs_buff[6]  = macb_readl(bp, RSR);
	regs_buff[7]  = macb_readl(bp, IMR);

	regs_buff[8]  = tail;
	regs_buff[9]  = head;
	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
	if (macb_is_gem(bp))
		regs_buff[13] = gem_readl(bp, DMACFG);
}
static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;

		if (bp->wol & MACB_WOL_ENABLED)
			wol->wolopts |= WAKE_MAGIC;
	}
}

static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
	    (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	if (wol->wolopts & WAKE_MAGIC)
		bp->wol |= MACB_WOL_ENABLED;
	else
		bp->wol &= ~MACB_WOL_ENABLED;

	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);

	return 0;
}
static void macb_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);

	ring->rx_max_pending = MAX_RX_RING_SIZE;
	ring->tx_max_pending = MAX_TX_RING_SIZE;

	ring->rx_pending = bp->rx_ring_size;
	ring->tx_pending = bp->tx_ring_size;
}
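
/* Requested ring sizes are clamped to the supported range and rounded up
 * to a power of two, since the ring accessors mask the index with
 * (size - 1). For example, a request for 600 RX entries lies within
 * [MIN_RX_RING_SIZE, MAX_RX_RING_SIZE] and rounds up to 1024. If the
 * interface is running it is closed and reopened so the rings are
 * reallocated with the new sizes.
 */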
static int macb_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);
	u32 new_rx_size, new_tx_size;
	unsigned int reset = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_size = clamp_t(u32, ring->rx_pending,
			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
	new_rx_size = roundup_pow_of_two(new_rx_size);

	new_tx_size = clamp_t(u32, ring->tx_pending,
			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
	new_tx_size = roundup_pow_of_two(new_tx_size);

	if ((new_tx_size == bp->tx_ring_size) &&
	    (new_rx_size == bp->rx_ring_size)) {
		/* nothing to do */
		return 0;
	}

	if (netif_running(bp->dev)) {
		reset = 1;
		macb_close(bp->dev);
	}

	bp->rx_ring_size = new_rx_size;
	bp->tx_ring_size = new_tx_size;

	if (reset)
		macb_open(bp->dev);

	return 0;
}
static const struct ethtool_ops macb_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol		= macb_get_wol,
	.set_wol		= macb_set_wol,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};
static const struct ethtool_ops gem_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
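
/* Checksum offload is toggled in hardware: TX checksum generation via the
 * TXCOEN bit in DMACFG and RX checksum checking via the RXCOEN bit in
 * NCFGR. Both are GEM-only features, so changes are ignored on plain MACB.
 */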
static int macb_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct macb *bp = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* TX checksum offload */
	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
		u32 dmacfg;

		dmacfg = gem_readl(bp, DMACFG);
		if (features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);
		gem_writel(bp, DMACFG, dmacfg);
	}

	/* RX checksum offload */
	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
		u32 netcfg;

		netcfg = gem_readl(bp, NCFGR);
		if (features & NETIF_F_RXCSUM &&
		    !(netdev->flags & IFF_PROMISC))
			netcfg |= GEM_BIT(RXCOEN);
		else
			netcfg &= ~GEM_BIT(RXCOEN);
		gem_writel(bp, NCFGR, netcfg);
	}

	return 0;
}
static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= macb_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
	.ndo_set_features	= macb_set_features,
	.ndo_features_check	= macb_features_check,
};
/* Configure peripheral capabilities according to device tree
 * and integration options used
 */
static void macb_configure_caps(struct macb *bp,
				const struct macb_config *dt_conf)
{
	u32 dcfg;

	if (dt_conf)
		bp->caps = dt_conf->caps;

	if (hw_is_gem(bp->regs, bp->native_io)) {
		bp->caps |= MACB_CAPS_MACB_IS_GEM;

		dcfg = gem_readl(bp, DCFG1);
		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
		dcfg = gem_readl(bp, DCFG2);
		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
			bp->caps |= MACB_CAPS_FIFO_MODE;
	}

	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
static void macb_probe_queues(void __iomem *mem,
			      bool native_io,
			      unsigned int *queue_mask,
			      unsigned int *num_queues)
{
	unsigned int hw_q;

	*queue_mask = 0x1;
	*num_queues = 1;

	/* is it macb or gem ?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and don't have the
	 * MACB_CAPS_MACB_IS_GEM flag positioned
	 */
	if (!hw_is_gem(mem, native_io))
		return;

	/* bit 0 is never set but queue 0 always exists */
	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;

	*queue_mask |= 0x1;

	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
		if (*queue_mask & (1 << hw_q))
			(*num_queues)++;
}
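
/* Acquire and enable the peripheral (pclk), AHB (hclk) and optional TX/RX
 * clocks. tx_clk and rx_clk may legitimately be absent, in which case they
 * are left NULL; the common clock framework treats NULL clocks as no-ops
 * in clk_prepare_enable() and clk_disable_unprepare().
 */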
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk,
			 struct clk **rx_clk)
{
	struct macb_platform_data *pdata;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		*pclk = pdata->pclk;
		*hclk = pdata->hclk;
	} else {
		*pclk = devm_clk_get(&pdev->dev, "pclk");
		*hclk = devm_clk_get(&pdev->dev, "hclk");
	}

	if (IS_ERR(*pclk)) {
		err = PTR_ERR(*pclk);
		dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
		return err;
	}

	if (IS_ERR(*hclk)) {
		err = PTR_ERR(*hclk);
		dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_hclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	return 0;

err_disable_txclk:
	clk_disable_unprepare(*tx_clk);

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}
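
/* Common netdev/hardware initialization for MACB and GEM: per-queue
 * register mapping and IRQ setup, netdev ops, offload feature flags and
 * the USRIO/NCFGR pin and clock configuration.
 */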
static int macb_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unsigned int hw_q, q;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	int err;
	u32 val;

	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;

	/* set the queue register mapping once and for all: queue0 has a
	 * special register mapping but we don't want to test the queue index
	 * then compute the corresponding register offset at run time.
	 */
	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
		if (!(bp->queue_mask & (1 << hw_q)))
			continue;

		queue = &bp->queues[q];
		queue->bp = bp;
		if (hw_q) {
			queue->ISR  = GEM_ISR(hw_q - 1);
			queue->IER  = GEM_IER(hw_q - 1);
			queue->IDR  = GEM_IDR(hw_q - 1);
			queue->IMR  = GEM_IMR(hw_q - 1);
			queue->TBQP = GEM_TBQP(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			queue->TBQPH = GEM_TBQPH(hw_q - 1);
#endif
		} else {
			/* queue0 uses legacy registers */
			queue->ISR  = MACB_ISR;
			queue->IER  = MACB_IER;
			queue->IDR  = MACB_IDR;
			queue->IMR  = MACB_IMR;
			queue->TBQP = MACB_TBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			queue->TBQPH = MACB_TBQPH;
#endif
		}

		/* get irq: here we use the linux queue index, not the hardware
		 * queue index. the queue irq definitions in the device tree
		 * must remove the optional gaps that could exist in the
		 * hardware queue mask.
		 */
		queue->irq = platform_get_irq(pdev, q);
		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
				       IRQF_SHARED, dev->name, queue);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to request IRQ %d (error %d)\n",
				queue->irq, err);
			return err;
		}

		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
		++q;
	}

	dev->netdev_ops = &macb_netdev_ops;
	netif_napi_add(dev, &bp->napi, macb_poll, 64);

	/* set up the appropriate routines according to adapter type */
	if (macb_is_gem(bp)) {
		bp->max_tx_length = GEM_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
		bp->macbgem_ops.mog_rx = gem_rx;
		dev->ethtool_ops = &gem_ethtool_ops;
	} else {
		bp->max_tx_length = MACB_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
		bp->macbgem_ops.mog_rx = macb_rx;
		dev->ethtool_ops = &macb_ethtool_ops;
	}

	/* Set features */
	dev->hw_features = NETIF_F_SG;

	/* Check LSO capability */
	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
		dev->hw_features |= MACB_NETIF_LSO;

	/* Checksum offload is only available on gem with packet buffer */
	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	if (bp->caps & MACB_CAPS_SG_DISABLED)
		dev->hw_features &= ~NETIF_F_SG;
	dev->features = dev->hw_features;

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
		val = 0;
		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
			val = GEM_BIT(RGMII);
		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(RMII);
		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(MII);

		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
			val |= MACB_BIT(CLKEN);

		macb_or_gem_writel(bp, USRIO, val);
	}

	/* Set MII management clock divider */
	val = macb_mdc_clk_div(bp);
	val |= macb_dbw(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	macb_writel(bp, NCFGR, val);

	return 0;
}
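
/* The at91ether functions below drive the older AT91RM9200 EMAC, which has
 * no TX descriptor ring: frames are transmitted one at a time through the
 * Transmit Address/Control registers, so the driver keeps a single
 * in-flight TX skb and a minimal RX descriptor ring.
 */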
#if defined(CONFIG_OF)
/* 1518 rounded up */
#define AT91ETHER_MAX_RBUFF_SZ	0x600
/* max number of receive buffers */
#define AT91ETHER_MAX_RX_DESCR	9
/* Initialize and start the Receiver and Transmit subsystems */
static int at91ether_start(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	dma_addr_t addr;
	u32 ctl;
	int i;

	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					 (AT91ETHER_MAX_RX_DESCR *
					  sizeof(struct macb_dma_desc)),
					 &lp->rx_ring_dma, GFP_KERNEL);
	if (!lp->rx_ring)
		return -ENOMEM;

	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
					    AT91ETHER_MAX_RX_DESCR *
					    AT91ETHER_MAX_RBUFF_SZ,
					    &lp->rx_buffers_dma, GFP_KERNEL);
	if (!lp->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  sizeof(struct macb_dma_desc),
				  lp->rx_ring, lp->rx_ring_dma);
		lp->rx_ring = NULL;
		return -ENOMEM;
	}

	addr = lp->rx_buffers_dma;
	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
		lp->rx_ring[i].addr = addr;
		lp->rx_ring[i].ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
	}

	/* Set the Wrap bit on the last descriptor */
	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);

	/* Reset buffer index */
	lp->rx_tail = 0;

	/* Program address of descriptor list in Rx Buffer Queue register */
	macb_writel(lp, RBQP, lp->rx_ring_dma);

	/* Enable Receive and Transmit */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));

	return 0;
}
/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;
	int ret;

	/* Clear internal statistics */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));

	macb_set_hwaddr(lp);

	ret = at91ether_start(dev);
	if (ret)
		return ret;

	/* Enable MAC interrupts */
	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_start_queue(dev);

	return 0;
}
/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;

	/* Disable Receiver and Transmitter */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));

	/* Disable MAC interrupts */
	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	netif_stop_queue(dev);

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR *
			  sizeof(struct macb_dma_desc),
			  lp->rx_ring, lp->rx_ring_dma);
	lp->rx_ring = NULL;

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
			  lp->rx_buffers, lp->rx_buffers_dma);
	lp->rx_buffers = NULL;

	return 0;
}
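
/* The RM9200_BNQ bit in TSR is used here as a "transmitter can accept a
 * frame" flag: the queue is stopped as soon as one frame is handed to the
 * hardware and is only woken from the TCOMP interrupt, so at most one skb
 * is in flight at a time.
 */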
/* Transmit packet */
static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
		netif_stop_queue(dev);

		/* Store packet information (to free when Tx completed) */
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
						  DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, lp->skb_physaddr)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			netdev_err(dev, "%s: DMA mapping error\n", __func__);
			return NETDEV_TX_OK;
		}

		/* Set address of the data in the Transmit Address register */
		macb_writel(lp, TAR, lp->skb_physaddr);
		/* Set length of the packet in the Transmit Control register */
		macb_writel(lp, TCR, skb->len);

	} else {
		netdev_err(dev, "%s called, but device is busy!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
/* Extract received frames from the buffer descriptors and send them to
 * the upper layers. (Called from interrupt context)
 */
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, pktlen), p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			lp->stats.rx_dropped++;
		}

		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
			lp->stats.multicast++;

		/* reset ownership bit */
		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);

		/* wrap after last buffer */
		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			lp->rx_tail = 0;
		else
			lp->rx_tail++;
	}
}
/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

	/* MAC Interrupt Status register indicates what interrupts are pending.
	 * It is automatically cleared once read.
	 */
	intstatus = macb_readl(lp, ISR);

	/* Receive complete */
	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	/* Transmit complete */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOM bit is set even if the transmission failed */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			lp->stats.tx_errors++;

		if (lp->skb) {
			dev_kfree_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(NULL, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			lp->stats.tx_packets++;
			lp->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

	/* Work-around for EMAC Errata section 41.3.1 */
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
		wmb();
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif
static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk,
			      struct clk **rx_clk)
{
	int err;

	*hclk = NULL;
	*tx_clk = NULL;
	*rx_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	return 0;
}
static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;
	u32 reg;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

	macb_writel(bp, NCR, 0);

	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
		reg |= MACB_BIT(RM9200_RMII);

	macb_writel(bp, NCFGR, reg);

	return 0;
}
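
/* Per-SoC configuration: capability flags, DMA burst length and the
 * clock/IP init hooks selected through the OF match table below.
 */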
static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config },
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */
static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};
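
/* Probe sequence: map the registers, select the per-SoC config, bring up
 * clocks, size the netdev by the number of hardware queues, then run the
 * IP-specific init hook and MII probing before registration.
 */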
static int macb_probe(struct platform_device *pdev)
{
	const struct macb_config *macb_config = &default_gem_config;
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **, struct clk **)
					      = macb_config->clk_init;
	int (*init)(struct platform_device *) = macb_config->init;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *phy_node;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	unsigned int queue_mask, num_queues;
	struct macb_platform_data *pdata;
	bool native_io;
	struct phy_device *phydev;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
	if (err)
		return err;

	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
#endif

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

	mac = of_get_mac_address(np);
	if (mac)
		ether_addr_copy(bp->dev->dev_addr, mac);
	else
		macb_get_hwaddr(bp);

	/* Power up the PHY if there is a GPIO reset */
	phy_node = of_get_next_available_child(np, NULL);
	if (phy_node) {
		int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);

		if (gpio_is_valid(gpio)) {
			bp->reset_gpio = gpio_to_desc(gpio);
			gpiod_direction_output(bp->reset_gpio, 1);
		}
	}
	of_node_put(phy_node);

	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	phydev = dev->phydev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	phy_attached_info(phydev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	return 0;

err_out_unregister_mdio:
	phy_disconnect(dev->phydev);
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

	/* Shutdown the PHY if there is a GPIO reset */
	if (bp->reset_gpio)
		gpiod_set_value(bp->reset_gpio, 0);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);

	return err;
}
static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);

		/* Shutdown the PHY if there is a GPIO reset */
		if (bp->reset_gpio)
			gpiod_set_value(bp->reset_gpio, 0);

		unregister_netdev(dev);
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
		free_netdev(dev);
	}

	return 0;
}
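
/* On suspend, if Wake-on-LAN is armed the MAC is left running with the
 * magic-packet interrupt enabled as a wake source; otherwise all clocks
 * are gated. Resume mirrors this in reverse.
 */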
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
	} else {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}

	return 0;
}
static int __maybe_unused macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}

	netif_device_attach(netdev);

	return 0;
}
static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm	= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");