/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/udp.h>
#include <linux/tcp.h>

#include "macb.h"
#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */

#define DEFAULT_RX_RING_SIZE	512	/* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512	/* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
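/* Illustrative arithmetic (not from the original source): with the default
 * tx_ring_size of 512, MACB_TX_WAKEUP_THRESH evaluates to 3 * 512 / 4 = 384,
 * i.e. a stopped subqueue is woken once no more than 384 descriptors remain
 * in flight.
 */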
#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)
/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
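/* Illustrative sanity check (not from the original source): a maximum-length
 * frame of 1518 bytes is 12144 bits, which takes roughly 1214 us on the wire
 * at 10 Mbit/s, so 1230 us covers one full frame time with a small margin.
 */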
/* DMA buffer descriptors may have a different size, depending on the
 * hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}
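/* Illustrative sizes (derived from the layout comment above, assuming 32-bit
 * descriptor words): the base descriptor is 2 words (8 bytes); the 64-bit
 * address and PTP extensions each append 2 more words, so the per-entry
 * footprint grows to 16 bytes with one extension and 24 bytes with both.
 */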
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc
			+ sizeof(struct macb_dma_desc));
	return NULL;
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}
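/* Illustrative note (not from the original source): because the ring sizes
 * are required to be powers of 2, "index & (size - 1)" is equivalent to
 * "index % size"; e.g. with size 512, index 513 wraps to 513 & 511 = 1.
 */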
static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	index = macb_rx_ring_wrap(bp, index);
	index = macb_adj_dma_desc_idx(bp, index);
	return &bp->rx_ring[index];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + bp->rx_buffer_size *
	       macb_rx_ring_wrap(bp, index);
}
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of NCR register. When the
 * CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}
static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}
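/* Illustrative encoding example (not from the original source): for the MAC
 * address 00:11:22:33:44:55 stored byte-wise in dev_addr[], SA1B receives
 * 0x33221100 (the first four bytes, little-endian) and SA1T receives 0x5544
 * (the last two bytes).
 */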
static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address register for valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}
static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}
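/* Illustrative note (not from the original source): the MAN register writes
 * in the two helpers above assemble a standard IEEE 802.3 clause 22
 * management frame: start-of-frame, a read or write opcode, the 5-bit PHY
 * address, the 5-bit register address, the turnaround code and, for writes,
 * the 16-bit data field.
 */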
/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk:	Pointer to the clock to change
 * @speed:	New link speed, used to derive the target frequency in Hz
 * @dev:	Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}
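/* Illustrative arithmetic (not from the original source): DIV_ROUND_UP(ferr,
 * rate / 100000) expresses the rounding error in steps of 10 ppm, so the
 * "ferr > 5" test above warns once the deviation exceeds the 50 ppm budget
 * that RGMII allows.
 */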
static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}
/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	if (bp->phy_node) {
		phydev = of_phy_connect(dev, bp->phy_node,
					&macb_handle_link_change, 0,
					bp->phy_interface);
		if (!phydev)
			return -ENODEV;
	} else {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		pdata = dev_get_platdata(&bp->pdev->dev);
		if (pdata) {
			if (gpio_is_valid(pdata->phy_irq_pin)) {
				ret = devm_gpio_request(&bp->pdev->dev,
							pdata->phy_irq_pin,
							"phy int");
				if (!ret) {
					phy_irq = gpio_to_irq(pdata->phy_irq_pin);
					phydev->irq = (phy_irq < 0) ?
						      PHY_POLL : phy_irq;
				}
			} else {
				phydev->irq = PHY_POLL;
			}
		}

		/* attach the mac to the phy */
		ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
					 bp->phy_interface);
		if (ret) {
			netdev_err(dev, "Could not attach to PHY\n");
			return ret;
		}
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
		phydev->supported &= ~SUPPORTED_1000baseT_Half;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}
static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np) {
		if (of_phy_is_fixed_link(np)) {
			if (of_phy_register_fixed_link(np) < 0) {
				dev_err(&bp->pdev->dev,
					"broken fixed-link specification\n");
				goto err_out_unregister_bus;
			}
			bp->phy_node = of_node_get(np);

			err = mdiobus_register(bp->mii_bus);
		} else {
			/* try dt phy registration */
			err = of_mdiobus_register(bp->mii_bus, np);

			/* fallback to standard phy registration if no phy were
			 * found during dt phy registration
			 */
			if (!err && !phy_find_first(bp->mii_bus)) {
				for (i = 0; i < PHY_MAX_ADDR; i++) {
					struct phy_device *phydev;

					phydev = mdiobus_scan(bp->mii_bus, i);
					if (IS_ERR(phydev) &&
					    PTR_ERR(phydev) != -ENODEV) {
						err = PTR_ERR(phydev);
						break;
					}
				}

				if (err)
					goto err_out_unregister_bus;
			}
		}
	} else {
		for (i = 0; i < PHY_MAX_ADDR; i++)
			bp->mii_bus->irq[i] = PHY_POLL;

		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = mdiobus_register(bp->mii_bus);
	}

	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	of_node_put(bp->phy_node);
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}
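/* Illustrative note (not from the original source): the loop above relies on
 * the fields of struct macb_stats from rx_pause_frames to tx_pause_frames
 * mirroring the hardware register block MACB_PFR..MACB_TPF, one u32 per
 * register, which is exactly what the WARN_ON() asserts.
 */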
static int macb_halt_tx(struct macb *bp)
{
	unsigned long	halt_time, timeout;
	u32		status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}
static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low bits of RX address contain the RX_USED bit, clearing
		 * of which allows packet RX. Make sure the high bits are also
		 * visible to HW at that point.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue	*queue = container_of(work, struct macb_queue,
						      tx_error_task);
	struct macb		*bp = queue->bp;
	struct macb_tx_skb	*tx_skb;
	struct macb_dma_desc	*desc;
	struct sk_buff		*skb;
	unsigned int		tail;
	unsigned long		flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32	ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}
static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb	*tx_skb;
		struct sk_buff		*skb;
		struct macb_dma_desc	*desc;
		u32			ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (unlikely(skb_shinfo(skb)->tx_flags &
					     SKBTX_HW_TSTAMP) &&
				    gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}
static void gem_rx_refill(struct macb *bp)
{
	unsigned int		entry;
	struct sk_buff		*skb;
	dma_addr_t		paddr;
	struct macb_dma_desc	*desc;

	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		bp->rx_prepared_head++;
		desc = macb_rx_desc(bp, entry);

		if (!bp->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			bp->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			desc->ctrl = 0;
			/* Setting addr clears RX_USED and allows reception,
			 * make sure ctrl is cleared first to avoid a race.
			 */
			dma_wmb();
			macb_set_addr(bp, desc, paddr);

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->ctrl = 0;
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
		    bp->rx_prepared_head, bp->rx_tail);
}
/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this is updated, so we don't have to record
	 * anything.
	 */
}
static int gem_rx(struct macb *bp, int budget)
{
	unsigned int		len;
	unsigned int		entry;
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	int			count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
		desc = macb_rx_desc(bp, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);

		if (!rxused)
			break;

		/* Ensure ctrl is at least as up-to-date as rxused */
		dma_rmb();

		ctrl = desc->ctrl;

		bp->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			break;
		}
		skb = bp->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		bp->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(bp);

	return count;
}
static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	desc = macb_rx_desc(bp, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(bp, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}
static inline void macb_init_rx_ring(struct macb *bp)
{
	dma_addr_t addr;
	struct macb_dma_desc *desc = NULL;
	int i;

	addr = bp->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		desc = macb_rx_desc(bp, i);
		macb_set_addr(bp, desc, addr);
		desc->ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	desc->addr |= MACB_BIT(RX_WRAP);
	bp->rx_tail = 0;
}
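/* Illustrative note (not from the original source): unlike gem_rx(), which
 * hands a per-descriptor sk_buff straight to the stack, the plain MACB RX
 * path points every descriptor into one coherent buffer area (rx_buffers)
 * and macb_rx_frame() copies the fragments out into a freshly allocated skb.
 */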
static int macb_rx(struct macb *bp, int budget)
{
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		/* Ensure ctrl is at least as up-to-date as addr */
		dma_rmb();

		ctrl = desc->ctrl;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(bp);
		macb_writel(bp, RBQP, bp->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}
static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(bp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}
static void macb_tx_restart(struct macb_queue *queue)
{
	unsigned int head = queue->tx_head;
	unsigned int tail = queue->tx_tail;
	struct macb *bp = queue->bp;

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TXUBR));

	if (head == tail)
		return;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&bp->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		if (status & MACB_BIT(TXUBR))
			macb_tx_restart(queue);

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		/* There is a hardware issue under heavy load where DMA can
		 * stop, this causes endless "used buffer descriptor read"
		 * interrupts but it can be cleared by re-enabling RX. See
		 * the at91 manual, section 41.3.1 or the Zynq manual
		 * section 16.7.4 for details.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			/* TODO: Reset the hardware, and maybe move the
			 * netdev_err to a lower-priority context as well
			 * (work queue?)
			 */
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}
		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif
static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = macb_tx_desc(queue, entry);
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
				  skb_transport_offset(skb) +
				  ETH_FCS_LEN;
		else /* TSO */ {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = macb_tx_desc(queue, entry);

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(bp, desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}
static netdev_features_t macb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	unsigned int nr_frags, f;
	unsigned int hdrlen;

	/* Validate LSO compatibility */

	/* there is only one buffer */
	if (!skb_is_nonlinear(skb))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);
	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
		hdrlen += tcp_hdrlen(skb);

	/* For LSO:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}
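/* Illustrative example (not from the original source): for a TSO skb whose
 * linear part holds a 54-byte header plus payload up to 1500 bytes total,
 * the first check above computes 1500 - 54 = 1446, which is not a multiple
 * of MACB_TX_LEN_ALIGN (8), so LSO is dropped from the feature set and the
 * frame is segmented in software instead.
 */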
static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <= 2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso, is_udp = 0;

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);

		/* length of headers */
		if (is_udp)
			/* only queue eth + ip headers separately for UDP */
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else {
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);
	}

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}
static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	dma_addr_t		addr;
	int i;

	if (!bp->rx_skbuff)
		return;

	for (i = 0; i < bp->rx_ring_size; i++) {
		skb = bp->rx_skbuff[i];

		if (!skb)
			continue;

		desc = macb_rx_desc(bp, i);
		addr = macb_get_addr(bp, desc);

		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = NULL;
	}

	kfree(bp->rx_skbuff);
	bp->rx_skbuff = NULL;
}

static void macb_free_rx_buffers(struct macb *bp)
{
	if (bp->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  bp->rx_ring_size * bp->rx_buffer_size,
				  bp->rx_buffers, bp->rx_buffers_dma);
		bp->rx_buffers = NULL;
	}
}
static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	bp->macbgem_ops.mog_free_rx_buffers(bp);
	if (bp->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
				  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
	}
}
static int gem_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = bp->rx_ring_size * sizeof(struct sk_buff *);
	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_skbuff)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated %d RX struct sk_buff entries at %p\n",
		   bp->rx_ring_size, bp->rx_skbuff);
	return 0;
}

static int macb_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = bp->rx_ring_size * bp->rx_buffer_size;
	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &bp->rx_buffers_dma, GFP_KERNEL);
	if (!bp->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
	return 0;
}
static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES(bp);
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;
	}

	size = RX_RING_BYTES(bp);
	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->rx_ring_dma, GFP_KERNEL);
	if (!bp->rx_ring)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}
*bp
)
1883 struct macb_queue
*queue
;
1884 struct macb_dma_desc
*desc
= NULL
;
1888 for (q
= 0, queue
= bp
->queues
; q
< bp
->num_queues
; ++q
, ++queue
) {
1889 for (i
= 0; i
< bp
->tx_ring_size
; i
++) {
1890 desc
= macb_tx_desc(queue
, i
);
1891 macb_set_addr(bp
, desc
, 0);
1892 desc
->ctrl
= MACB_BIT(TX_USED
);
1894 desc
->ctrl
|= MACB_BIT(TX_WRAP
);
1900 bp
->rx_prepared_head
= 0;
1905 static void macb_init_rings(struct macb
*bp
)
1908 struct macb_dma_desc
*desc
= NULL
;
1910 macb_init_rx_ring(bp
);
1912 for (i
= 0; i
< bp
->tx_ring_size
; i
++) {
1913 desc
= macb_tx_desc(&bp
->queues
[0], i
);
1914 macb_set_addr(bp
, desc
, 0);
1915 desc
->ctrl
= MACB_BIT(TX_USED
);
1917 bp
->queues
[0].tx_head
= 0;
1918 bp
->queues
[0].tx_tail
= 0;
1919 desc
->ctrl
|= MACB_BIT(TX_WRAP
);
static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl = macb_readl(bp, NCR);

	/* Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));

	/* Clear the stats registers (XXX: Update stats first?) */
	ctrl |= MACB_BIT(CLRSTAT);

	macb_writel(bp, NCR, ctrl);

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
		queue_readl(queue, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
	}
}
static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}
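/* Illustrative example (not from the original source): with a 133 MHz pclk a
 * plain MACB falls through to MACB_CLK_DIV64 above, giving an MDC of roughly
 * 133 MHz / 64, about 2.1 MHz, which stays below the 2.5 MHz maximum that
 * IEEE 802.3 clause 22 permits for MDC.
 */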
/* Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}
/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	u32 dmacfg;

	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		if (bp->native_io)
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

		dmacfg &= ~GEM_BIT(ADDR64);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			dmacfg |= GEM_BIT(ADDR64);
#endif
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
#endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}
static void macb_init_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	if (bp->caps & MACB_CAPS_JUMBO)
		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
	else
		config |= MACB_BIT(BIG);	/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		gem_writel(bp, JML, bp->jumbo_max_len);
	bp->speed = SPEED_10;
	bp->duplex = DUPLEX_HALF;
	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
	if (bp->caps & MACB_CAPS_JUMBO)
		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;

	macb_configure_dma(bp);

	/* Initialize TX and RX buffers */
	macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
#endif
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	/* Enable TX and RX */
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
}
/* The hash address register is 64 bits long and takes up two
 * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function. The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received. If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast. A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register. A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register. To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */
static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}
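/* Illustrative example (not from the original source): for the broadcast
 * address ff:ff:ff:ff:ff:ff every hi[j] XORs eight 1-bits and yields 0, so
 * hash_get_index() returns 0 and the frame is matched against bit 0 of the
 * hash register.
 */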
/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = 0;
	mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}
/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);

		/* Disable RX checksum offload */
		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

		/* Enable RX checksum offload only if requested */
		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Enable all multicast mode */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Enable specific multicasts */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable all multicast mode */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}
static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	int err;

	netdev_dbg(bp->dev, "open\n");

	/* carrier starts down */
	netif_carrier_off(dev);

	/* if the phy is not yet registered, retry later */
	if (!dev->phydev)
		return -EAGAIN;

	/* RX buffers initialization */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		return err;
	}

	napi_enable(&bp->napi);

	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_tx_start_all_queues(dev);

	if (bp->ptp_info)
		bp->ptp_info->ptp_init(dev);

	return 0;
}
static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	netif_tx_stop_all_queues(dev);
	napi_disable(&bp->napi);

	if (dev->phydev)
		phy_stop(dev->phydev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(dev);

	return 0;
}
static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;

	return 0;
}
static void gem_update_stats(struct macb *bp)
{
	unsigned int i;
	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = bp->macb_reg_readl(bp, offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			/* Add GEM_OCTTXH, GEM_OCTRXH */
			val = bp->macb_reg_readl(bp, offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}
}
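/* Illustrative note (not from the original source): GEM splits its octet
 * counters across two registers; for GEM_OCTTXL/GEM_OCTRXL the loop above
 * also reads the adjacent high word at offset + 4 and folds it in as
 * ((u64)val) << 32, assembling one 64-bit count from the two halves.
 */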
static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->dev->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}
static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct macb *bp;

	bp = netdev_priv(dev);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
}
static int gem_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return GEM_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
	unsigned int i;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
			memcpy(p, gem_statistics[i].stat_string,
			       ETH_GSTRING_LEN);
		break;
	}
}
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->dev->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors +
			    hwstat->sqe_test_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}
static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}
static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);

	regs_buff[0]  = macb_readl(bp, NCR);
	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2]  = macb_readl(bp, NSR);
	regs_buff[3]  = macb_readl(bp, TSR);
	regs_buff[4]  = macb_readl(bp, RBQP);
	regs_buff[5]  = macb_readl(bp, TBQP);
	regs_buff[6]  = macb_readl(bp, RSR);
	regs_buff[7]  = macb_readl(bp, IMR);

	regs_buff[8]  = tail;
	regs_buff[9]  = head;
	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
	if (macb_is_gem(bp))
		regs_buff[13] = gem_readl(bp, DMACFG);
}
static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;

		if (bp->wol & MACB_WOL_ENABLED)
			wol->wolopts |= WAKE_MAGIC;
	}
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
	    (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	if (wol->wolopts & WAKE_MAGIC)
		bp->wol |= MACB_WOL_ENABLED;
	else
		bp->wol &= ~MACB_WOL_ENABLED;

	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);

	return 0;
}
static void macb_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);

	ring->rx_max_pending = MAX_RX_RING_SIZE;
	ring->tx_max_pending = MAX_TX_RING_SIZE;

	ring->rx_pending = bp->rx_ring_size;
	ring->tx_pending = bp->tx_ring_size;
}
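
/* Ring sizes requested via ethtool are clamped to the supported range
 * and rounded up to the next power of two, matching the ring-wrap
 * masks. Resizing a running interface briefly closes and reopens it so
 * the rings can be reallocated.
 */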
static int macb_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);
	u32 new_rx_size, new_tx_size;
	unsigned int reset = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_size = clamp_t(u32, ring->rx_pending,
			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
	new_rx_size = roundup_pow_of_two(new_rx_size);

	new_tx_size = clamp_t(u32, ring->tx_pending,
			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
	new_tx_size = roundup_pow_of_two(new_tx_size);

	if ((new_tx_size == bp->tx_ring_size) &&
	    (new_rx_size == bp->rx_ring_size)) {
		/* nothing to do */
		return 0;
	}

	if (netif_running(bp->dev)) {
		reset = 1;
		macb_close(bp->dev);
	}

	bp->rx_ring_size = new_rx_size;
	bp->tx_ring_size = new_tx_size;

	if (reset)
		macb_open(bp->dev);

	return 0;
}
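
/* Hardware (IEEE 1588) timestamping support follows. It is compiled in
 * only with CONFIG_MACB_USE_HWSTAMP and advertised only when the GEM
 * has a TSU (see macb_configure_caps()).
 */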
#ifdef CONFIG_MACB_USE_HWSTAMP
static unsigned int gem_get_tsu_rate(struct macb *bp)
{
	struct clk *tsu_clk;
	unsigned int tsu_rate;

	tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
	if (!IS_ERR(tsu_clk))
		tsu_rate = clk_get_rate(tsu_clk);
	/* try pclk instead */
	else if (!IS_ERR(bp->pclk)) {
		tsu_clk = bp->pclk;
		tsu_rate = clk_get_rate(tsu_clk);
	} else
		return -ENOTSUPP;
	return tsu_rate;
}
static s32 gem_get_ptp_max_adj(void)
{
	return 64000000;
}
static int gem_get_ts_info(struct net_device *dev,
			   struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(dev);

	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
		ethtool_op_get_ts_info(dev, info);
		return 0;
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_ONESTEP_SYNC) |
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_ALL);

	info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;

	return 0;
}
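
/* PTP callback table; macb_configure_caps() points bp->ptp_info at this
 * when the GEM exposes a TSU, and the ioctl and ethtool paths dispatch
 * through it.
 */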
static struct macb_ptp_info gem_ptp_info = {
	.ptp_init	 = gem_ptp_init,
	.ptp_remove	 = gem_ptp_remove,
	.get_ptp_max_adj = gem_get_ptp_max_adj,
	.get_tsu_rate	 = gem_get_tsu_rate,
	.get_ts_info	 = gem_get_ts_info,
	.get_hwtst	 = gem_get_hwtst,
	.set_hwtst	 = gem_set_hwtst,
};
#endif
static int macb_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(netdev);

	if (bp->ptp_info)
		return bp->ptp_info->get_ts_info(netdev, info);

	return ethtool_op_get_ts_info(netdev, info);
}
static const struct ethtool_ops macb_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol		= macb_get_wol,
	.set_wol		= macb_set_wol,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};
static const struct ethtool_ops gem_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= macb_get_ts_info,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;
	struct macb *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (!bp->ptp_info)
		return phy_mii_ioctl(phydev, rq, cmd);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return bp->ptp_info->set_hwtst(dev, rq, cmd);
	case SIOCGHWTSTAMP:
		return bp->ptp_info->get_hwtst(dev, rq);
	default:
		return phy_mii_ioctl(phydev, rq, cmd);
	}
}
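
/* Runtime toggling of checksum offload: TX checksum generation lives in
 * the DMA configuration register (TXCOEN), RX checksum validation in
 * the network configuration register (RXCOEN). Both exist only on GEM,
 * and RX offload is left off in promiscuous mode so the hardware does
 * not discard frames with bad checksums.
 */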
static int macb_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct macb *bp = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* TX checksum offload */
	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
		u32 dmacfg;

		dmacfg = gem_readl(bp, DMACFG);
		if (features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);
		gem_writel(bp, DMACFG, dmacfg);
	}

	/* RX checksum offload */
	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
		u32 netcfg;

		netcfg = gem_readl(bp, NCFGR);
		if (features & NETIF_F_RXCSUM &&
		    !(netdev->flags & IFF_PROMISC))
			netcfg |= GEM_BIT(RXCOEN);
		else
			netcfg &= ~GEM_BIT(RXCOEN);
		gem_writel(bp, NCFGR, netcfg);
	}

	return 0;
}
static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= macb_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
	.ndo_set_features	= macb_set_features,
	.ndo_features_check	= macb_features_check,
};
/* Configure peripheral capabilities according to device tree
 * and integration options used
 */
static void macb_configure_caps(struct macb *bp,
				const struct macb_config *dt_conf)
{
	u32 dcfg;

	if (dt_conf)
		bp->caps = dt_conf->caps;

	if (hw_is_gem(bp->regs, bp->native_io)) {
		bp->caps |= MACB_CAPS_MACB_IS_GEM;

		dcfg = gem_readl(bp, DCFG1);
		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
		dcfg = gem_readl(bp, DCFG2);
		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
			bp->caps |= MACB_CAPS_FIFO_MODE;
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (gem_has_ptp(bp)) {
			if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
				pr_err("GEM doesn't support hardware ptp.\n");
			else {
				bp->hw_dma_cap |= HW_DMA_CAP_PTP;
				bp->ptp_info = &gem_ptp_info;
			}
		}
#endif
	}

	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
static void macb_probe_queues(void __iomem *mem,
			      bool native_io,
			      unsigned int *queue_mask,
			      unsigned int *num_queues)
{
	unsigned int hw_q;

	*queue_mask = 0x1;
	*num_queues = 1;

	/* is it macb or gem ?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and don't have the
	 * MACB_CAPS_MACB_IS_GEM flag set yet
	 */
	if (!hw_is_gem(mem, native_io))
		return;

	/* bit 0 is never set but queue 0 always exists */
	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;

	*queue_mask |= 0x1;

	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
		if (*queue_mask & (1 << hw_q))
			(*num_queues)++;
}
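
/* Clock handling: pclk and hclk are mandatory (taken from platform data
 * when present, otherwise from the device tree), while tx_clk and
 * rx_clk are optional and left NULL when absent; the clk API treats a
 * NULL clock as a no-op.
 */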
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk,
			 struct clk **rx_clk)
{
	struct macb_platform_data *pdata;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		*pclk = pdata->pclk;
		*hclk = pdata->hclk;
	} else {
		*pclk = devm_clk_get(&pdev->dev, "pclk");
		*hclk = devm_clk_get(&pdev->dev, "hclk");
	}

	if (IS_ERR_OR_NULL(*pclk)) {
		err = PTR_ERR(*pclk);
		if (!err)
			err = -ENODEV;

		dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
		return err;
	}

	if (IS_ERR_OR_NULL(*hclk)) {
		err = PTR_ERR(*hclk);
		if (!err)
			err = -ENODEV;

		dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_hclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	return 0;

err_disable_txclk:
	clk_disable_unprepare(*tx_clk);

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}
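
/* Per-IP init for MACB/GEM: map each enabled hardware queue to its
 * register set (queue 0 keeps the legacy layout), request the per-queue
 * IRQs, select the GEM or MACB buffer-management routines, then program
 * USRIO and the MII management clock divider.
 */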
static int macb_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unsigned int hw_q, q;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	int err;
	u32 val;

	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;

	/* set the queue register mapping once for all: queue0 has a special
	 * register mapping but we don't want to test the queue index then
	 * compute the corresponding register offset at run time.
	 */
	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
		if (!(bp->queue_mask & (1 << hw_q)))
			continue;

		queue = &bp->queues[q];
		queue->bp = bp;
		if (hw_q) {
			queue->ISR  = GEM_ISR(hw_q - 1);
			queue->IER  = GEM_IER(hw_q - 1);
			queue->IDR  = GEM_IDR(hw_q - 1);
			queue->IMR  = GEM_IMR(hw_q - 1);
			queue->TBQP = GEM_TBQP(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B)
				queue->TBQPH = GEM_TBQPH(hw_q - 1);
#endif
		} else {
			/* queue0 uses legacy registers */
			queue->ISR  = MACB_ISR;
			queue->IER  = MACB_IER;
			queue->IDR  = MACB_IDR;
			queue->IMR  = MACB_IMR;
			queue->TBQP = MACB_TBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B)
				queue->TBQPH = MACB_TBQPH;
#endif
		}

		/* get irq: here we use the linux queue index, not the hardware
		 * queue index. the queue irq definitions in the device tree
		 * must remove the optional gaps that could exist in the
		 * hardware queue mask.
		 */
		queue->irq = platform_get_irq(pdev, q);
		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
				       IRQF_SHARED, dev->name, queue);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to request IRQ %d (error %d)\n",
				queue->irq, err);
			return err;
		}

		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
		q++;
	}

	dev->netdev_ops = &macb_netdev_ops;
	netif_napi_add(dev, &bp->napi, macb_poll, 64);

	/* set up appropriate routines according to adapter type */
	if (macb_is_gem(bp)) {
		bp->max_tx_length = GEM_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
		bp->macbgem_ops.mog_rx = gem_rx;
		dev->ethtool_ops = &gem_ethtool_ops;
	} else {
		bp->max_tx_length = MACB_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
		bp->macbgem_ops.mog_rx = macb_rx;
		dev->ethtool_ops = &macb_ethtool_ops;
	}

	/* Set features */
	dev->hw_features = NETIF_F_SG;

	/* Check LSO capability */
	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
		dev->hw_features |= MACB_NETIF_LSO;

	/* Checksum offload is only available on gem with packet buffer */
	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	if (bp->caps & MACB_CAPS_SG_DISABLED)
		dev->hw_features &= ~NETIF_F_SG;
	dev->features = dev->hw_features;

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
		val = 0;
		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
			val = GEM_BIT(RGMII);
		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(RMII);
		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(MII);

		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
			val |= MACB_BIT(CLKEN);

		macb_or_gem_writel(bp, USRIO, val);
	}

	/* Set MII management clock divider */
	val = macb_mdc_clk_div(bp);
	val |= macb_dbw(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	macb_writel(bp, NCFGR, val);

	return 0;
}
#if defined(CONFIG_OF)
/* 1518 rounded up */
#define AT91ETHER_MAX_RBUFF_SZ	0x600
/* max number of receive buffers */
#define AT91ETHER_MAX_RX_DESCR	9
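
/* The at91rm9200 EMAC has no scatter-gather DMA: a small fixed ring of
 * AT91ETHER_MAX_RX_DESCR buffers is allocated once at open time and
 * received frames are copied into freshly allocated skbs (see
 * at91ether_rx()).
 */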
/* Initialize and start the Receiver and Transmit subsystems */
static int at91ether_start(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_dma_desc *desc;
	dma_addr_t addr;
	u32 ctl;
	int i;

	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					 (AT91ETHER_MAX_RX_DESCR *
					  macb_dma_desc_get_size(lp)),
					 &lp->rx_ring_dma, GFP_KERNEL);
	if (!lp->rx_ring)
		return -ENOMEM;

	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
					    AT91ETHER_MAX_RX_DESCR *
					    AT91ETHER_MAX_RBUFF_SZ,
					    &lp->rx_buffers_dma, GFP_KERNEL);
	if (!lp->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  macb_dma_desc_get_size(lp),
				  lp->rx_ring, lp->rx_ring_dma);
		lp->rx_ring = NULL;
		return -ENOMEM;
	}

	addr = lp->rx_buffers_dma;
	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
		desc = macb_rx_desc(lp, i);
		macb_set_addr(lp, desc, addr);
		desc->ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
	}

	/* Set the Wrap bit on the last descriptor */
	desc->addr |= MACB_BIT(RX_WRAP);

	/* Reset buffer index */
	lp->rx_tail = 0;

	/* Program address of descriptor list in Rx Buffer Queue register */
	macb_writel(lp, RBQP, lp->rx_ring_dma);

	/* Enable Receive and Transmit */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));

	return 0;
}
/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;
	int ret;

	/* Clear internal statistics */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));

	macb_set_hwaddr(lp);

	ret = at91ether_start(dev);
	if (ret)
		return ret;

	/* Enable MAC interrupts */
	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_start_queue(dev);

	return 0;
}
/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;

	/* Disable Receiver and Transmitter */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));

	/* Disable MAC interrupts */
	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR) |
			     MACB_BIT(HRESP));

	netif_stop_queue(dev);

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR *
			  macb_dma_desc_get_size(lp),
			  lp->rx_ring, lp->rx_ring_dma);
	lp->rx_ring = NULL;

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
			  lp->rx_buffers, lp->rx_buffers_dma);
	lp->rx_buffers = NULL;

	return 0;
}
/* Transmit packet */
static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
		netif_stop_queue(dev);

		/* Store packet information (to free when Tx completed) */
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
						  DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, lp->skb_physaddr)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			netdev_err(dev, "%s: DMA mapping error\n", __func__);
			return NETDEV_TX_OK;
		}

		/* Set address of the data in the Transmit Address register */
		macb_writel(lp, TAR, lp->skb_physaddr);
		/* Set length of the packet in the Transmit Control register */
		macb_writel(lp, TCR, skb->len);

	} else {
		netdev_err(dev, "%s called, but device is busy!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
/* Extract received frames from the buffer descriptors and send them to
 * the upper layers. (Called from interrupt context)
 */
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_dma_desc *desc;
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	desc = macb_rx_desc(lp, lp->rx_tail);
	while (desc->addr & MACB_BIT(RX_USED)) {
		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			skb_put_data(skb, p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			dev->stats.rx_dropped++;
		}

		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
			dev->stats.multicast++;

		/* reset ownership bit */
		desc->addr &= ~MACB_BIT(RX_USED);

		/* wrap after last buffer */
		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			lp->rx_tail = 0;
		else
			lp->rx_tail++;

		desc = macb_rx_desc(lp, lp->rx_tail);
	}
}
/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

	/* MAC Interrupt Status register indicates what interrupts are pending.
	 * It is automatically cleared once read.
	 */
	intstatus = macb_readl(lp, ISR);

	/* Receive complete */
	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	/* Transmit complete */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOM bit is set even if the transmission failed */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			dev->stats.tx_errors++;

		if (lp->skb) {
			dev_kfree_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(NULL, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

	/* Work-around for EMAC Errata section 41.3.1 */
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
		wmb();
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif
static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk,
			      struct clk **rx_clk)
{
	int err;

	*hclk = NULL;
	*tx_clk = NULL;
	*rx_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	return 0;
}
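
/* RM9200-specific setup: a single queue using the legacy register
 * layout, MDC divider fixed at 32 and 1536-byte frame reception (BIG)
 * enabled in NCFGR.
 */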
static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;
	u32 reg;

	bp->queues[0].bp = bp;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

	macb_writel(bp, NCR, 0);

	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
		reg |= MACB_BIT(RM9200_RMII);

	macb_writel(bp, NCFGR, reg);

	return 0;
}
static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3macb_config = {
	.caps = MACB_CAPS_SG_DISABLED
	      | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};
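
/* DT match table. Entries without a .data pointer (e.g. "cdns,macb")
 * keep the default_gem_config selected in macb_probe().
 */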
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config },
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */
static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};
static int macb_probe(struct platform_device *pdev)
{
	const struct macb_config *macb_config = &default_gem_config;
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **, struct clk **)
					      = macb_config->clk_init;
	int (*init)(struct platform_device *) = macb_config->init;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *phy_node;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	unsigned int queue_mask, num_queues;
	struct macb_platform_data *pdata;
	bool native_io;
	struct phy_device *phydev;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
	if (err)
		return err;

	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

	mac = of_get_mac_address(np);
	if (mac)
		ether_addr_copy(bp->dev->dev_addr, mac);
	else
		macb_get_hwaddr(bp);

	/* Power up the PHY if there is a GPIO reset */
	phy_node = of_get_next_available_child(np, NULL);
	if (phy_node) {
		int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);

		if (gpio_is_valid(gpio)) {
			bp->reset_gpio = gpio_to_desc(gpio);
			gpiod_direction_output(bp->reset_gpio, 1);
		}
	}
	of_node_put(phy_node);

	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	phydev = dev->phydev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	phy_attached_info(phydev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	return 0;

err_out_unregister_mdio:
	phy_disconnect(dev->phydev);
	mdiobus_unregister(bp->mii_bus);
	of_node_put(bp->phy_node);
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	mdiobus_free(bp->mii_bus);

	/* Shutdown the PHY if there is a GPIO reset */
	if (bp->reset_gpio)
		gpiod_set_value(bp->reset_gpio, 0);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);

	return err;
}
static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;
	struct device_node *np = pdev->dev.of_node;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		if (np && of_phy_is_fixed_link(np))
			of_phy_deregister_fixed_link(np);
		dev->phydev = NULL;
		mdiobus_free(bp->mii_bus);

		/* Shutdown the PHY if there is a GPIO reset */
		if (bp->reset_gpio)
			gpiod_set_value(bp->reset_gpio, 0);

		unregister_netdev(dev);
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
		of_node_put(bp->phy_node);
		free_netdev(dev);
	}

	return 0;
}
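
/* Power management: with magic-packet wake-up enabled the clocks stay
 * running across suspend and the WOL interrupt is armed; otherwise all
 * controller clocks are gated and re-enabled on resume.
 */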
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
	} else {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}

	return 0;
}
static int __maybe_unused macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}

	netif_device_attach(netdev);

	return 0;
}
static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm		= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");