/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64 /* bytes */

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp) \
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp) \
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
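/* For example, with the default 512-entry TX ring this evaluates to 384,
 * i.e. the queue is woken once no more than three quarters of the
 * descriptors are still in use.
 */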

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND) \
				 | MACB_BIT(ISR_RLE) \
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \
				 | MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
 * false amba_error in TX path from the DMA assuming there is not enough
 * space in the SRAM (16KB) even when there is.
 */
#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)
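/* 0x3FC0 is 16320 bytes, i.e. 64 bytes short of the 16KB SRAM mentioned
 * above while still being a multiple of MACB_TX_LEN_ALIGN, so the cap
 * respects both constraints.
 */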

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
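/* One way to read the 1230 us figure: a maximum-size 1518-byte frame is
 * 12144 bits, or roughly 1214 us at 10 Mbit/s; adding preamble/SFD and the
 * interframe gap (20 bytes, about 16 us) gives approximately 1230 us.
 */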

/* DMA buffer descriptor might be different size
 * depends on hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}
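/* The index is scaled here because the extended descriptor layouts described
 * above occupy two (64-bit or PTP) or three (64-bit + PTP) basic descriptor
 * slots in the ring.
 */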

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
	return NULL;
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}
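/* The AND-mask wrap only works because the ring sizes are restricted to
 * powers of two (see DEFAULT_TX_RING_SIZE above); with 512 entries the mask
 * is 0x1ff.
 */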

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
			macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	index = macb_rx_ring_wrap(bp, index);
	index = macb_adj_dma_desc_idx(bp, index);
	return &bp->rx_ring[index];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + bp->rx_buffer_size *
	       macb_rx_ring_wrap(bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of NCR register. When the
 * CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address register for valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk:	Pointer to the clock to change
 * @speed:	New link speed, used to select the clock rate
 * @dev:	Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	if (bp->phy_node) {
		phydev = of_phy_connect(dev, bp->phy_node,
					&macb_handle_link_change, 0,
					bp->phy_interface);
		if (!phydev)
			return -ENODEV;
	} else {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		pdata = dev_get_platdata(&bp->pdev->dev);
		if (pdata) {
			if (gpio_is_valid(pdata->phy_irq_pin)) {
				ret = devm_gpio_request(&bp->pdev->dev,
							pdata->phy_irq_pin, "phy int");
				if (!ret) {
					phy_irq = gpio_to_irq(pdata->phy_irq_pin);
					phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
				}
			} else {
				phydev->irq = PHY_POLL;
			}
		}

		/* attach the mac to the phy */
		ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
					 bp->phy_interface);
		if (ret) {
			netdev_err(dev, "Could not attach to PHY\n");
			return ret;
		}
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
		phydev->supported &= ~SUPPORTED_1000baseT_Half;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}

static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np) {
		if (of_phy_is_fixed_link(np)) {
			if (of_phy_register_fixed_link(np) < 0) {
				dev_err(&bp->pdev->dev,
					"broken fixed-link specification\n");
				goto err_out_unregister_bus;
			}
			bp->phy_node = of_node_get(np);

			err = mdiobus_register(bp->mii_bus);
		} else {
			/* try dt phy registration */
			err = of_mdiobus_register(bp->mii_bus, np);

			/* fallback to standard phy registration if no phy were
			 * found during dt phy registration
			 */
			if (!err && !phy_find_first(bp->mii_bus)) {
				for (i = 0; i < PHY_MAX_ADDR; i++) {
					struct phy_device *phydev;

					phydev = mdiobus_scan(bp->mii_bus, i);
					if (IS_ERR(phydev) &&
					    PTR_ERR(phydev) != -ENODEV) {
						err = PTR_ERR(phydev);
						break;
					}
				}

				if (err)
					goto err_out_unregister_bus;
			}
		}
	} else {
		for (i = 0; i < PHY_MAX_ADDR; i++)
			bp->mii_bus->irq[i] = PHY_POLL;

		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = mdiobus_register(bp->mii_bus);
	}

	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	of_node_put(bp->phy_node);
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}

static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low bits of RX address contain the RX_USED bit, clearing
		 * of which allows packet RX. Make sure the high bits are also
		 * visible to HW at that point.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}

static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue *queue = container_of(work, struct macb_queue,
						tx_error_task);
	struct macb *bp = queue->bp;
	struct macb_tx_skb *tx_skb;
	struct macb_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int tail;
	unsigned long flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (unlikely(skb_shinfo(skb)->tx_flags &
					     SKBTX_HW_TSTAMP) &&
				    gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}

static void gem_rx_refill(struct macb *bp)
{
	unsigned int entry;
	struct sk_buff *skb;
	dma_addr_t paddr;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		bp->rx_prepared_head++;
		desc = macb_rx_desc(bp, entry);

		if (!bp->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			bp->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			desc->ctrl = 0;
			/* Setting addr clears RX_USED and allows reception,
			 * make sure ctrl is cleared first to avoid a race.
			 */
			dma_wmb();
			macb_set_addr(bp, desc, paddr);

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->ctrl = 0;
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
		    bp->rx_prepared_head, bp->rx_tail);
}

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this is updated, so we don't have to record
	 * anything.
	 */
}

static int gem_rx(struct macb *bp, int budget)
{
	unsigned int len;
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	int count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
		desc = macb_rx_desc(bp, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);

		if (!rxused)
			break;

		/* Ensure ctrl is at least as up-to-date as rxused */
		dma_rmb();

		ctrl = desc->ctrl;

		bp->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			break;
		}
		skb = bp->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		bp->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(bp);

	return count;
}

static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	desc = macb_rx_desc(bp, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(bp, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}

static inline void macb_init_rx_ring(struct macb *bp)
{
	dma_addr_t addr;
	struct macb_dma_desc *desc = NULL;
	int i;

	addr = bp->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		desc = macb_rx_desc(bp, i);
		macb_set_addr(bp, desc, addr);
		desc->ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	desc->addr |= MACB_BIT(RX_WRAP);
	bp->rx_tail = 0;
}

static int macb_rx(struct macb *bp, int budget)
{
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		/* Ensure ctrl is at least as up-to-date as addr */
		dma_rmb();

		ctrl = desc->ctrl;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(bp);
		macb_writel(bp, RBQP, bp->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}

static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(bp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}

static void macb_tx_restart(struct macb_queue *queue)
{
	unsigned int head = queue->tx_head;
	unsigned int tail = queue->tx_tail;
	struct macb *bp = queue->bp;

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TXUBR));

	if (head == tail)
		return;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&bp->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		if (status & MACB_BIT(TXUBR))
			macb_tx_restart(queue);

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		/* There is a hardware issue under heavy load where DMA can
		 * stop, this causes endless "used buffer descriptor read"
		 * interrupts but it can be cleared by re-enabling RX. See
		 * the at91 manual, section 41.3.1 or the Zynq manual
		 * section 16.7.4 for details.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			/* TODO: Reset the hardware, and maybe move the
			 * netdev_err to a lower-priority context as well
			 * (work queue?)
			 */
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}
		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif

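/* Note on the TSO/UFO path implemented below: the first ring entry carries
 * only the protocol headers (hdrlen bytes) and has the LSO mode and TCP
 * sequence number source programmed into its control word, while the rest
 * of the data is split into buffers of at most bp->max_tx_length with the
 * MSS/MFS value set on those payload descriptors.
 */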
static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = macb_tx_desc(queue, entry);
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
				  skb_transport_offset(skb) +
				  ETH_FCS_LEN;
		else /* TSO */ {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = macb_tx_desc(queue, entry);

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(bp, desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}

static netdev_features_t macb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	unsigned int nr_frags, f;
	unsigned int hdrlen;

	/* Validate LSO compatibility */

	/* there is only one buffer or protocol is not UDP */
	if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);

	/* For UFO only:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}

static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <=2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}

static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso, is_udp = 0;

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);

		/* length of headers */
		if (is_udp)
			/* only queue eth + ip headers separately for UDP */
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}

static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	dma_addr_t addr;
	int i;

	if (!bp->rx_skbuff)
		return;

	for (i = 0; i < bp->rx_ring_size; i++) {
		skb = bp->rx_skbuff[i];

		if (!skb)
			continue;

		desc = macb_rx_desc(bp, i);
		addr = macb_get_addr(bp, desc);

		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = NULL;
	}

	kfree(bp->rx_skbuff);
	bp->rx_skbuff = NULL;
}

static void macb_free_rx_buffers(struct macb *bp)
{
	if (bp->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  bp->rx_ring_size * bp->rx_buffer_size,
				  bp->rx_buffers, bp->rx_buffers_dma);
		bp->rx_buffers = NULL;
	}
}

static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	bp->macbgem_ops.mog_free_rx_buffers(bp);
	if (bp->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
				  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
	}
}

static int gem_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = bp->rx_ring_size * sizeof(struct sk_buff *);
	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_skbuff)
		return -ENOMEM;
	else
		netdev_dbg(bp->dev,
			   "Allocated %d RX struct sk_buff entries at %p\n",
			   bp->rx_ring_size, bp->rx_skbuff);
	return 0;
}

static int macb_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = bp->rx_ring_size * bp->rx_buffer_size;
	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &bp->rx_buffers_dma, GFP_KERNEL);
	if (!bp->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
	return 0;
}

static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES(bp);
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;
	}

	size = RX_RING_BYTES(bp);
	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->rx_ring_dma, GFP_KERNEL);
	if (!bp->rx_ring)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}

static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	struct macb_dma_desc *desc = NULL;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < bp->tx_ring_size; i++) {
			desc = macb_tx_desc(queue, i);
			macb_set_addr(bp, desc, 0);
			desc->ctrl = MACB_BIT(TX_USED);
		}
		desc->ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;
	}

	bp->rx_tail = 0;
	bp->rx_prepared_head = 0;
NF
1903
1904 gem_rx_refill(bp);
1905}
1906
89e5785f
HS
1907static void macb_init_rings(struct macb *bp)
1908{
1909 int i;
dc97a89e 1910 struct macb_dma_desc *desc = NULL;
89e5785f 1911
9ba723b0 1912 macb_init_rx_ring(bp);
89e5785f 1913
b410d13e 1914 for (i = 0; i < bp->tx_ring_size; i++) {
dc97a89e
RO
1915 desc = macb_tx_desc(&bp->queues[0], i);
1916 macb_set_addr(bp, desc, 0);
1917 desc->ctrl = MACB_BIT(TX_USED);
89e5785f 1918 }
21d3515c
BS
1919 bp->queues[0].tx_head = 0;
1920 bp->queues[0].tx_tail = 0;
dc97a89e 1921 desc->ctrl |= MACB_BIT(TX_WRAP);
89e5785f
HS
1922}
1923
1924static void macb_reset_hw(struct macb *bp)
1925{
02c958dd
CP
1926 struct macb_queue *queue;
1927 unsigned int q;
66354941 1928 u32 ctrl = macb_readl(bp, NCR);
02c958dd 1929
64ec42fe 1930 /* Disable RX and TX (XXX: Should we halt the transmission
89e5785f
HS
1931 * more gracefully?)
1932 */
66354941 1933 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
89e5785f
HS
1934
1935 /* Clear the stats registers (XXX: Update stats first?) */
66354941
AH
1936 ctrl |= MACB_BIT(CLRSTAT);
1937
1938 macb_writel(bp, NCR, ctrl);
89e5785f
HS
1939
1940 /* Clear all status flags */
95ebcea6
JE
1941 macb_writel(bp, TSR, -1);
1942 macb_writel(bp, RSR, -1);
89e5785f
HS
1943
1944 /* Disable all interrupts */
02c958dd
CP
1945 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1946 queue_writel(queue, IDR, -1);
1947 queue_readl(queue, ISR);
24468374
NS
1948 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1949 queue_writel(queue, ISR, -1);
02c958dd 1950 }
89e5785f
HS
1951}
1952
70c9f3d4
JI
1953static u32 gem_mdc_clk_div(struct macb *bp)
1954{
1955 u32 config;
1956 unsigned long pclk_hz = clk_get_rate(bp->pclk);
1957
1958 if (pclk_hz <= 20000000)
1959 config = GEM_BF(CLK, GEM_CLK_DIV8);
1960 else if (pclk_hz <= 40000000)
1961 config = GEM_BF(CLK, GEM_CLK_DIV16);
1962 else if (pclk_hz <= 80000000)
1963 config = GEM_BF(CLK, GEM_CLK_DIV32);
1964 else if (pclk_hz <= 120000000)
1965 config = GEM_BF(CLK, GEM_CLK_DIV48);
1966 else if (pclk_hz <= 160000000)
1967 config = GEM_BF(CLK, GEM_CLK_DIV64);
1968 else
1969 config = GEM_BF(CLK, GEM_CLK_DIV96);
1970
1971 return config;
1972}
1973
1974static u32 macb_mdc_clk_div(struct macb *bp)
1975{
1976 u32 config;
1977 unsigned long pclk_hz;
1978
1979 if (macb_is_gem(bp))
1980 return gem_mdc_clk_div(bp);
1981
1982 pclk_hz = clk_get_rate(bp->pclk);
1983 if (pclk_hz <= 20000000)
1984 config = MACB_BF(CLK, MACB_CLK_DIV8);
1985 else if (pclk_hz <= 40000000)
1986 config = MACB_BF(CLK, MACB_CLK_DIV16);
1987 else if (pclk_hz <= 80000000)
1988 config = MACB_BF(CLK, MACB_CLK_DIV32);
1989 else
1990 config = MACB_BF(CLK, MACB_CLK_DIV64);
1991
1992 return config;
1993}
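/* Illustrative arithmetic (a sketch, not driver code): the divider is chosen
 * so the resulting MDC clock stays within the MDIO limit of about 2.5 MHz.
 * For example, a GEM with a 133 MHz pclk selects GEM_CLK_DIV64
 * (133 MHz / 64 ~= 2.08 MHz), while a plain MACB at 50 MHz selects
 * MACB_CLK_DIV32 (50 MHz / 32 ~= 1.56 MHz).
 */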
1994
64ec42fe 1995/* Get the DMA bus width field of the network configuration register that we
757a03c6
JI
1996 * should program. We find the width from decoding the design configuration
1997 * register to find the maximum supported data bus width.
1998 */
1999static u32 macb_dbw(struct macb *bp)
2000{
2001 if (!macb_is_gem(bp))
2002 return 0;
2003
2004 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2005 case 4:
2006 return GEM_BF(DBW, GEM_DBW128);
2007 case 2:
2008 return GEM_BF(DBW, GEM_DBW64);
2009 case 1:
2010 default:
2011 return GEM_BF(DBW, GEM_DBW32);
2012 }
2013}
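/* Illustrative example (not driver code): a GEM whose DCFG1 DBWDEF field
 * reads 2 supports a 64-bit DMA data bus, so this returns
 * GEM_BF(DBW, GEM_DBW64); a plain MACB always gets the 32-bit default
 * (the function returns 0).
 */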
2014
64ec42fe 2015/* Configure the receive DMA engine
b3e3bd71 2016 * - use the correct receive buffer size
e175587f 2017 * - set best burst length for DMA operations
b3e3bd71
NF
2018 * (if not supported by the FIFO, it will fall back to the default)
2019 * - set both rx/tx packet buffers to full memory size
2020 * These are configurable parameters for GEM.
0116da4f
JI
2021 */
2022static void macb_configure_dma(struct macb *bp)
2023{
2024 u32 dmacfg;
2025
2026 if (macb_is_gem(bp)) {
2027 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
1b44791a 2028 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
e175587f
NF
2029 if (bp->dma_burst_length)
2030 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
b3e3bd71 2031 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
a50dad35 2032 dmacfg &= ~GEM_BIT(ENDIA_PKT);
62f6924c 2033
f2ce8a9e 2034 if (bp->native_io)
62f6924c
AC
2035 dmacfg &= ~GEM_BIT(ENDIA_DESC);
2036 else
2037 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
2038
85ff3d87
CP
2039 if (bp->dev->features & NETIF_F_HW_CSUM)
2040 dmacfg |= GEM_BIT(TXCOEN);
2041 else
2042 dmacfg &= ~GEM_BIT(TXCOEN);
fff8019a 2043
1fe4e79c 2044 dmacfg &= ~GEM_BIT(ADDR64);
fff8019a 2045#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
7b429614 2046 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
dc97a89e 2047 dmacfg |= GEM_BIT(ADDR64);
7b429614
RO
2048#endif
2049#ifdef CONFIG_MACB_USE_HWSTAMP
2050 if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
2051 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
fff8019a 2052#endif
e175587f
NF
2053 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2054 dmacfg);
0116da4f
JI
2055 gem_writel(bp, DMACFG, dmacfg);
2056 }
2057}
2058
89e5785f
HS
2059static void macb_init_hw(struct macb *bp)
2060{
02c958dd
CP
2061 struct macb_queue *queue;
2062 unsigned int q;
2063
89e5785f
HS
2064 u32 config;
2065
2066 macb_reset_hw(bp);
314bccc4 2067 macb_set_hwaddr(bp);
89e5785f 2068
70c9f3d4 2069 config = macb_mdc_clk_div(bp);
022be25c
PCK
2070 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2071 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
29bc2e1e 2072 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
89e5785f
HS
2073 config |= MACB_BIT(PAE); /* PAuse Enable */
2074 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
a104a6b3 2075 if (bp->caps & MACB_CAPS_JUMBO)
98b5a0f4
HK
2076 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
2077 else
2078 config |= MACB_BIT(BIG); /* Receive oversized frames */
89e5785f
HS
2079 if (bp->dev->flags & IFF_PROMISC)
2080 config |= MACB_BIT(CAF); /* Copy All Frames */
924ec53c
CP
2081 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
2082 config |= GEM_BIT(RXCOEN);
89e5785f
HS
2083 if (!(bp->dev->flags & IFF_BROADCAST))
2084 config |= MACB_BIT(NBC); /* No BroadCast */
757a03c6 2085 config |= macb_dbw(bp);
89e5785f 2086 macb_writel(bp, NCFGR, config);
a104a6b3 2087 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
98b5a0f4 2088 gem_writel(bp, JML, bp->jumbo_max_len);
26cdfb49
VD
2089 bp->speed = SPEED_10;
2090 bp->duplex = DUPLEX_HALF;
98b5a0f4 2091 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
a104a6b3 2092 if (bp->caps & MACB_CAPS_JUMBO)
98b5a0f4 2093 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
89e5785f 2094
0116da4f
JI
2095 macb_configure_dma(bp);
2096
89e5785f 2097 /* Program the RX and TX descriptor queue base addresses */
dc97a89e 2098 macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
fff8019a 2099#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
7b429614 2100 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
dc97a89e 2101 macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
fff8019a 2102#endif
02c958dd 2103 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
dc97a89e 2104 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
fff8019a 2105#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
7b429614 2106 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
dc97a89e 2107 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
fff8019a 2108#endif
02c958dd
CP
2109
2110 /* Enable interrupts */
2111 queue_writel(queue, IER,
2112 MACB_RX_INT_FLAGS |
2113 MACB_TX_INT_FLAGS |
2114 MACB_BIT(HRESP));
2115 }
89e5785f
HS
2116
2117 /* Enable TX and RX */
66354941 2118 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
89e5785f
HS
2119}
2120
64ec42fe 2121/* The hash address register is 64 bits long and takes up two
446ebd01
PV
2122 * locations in the memory map. The least significant bits are stored
2123 * in EMAC_HSL and the most significant bits in EMAC_HSH.
2124 *
2125 * The unicast hash enable and the multicast hash enable bits in the
2126 * network configuration register enable the reception of hash matched
2127 * frames. The destination address is reduced to a 6 bit index into
2128 * the 64 bit hash register using the following hash function. The
2129 * hash function is an exclusive or of every sixth bit of the
2130 * destination address.
2131 *
2132 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
2133 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
2134 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
2135 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
2136 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
2137 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
2138 *
2139 * da[0] represents the least significant bit of the first byte
2140 * received, that is, the multicast/unicast indicator, and da[47]
2141 * represents the most significant bit of the last byte received. If
2142 * the hash index, hi[n], points to a bit that is set in the hash
2143 * register then the frame will be matched according to whether the
2144 * frame is multicast or unicast. A multicast match will be signalled
2145 * if the multicast hash enable bit is set, da[0] is 1 and the hash
2146 * index points to a bit set in the hash register. A unicast match
2147 * will be signalled if the unicast hash enable bit is set, da[0] is 0
2148 * and the hash index points to a bit set in the hash register. To
2149 * receive all multicast frames, the hash register should be set with
2150 * all ones and the multicast hash enable bit should be set in the
2151 * network configuration register.
2152 */
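/* Illustrative arithmetic for the hash above (a sketch, not driver code):
 * for the all-ones address ff:ff:ff:ff:ff:ff every hi[n] is the XOR of
 * eight 1 bits, so each hi[n] is 0 and the index is 0, i.e. bit 0 of HRB.
 * hash_get_index() below computes the same result bit by bit.
 */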
2153
2154static inline int hash_bit_value(int bitnr, __u8 *addr)
2155{
2156 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2157 return 1;
2158 return 0;
2159}
2160
64ec42fe 2161/* Return the hash index value for the specified address. */
446ebd01
PV
2162static int hash_get_index(__u8 *addr)
2163{
2164 int i, j, bitval;
2165 int hash_index = 0;
2166
2167 for (j = 0; j < 6; j++) {
2168 for (i = 0, bitval = 0; i < 8; i++)
2fa45e22 2169 bitval ^= hash_bit_value(i * 6 + j, addr);
446ebd01
PV
2170
2171 hash_index |= (bitval << j);
2172 }
2173
2174 return hash_index;
2175}
2176
64ec42fe 2177/* Add multicast addresses to the internal multicast-hash table. */
446ebd01
PV
2178static void macb_sethashtable(struct net_device *dev)
2179{
22bedad3 2180 struct netdev_hw_addr *ha;
446ebd01 2181 unsigned long mc_filter[2];
f9dcbcc9 2182 unsigned int bitnr;
446ebd01
PV
2183 struct macb *bp = netdev_priv(dev);
2184
aa50b552
MF
2185 mc_filter[0] = 0;
2186 mc_filter[1] = 0;
446ebd01 2187
22bedad3
JP
2188 netdev_for_each_mc_addr(ha, dev) {
2189 bitnr = hash_get_index(ha->addr);
446ebd01
PV
2190 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2191 }
2192
f75ba50b
JI
2193 macb_or_gem_writel(bp, HRB, mc_filter[0]);
2194 macb_or_gem_writel(bp, HRT, mc_filter[1]);
446ebd01
PV
2195}
2196
64ec42fe 2197/* Enable/Disable promiscuous and multicast modes. */
421d9df0 2198static void macb_set_rx_mode(struct net_device *dev)
446ebd01
PV
2199{
2200 unsigned long cfg;
2201 struct macb *bp = netdev_priv(dev);
2202
2203 cfg = macb_readl(bp, NCFGR);
2204
924ec53c 2205 if (dev->flags & IFF_PROMISC) {
446ebd01
PV
2206 /* Enable promiscuous mode */
2207 cfg |= MACB_BIT(CAF);
924ec53c
CP
2208
2209 /* Disable RX checksum offload */
2210 if (macb_is_gem(bp))
2211 cfg &= ~GEM_BIT(RXCOEN);
2212 } else {
2213 /* Disable promiscuous mode */
446ebd01
PV
2214 cfg &= ~MACB_BIT(CAF);
2215
924ec53c
CP
2216 /* Enable RX checksum offload only if requested */
2217 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2218 cfg |= GEM_BIT(RXCOEN);
2219 }
2220
446ebd01
PV
2221 if (dev->flags & IFF_ALLMULTI) {
2222 /* Enable all multicast mode */
f75ba50b
JI
2223 macb_or_gem_writel(bp, HRB, -1);
2224 macb_or_gem_writel(bp, HRT, -1);
446ebd01 2225 cfg |= MACB_BIT(NCFGR_MTI);
4cd24eaf 2226 } else if (!netdev_mc_empty(dev)) {
446ebd01
PV
2227 /* Enable specific multicasts */
2228 macb_sethashtable(dev);
2229 cfg |= MACB_BIT(NCFGR_MTI);
2230 } else if (dev->flags & (~IFF_ALLMULTI)) {
2231 /* Disable all multicast mode */
f75ba50b
JI
2232 macb_or_gem_writel(bp, HRB, 0);
2233 macb_or_gem_writel(bp, HRT, 0);
446ebd01
PV
2234 cfg &= ~MACB_BIT(NCFGR_MTI);
2235 }
2236
2237 macb_writel(bp, NCFGR, cfg);
2238}
2239
89e5785f
HS
2240static int macb_open(struct net_device *dev)
2241{
2242 struct macb *bp = netdev_priv(dev);
4df95131 2243 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
89e5785f
HS
2244 int err;
2245
c220f8cd 2246 netdev_dbg(bp->dev, "open\n");
89e5785f 2247
03fc4721
NF
2248 /* carrier starts down */
2249 netif_carrier_off(dev);
2250
6c36a707 2251 /* if the PHY is not yet registered, retry later */
0a91281e 2252 if (!dev->phydev)
6c36a707 2253 return -EAGAIN;
1b44791a
NF
2254
2255 /* RX buffers initialization */
4df95131 2256 macb_init_rx_buffer_size(bp, bufsz);
6c36a707 2257
89e5785f
HS
2258 err = macb_alloc_consistent(bp);
2259 if (err) {
c220f8cd
JI
2260 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2261 err);
89e5785f
HS
2262 return err;
2263 }
2264
bea3348e
SH
2265 napi_enable(&bp->napi);
2266
4df95131 2267 bp->macbgem_ops.mog_init_rings(bp);
89e5785f 2268 macb_init_hw(bp);
89e5785f 2269
6c36a707 2270 /* schedule a link state check */
0a91281e 2271 phy_start(dev->phydev);
89e5785f 2272
02c958dd 2273 netif_tx_start_all_queues(dev);
89e5785f 2274
c2594d80
AP
2275 if (bp->ptp_info)
2276 bp->ptp_info->ptp_init(dev);
2277
89e5785f
HS
2278 return 0;
2279}
2280
2281static int macb_close(struct net_device *dev)
2282{
2283 struct macb *bp = netdev_priv(dev);
2284 unsigned long flags;
2285
02c958dd 2286 netif_tx_stop_all_queues(dev);
bea3348e 2287 napi_disable(&bp->napi);
89e5785f 2288
0a91281e
PR
2289 if (dev->phydev)
2290 phy_stop(dev->phydev);
6c36a707 2291
89e5785f
HS
2292 spin_lock_irqsave(&bp->lock, flags);
2293 macb_reset_hw(bp);
2294 netif_carrier_off(dev);
2295 spin_unlock_irqrestore(&bp->lock, flags);
2296
2297 macb_free_consistent(bp);
2298
c2594d80
AP
2299 if (bp->ptp_info)
2300 bp->ptp_info->ptp_remove(dev);
2301
89e5785f
HS
2302 return 0;
2303}
2304
a5898ea0
HK
2305static int macb_change_mtu(struct net_device *dev, int new_mtu)
2306{
a5898ea0
HK
2307 if (netif_running(dev))
2308 return -EBUSY;
2309
a5898ea0
HK
2310 dev->mtu = new_mtu;
2311
2312 return 0;
2313}
2314
a494ed8e
JI
2315static void gem_update_stats(struct macb *bp)
2316{
8bcbf82f 2317 unsigned int i;
a494ed8e 2318 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
a494ed8e 2319
3ff13f1c
XH
2320 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
2321 u32 offset = gem_statistics[i].offset;
7a6e0706 2322 u64 val = bp->macb_reg_readl(bp, offset);
3ff13f1c
XH
2323
2324 bp->ethtool_stats[i] += val;
2325 *p += val;
2326
2327 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
2328 /* Add GEM_OCTTXH, GEM_OCTRXH */
7a6e0706 2329 val = bp->macb_reg_readl(bp, offset + 4);
2fa45e22 2330 bp->ethtool_stats[i] += ((u64)val) << 32;
3ff13f1c
XH
2331 *(++p) += val;
2332 }
2333 }
a494ed8e
JI
2334}
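/* Illustrative arithmetic (a sketch, not driver code): the octet counters
 * are wider than 32 bits and split across a low and a high register. If
 * GEM_OCTTXL reads 0x00001000 and GEM_OCTTXH reads 0x2, the accumulated
 * value is 0x1000 + (0x2 << 32) = 0x200001000 octets, which is how the
 * loop above folds the high word into ethtool_stats[].
 */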
2335
2336static struct net_device_stats *gem_get_stats(struct macb *bp)
2337{
2338 struct gem_stats *hwstat = &bp->hw_stats.gem;
5f1d3a5c 2339 struct net_device_stats *nstat = &bp->dev->stats;
a494ed8e
JI
2340
2341 gem_update_stats(bp);
2342
2343 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2344 hwstat->rx_alignment_errors +
2345 hwstat->rx_resource_errors +
2346 hwstat->rx_overruns +
2347 hwstat->rx_oversize_frames +
2348 hwstat->rx_jabbers +
2349 hwstat->rx_undersized_frames +
2350 hwstat->rx_length_field_frame_errors);
2351 nstat->tx_errors = (hwstat->tx_late_collisions +
2352 hwstat->tx_excessive_collisions +
2353 hwstat->tx_underrun +
2354 hwstat->tx_carrier_sense_errors);
2355 nstat->multicast = hwstat->rx_multicast_frames;
2356 nstat->collisions = (hwstat->tx_single_collision_frames +
2357 hwstat->tx_multiple_collision_frames +
2358 hwstat->tx_excessive_collisions);
2359 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2360 hwstat->rx_jabbers +
2361 hwstat->rx_undersized_frames +
2362 hwstat->rx_length_field_frame_errors);
2363 nstat->rx_over_errors = hwstat->rx_resource_errors;
2364 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2365 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2366 nstat->rx_fifo_errors = hwstat->rx_overruns;
2367 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2368 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2369 nstat->tx_fifo_errors = hwstat->tx_underrun;
2370
2371 return nstat;
2372}
2373
3ff13f1c
XH
2374static void gem_get_ethtool_stats(struct net_device *dev,
2375 struct ethtool_stats *stats, u64 *data)
2376{
2377 struct macb *bp;
2378
2379 bp = netdev_priv(dev);
2380 gem_update_stats(bp);
2fa45e22 2381 memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
3ff13f1c
XH
2382}
2383
2384static int gem_get_sset_count(struct net_device *dev, int sset)
2385{
2386 switch (sset) {
2387 case ETH_SS_STATS:
2388 return GEM_STATS_LEN;
2389 default:
2390 return -EOPNOTSUPP;
2391 }
2392}
2393
2394static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2395{
8bcbf82f 2396 unsigned int i;
3ff13f1c
XH
2397
2398 switch (sset) {
2399 case ETH_SS_STATS:
2400 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2401 memcpy(p, gem_statistics[i].stat_string,
2402 ETH_GSTRING_LEN);
2403 break;
2404 }
2405}
2406
421d9df0 2407static struct net_device_stats *macb_get_stats(struct net_device *dev)
89e5785f
HS
2408{
2409 struct macb *bp = netdev_priv(dev);
5f1d3a5c 2410 struct net_device_stats *nstat = &bp->dev->stats;
a494ed8e
JI
2411 struct macb_stats *hwstat = &bp->hw_stats.macb;
2412
2413 if (macb_is_gem(bp))
2414 return gem_get_stats(bp);
89e5785f 2415
6c36a707
R
2416 /* read stats from hardware */
2417 macb_update_stats(bp);
2418
89e5785f
HS
2419 /* Convert HW stats into netdevice stats */
2420 nstat->rx_errors = (hwstat->rx_fcs_errors +
2421 hwstat->rx_align_errors +
2422 hwstat->rx_resource_errors +
2423 hwstat->rx_overruns +
2424 hwstat->rx_oversize_pkts +
2425 hwstat->rx_jabbers +
2426 hwstat->rx_undersize_pkts +
89e5785f
HS
2427 hwstat->rx_length_mismatch);
2428 nstat->tx_errors = (hwstat->tx_late_cols +
2429 hwstat->tx_excessive_cols +
2430 hwstat->tx_underruns +
716723c2
WS
2431 hwstat->tx_carrier_errors +
2432 hwstat->sqe_test_errors);
89e5785f
HS
2433 nstat->collisions = (hwstat->tx_single_cols +
2434 hwstat->tx_multiple_cols +
2435 hwstat->tx_excessive_cols);
2436 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2437 hwstat->rx_jabbers +
2438 hwstat->rx_undersize_pkts +
2439 hwstat->rx_length_mismatch);
b19f7f71
AS
2440 nstat->rx_over_errors = hwstat->rx_resource_errors +
2441 hwstat->rx_overruns;
89e5785f
HS
2442 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2443 nstat->rx_frame_errors = hwstat->rx_align_errors;
2444 nstat->rx_fifo_errors = hwstat->rx_overruns;
2445 /* XXX: What does "missed" mean? */
2446 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2447 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2448 nstat->tx_fifo_errors = hwstat->tx_underruns;
2449 /* Don't know about heartbeat or window errors... */
2450
2451 return nstat;
2452}
2453
d1d1b53d
NF
2454static int macb_get_regs_len(struct net_device *netdev)
2455{
2456 return MACB_GREGS_NBR * sizeof(u32);
2457}
2458
2459static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2460 void *p)
2461{
2462 struct macb *bp = netdev_priv(dev);
2463 unsigned int tail, head;
2464 u32 *regs_buff = p;
2465
2466 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2467 | MACB_GREGS_VERSION;
2468
b410d13e
ZB
2469 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2470 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
d1d1b53d
NF
2471
2472 regs_buff[0] = macb_readl(bp, NCR);
2473 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
2474 regs_buff[2] = macb_readl(bp, NSR);
2475 regs_buff[3] = macb_readl(bp, TSR);
2476 regs_buff[4] = macb_readl(bp, RBQP);
2477 regs_buff[5] = macb_readl(bp, TBQP);
2478 regs_buff[6] = macb_readl(bp, RSR);
2479 regs_buff[7] = macb_readl(bp, IMR);
2480
2481 regs_buff[8] = tail;
2482 regs_buff[9] = head;
02c958dd
CP
2483 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2484 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
d1d1b53d 2485
ce721a70
NA
2486 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2487 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
64ec42fe 2488 if (macb_is_gem(bp))
d1d1b53d 2489 regs_buff[13] = gem_readl(bp, DMACFG);
d1d1b53d
NF
2490}
2491
3e2a5e15
SP
2492static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2493{
2494 struct macb *bp = netdev_priv(netdev);
2495
2496 wol->supported = 0;
2497 wol->wolopts = 0;
2498
2499 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2500 wol->supported = WAKE_MAGIC;
2501
2502 if (bp->wol & MACB_WOL_ENABLED)
2503 wol->wolopts |= WAKE_MAGIC;
2504 }
2505}
2506
2507static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2508{
2509 struct macb *bp = netdev_priv(netdev);
2510
2511 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2512 (wol->wolopts & ~WAKE_MAGIC))
2513 return -EOPNOTSUPP;
2514
2515 if (wol->wolopts & WAKE_MAGIC)
2516 bp->wol |= MACB_WOL_ENABLED;
2517 else
2518 bp->wol &= ~MACB_WOL_ENABLED;
2519
2520 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2521
2522 return 0;
2523}
2524
8441bb33
ZB
2525static void macb_get_ringparam(struct net_device *netdev,
2526 struct ethtool_ringparam *ring)
2527{
2528 struct macb *bp = netdev_priv(netdev);
2529
2530 ring->rx_max_pending = MAX_RX_RING_SIZE;
2531 ring->tx_max_pending = MAX_TX_RING_SIZE;
2532
2533 ring->rx_pending = bp->rx_ring_size;
2534 ring->tx_pending = bp->tx_ring_size;
2535}
2536
2537static int macb_set_ringparam(struct net_device *netdev,
2538 struct ethtool_ringparam *ring)
2539{
2540 struct macb *bp = netdev_priv(netdev);
2541 u32 new_rx_size, new_tx_size;
2542 unsigned int reset = 0;
2543
2544 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2545 return -EINVAL;
2546
2547 new_rx_size = clamp_t(u32, ring->rx_pending,
2548 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
2549 new_rx_size = roundup_pow_of_two(new_rx_size);
2550
2551 new_tx_size = clamp_t(u32, ring->tx_pending,
2552 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
2553 new_tx_size = roundup_pow_of_two(new_tx_size);
2554
2555 if ((new_tx_size == bp->tx_ring_size) &&
2556 (new_rx_size == bp->rx_ring_size)) {
2557 /* nothing to do */
2558 return 0;
2559 }
2560
2561 if (netif_running(bp->dev)) {
2562 reset = 1;
2563 macb_close(bp->dev);
2564 }
2565
2566 bp->rx_ring_size = new_rx_size;
2567 bp->tx_ring_size = new_tx_size;
2568
2569 if (reset)
2570 macb_open(bp->dev);
2571
2572 return 0;
2573}
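/* Illustrative arithmetic (a sketch, not driver code): a request for 1000 RX
 * and 100 TX descriptors is first clamped to [MIN, MAX] (unchanged here) and
 * then rounded up to a power of two, giving rx_ring_size = 1024 and
 * tx_ring_size = 128; the interface is closed and reopened only if it was
 * already running.
 */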
2574
ab91f0a9
RO
2575#ifdef CONFIG_MACB_USE_HWSTAMP
2576static unsigned int gem_get_tsu_rate(struct macb *bp)
2577{
2578 struct clk *tsu_clk;
2579 unsigned int tsu_rate;
2580
2581 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
2582 if (!IS_ERR(tsu_clk))
2583 tsu_rate = clk_get_rate(tsu_clk);
2584 /* try pclk instead */
2585 else if (!IS_ERR(bp->pclk)) {
2586 tsu_clk = bp->pclk;
2587 tsu_rate = clk_get_rate(tsu_clk);
2588 } else
2589 return -ENOTSUPP;
2590 return tsu_rate;
2591}
2592
2593static s32 gem_get_ptp_max_adj(void)
2594{
2595 return 64000000;
2596}
2597
2598static int gem_get_ts_info(struct net_device *dev,
2599 struct ethtool_ts_info *info)
2600{
2601 struct macb *bp = netdev_priv(dev);
2602
2603 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
2604 ethtool_op_get_ts_info(dev, info);
2605 return 0;
2606 }
2607
2608 info->so_timestamping =
2609 SOF_TIMESTAMPING_TX_SOFTWARE |
2610 SOF_TIMESTAMPING_RX_SOFTWARE |
2611 SOF_TIMESTAMPING_SOFTWARE |
2612 SOF_TIMESTAMPING_TX_HARDWARE |
2613 SOF_TIMESTAMPING_RX_HARDWARE |
2614 SOF_TIMESTAMPING_RAW_HARDWARE;
2615 info->tx_types =
2616 (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
2617 (1 << HWTSTAMP_TX_OFF) |
2618 (1 << HWTSTAMP_TX_ON);
2619 info->rx_filters =
2620 (1 << HWTSTAMP_FILTER_NONE) |
2621 (1 << HWTSTAMP_FILTER_ALL);
2622
2623 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
2624
2625 return 0;
2626}
2627
2628static struct macb_ptp_info gem_ptp_info = {
2629 .ptp_init = gem_ptp_init,
2630 .ptp_remove = gem_ptp_remove,
2631 .get_ptp_max_adj = gem_get_ptp_max_adj,
2632 .get_tsu_rate = gem_get_tsu_rate,
2633 .get_ts_info = gem_get_ts_info,
2634 .get_hwtst = gem_get_hwtst,
2635 .set_hwtst = gem_set_hwtst,
2636};
2637#endif
2638
c2594d80
AP
2639static int macb_get_ts_info(struct net_device *netdev,
2640 struct ethtool_ts_info *info)
2641{
2642 struct macb *bp = netdev_priv(netdev);
2643
2644 if (bp->ptp_info)
2645 return bp->ptp_info->get_ts_info(netdev, info);
2646
2647 return ethtool_op_get_ts_info(netdev, info);
2648}
2649
421d9df0 2650static const struct ethtool_ops macb_ethtool_ops = {
d1d1b53d
NF
2651 .get_regs_len = macb_get_regs_len,
2652 .get_regs = macb_get_regs,
89e5785f 2653 .get_link = ethtool_op_get_link,
17f393e8 2654 .get_ts_info = ethtool_op_get_ts_info,
3e2a5e15
SP
2655 .get_wol = macb_get_wol,
2656 .set_wol = macb_set_wol,
176275a2
PR
2657 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2658 .set_link_ksettings = phy_ethtool_set_link_ksettings,
8441bb33
ZB
2659 .get_ringparam = macb_get_ringparam,
2660 .set_ringparam = macb_set_ringparam,
8cd5a56c 2661};
8cd5a56c 2662
8093b1c3 2663static const struct ethtool_ops gem_ethtool_ops = {
8cd5a56c
XH
2664 .get_regs_len = macb_get_regs_len,
2665 .get_regs = macb_get_regs,
2666 .get_link = ethtool_op_get_link,
c2594d80 2667 .get_ts_info = macb_get_ts_info,
3ff13f1c
XH
2668 .get_ethtool_stats = gem_get_ethtool_stats,
2669 .get_strings = gem_get_ethtool_strings,
2670 .get_sset_count = gem_get_sset_count,
176275a2
PR
2671 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2672 .set_link_ksettings = phy_ethtool_set_link_ksettings,
8441bb33
ZB
2673 .get_ringparam = macb_get_ringparam,
2674 .set_ringparam = macb_set_ringparam,
89e5785f
HS
2675};
2676
421d9df0 2677static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
89e5785f 2678{
0a91281e 2679 struct phy_device *phydev = dev->phydev;
c2594d80 2680 struct macb *bp = netdev_priv(dev);
89e5785f
HS
2681
2682 if (!netif_running(dev))
2683 return -EINVAL;
2684
6c36a707
R
2685 if (!phydev)
2686 return -ENODEV;
89e5785f 2687
c2594d80
AP
2688 if (!bp->ptp_info)
2689 return phy_mii_ioctl(phydev, rq, cmd);
2690
2691 switch (cmd) {
2692 case SIOCSHWTSTAMP:
2693 return bp->ptp_info->set_hwtst(dev, rq, cmd);
2694 case SIOCGHWTSTAMP:
2695 return bp->ptp_info->get_hwtst(dev, rq);
2696 default:
2697 return phy_mii_ioctl(phydev, rq, cmd);
2698 }
89e5785f
HS
2699}
2700
85ff3d87
CP
2701static int macb_set_features(struct net_device *netdev,
2702 netdev_features_t features)
2703{
2704 struct macb *bp = netdev_priv(netdev);
2705 netdev_features_t changed = features ^ netdev->features;
2706
2707 /* TX checksum offload */
2708 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
2709 u32 dmacfg;
2710
2711 dmacfg = gem_readl(bp, DMACFG);
2712 if (features & NETIF_F_HW_CSUM)
2713 dmacfg |= GEM_BIT(TXCOEN);
2714 else
2715 dmacfg &= ~GEM_BIT(TXCOEN);
2716 gem_writel(bp, DMACFG, dmacfg);
2717 }
2718
924ec53c
CP
2719 /* RX checksum offload */
2720 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
2721 u32 netcfg;
2722
2723 netcfg = gem_readl(bp, NCFGR);
2724 if (features & NETIF_F_RXCSUM &&
2725 !(netdev->flags & IFF_PROMISC))
2726 netcfg |= GEM_BIT(RXCOEN);
2727 else
2728 netcfg &= ~GEM_BIT(RXCOEN);
2729 gem_writel(bp, NCFGR, netcfg);
2730 }
2731
85ff3d87
CP
2732 return 0;
2733}
2734
5f1fa992
AB
2735static const struct net_device_ops macb_netdev_ops = {
2736 .ndo_open = macb_open,
2737 .ndo_stop = macb_close,
2738 .ndo_start_xmit = macb_start_xmit,
afc4b13d 2739 .ndo_set_rx_mode = macb_set_rx_mode,
5f1fa992
AB
2740 .ndo_get_stats = macb_get_stats,
2741 .ndo_do_ioctl = macb_ioctl,
2742 .ndo_validate_addr = eth_validate_addr,
a5898ea0 2743 .ndo_change_mtu = macb_change_mtu,
5f1fa992 2744 .ndo_set_mac_address = eth_mac_addr,
6e8cf5c0
TP
2745#ifdef CONFIG_NET_POLL_CONTROLLER
2746 .ndo_poll_controller = macb_poll_controller,
2747#endif
85ff3d87 2748 .ndo_set_features = macb_set_features,
1629dd4f 2749 .ndo_features_check = macb_features_check,
5f1fa992
AB
2750};
2751
64ec42fe 2752/* Configure peripheral capabilities according to device tree
e175587f
NF
2753 * and integration options used
2754 */
64ec42fe
MF
2755static void macb_configure_caps(struct macb *bp,
2756 const struct macb_config *dt_conf)
e175587f
NF
2757{
2758 u32 dcfg;
e175587f 2759
f6970505
NF
2760 if (dt_conf)
2761 bp->caps = dt_conf->caps;
2762
f2ce8a9e 2763 if (hw_is_gem(bp->regs, bp->native_io)) {
e175587f
NF
2764 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2765
e175587f
NF
2766 dcfg = gem_readl(bp, DCFG1);
2767 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
2768 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
2769 dcfg = gem_readl(bp, DCFG2);
2770 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
2771 bp->caps |= MACB_CAPS_FIFO_MODE;
ab91f0a9
RO
2772#ifdef CONFIG_MACB_USE_HWSTAMP
2773 if (gem_has_ptp(bp)) {
7b429614
RO
2774 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
2775 pr_err("GEM doesn't support hardware ptp.\n");
ab91f0a9 2776 else {
7b429614 2777 bp->hw_dma_cap |= HW_DMA_CAP_PTP;
ab91f0a9
RO
2778 bp->ptp_info = &gem_ptp_info;
2779 }
7b429614 2780 }
ab91f0a9 2781#endif
e175587f
NF
2782 }
2783
a35919e1 2784 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
e175587f
NF
2785}
2786
02c958dd 2787static void macb_probe_queues(void __iomem *mem,
f2ce8a9e 2788 bool native_io,
02c958dd
CP
2789 unsigned int *queue_mask,
2790 unsigned int *num_queues)
2791{
2792 unsigned int hw_q;
02c958dd
CP
2793
2794 *queue_mask = 0x1;
2795 *num_queues = 1;
2796
da120112
NF
2797 /* Is it MACB or GEM?
2798 *
2799 * We need to read directly from the hardware here because
2800 * we are early in the probe process and don't have the
2801 * MACB_CAPS_MACB_IS_GEM flag set yet
2802 */
f2ce8a9e 2803 if (!hw_is_gem(mem, native_io))
02c958dd
CP
2804 return;
2805
2806 /* bit 0 is never set but queue 0 always exists */
a50dad35
AC
2807 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
2808
02c958dd
CP
2809 *queue_mask |= 0x1;
2810
2811 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
2812 if (*queue_mask & (1 << hw_q))
2813 (*num_queues)++;
2814}
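/* Illustrative example (not driver code): if the low byte of GEM_DCFG6 reads
 * 0x0e (queues 1-3 present), the forced "queue 0 always exists" bit makes
 * *queue_mask 0x0f and the loop above counts *num_queues = 4. A plain MACB
 * always reports a single queue.
 */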
2815
c69618b3 2816static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
aead88bd 2817 struct clk **hclk, struct clk **tx_clk,
2818 struct clk **rx_clk)
89e5785f 2819{
83a77e9e 2820 struct macb_platform_data *pdata;
421d9df0 2821 int err;
89e5785f 2822
83a77e9e
BF
2823 pdata = dev_get_platdata(&pdev->dev);
2824 if (pdata) {
2825 *pclk = pdata->pclk;
2826 *hclk = pdata->hclk;
2827 } else {
2828 *pclk = devm_clk_get(&pdev->dev, "pclk");
2829 *hclk = devm_clk_get(&pdev->dev, "hclk");
2830 }
2831
4897958f 2832 if (IS_ERR_OR_NULL(*pclk)) {
c69618b3 2833 err = PTR_ERR(*pclk);
4897958f
HK
2834 if (!err)
2835 err = -ENODEV;
2836
94218827 2837 dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
421d9df0 2838 return err;
0cc8674f 2839 }
461845db 2840
4897958f 2841 if (IS_ERR_OR_NULL(*hclk)) {
c69618b3 2842 err = PTR_ERR(*hclk);
4897958f
HK
2843 if (!err)
2844 err = -ENODEV;
2845
94218827 2846 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
421d9df0 2847 return err;
b48e0bab
SB
2848 }
2849
c69618b3
NF
2850 *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
2851 if (IS_ERR(*tx_clk))
2852 *tx_clk = NULL;
e1824dfe 2853
aead88bd 2854 *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
2855 if (IS_ERR(*rx_clk))
2856 *rx_clk = NULL;
2857
c69618b3 2858 err = clk_prepare_enable(*pclk);
b48e0bab 2859 if (err) {
94218827 2860 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
421d9df0 2861 return err;
b48e0bab
SB
2862 }
2863
c69618b3 2864 err = clk_prepare_enable(*hclk);
b48e0bab 2865 if (err) {
94218827 2866 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
421d9df0 2867 goto err_disable_pclk;
89e5785f 2868 }
89e5785f 2869
c69618b3 2870 err = clk_prepare_enable(*tx_clk);
93b31f48 2871 if (err) {
94218827 2872 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
421d9df0 2873 goto err_disable_hclk;
e1824dfe
SB
2874 }
2875
aead88bd 2876 err = clk_prepare_enable(*rx_clk);
2877 if (err) {
94218827 2878 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
aead88bd 2879 goto err_disable_txclk;
2880 }
2881
c69618b3
NF
2882 return 0;
2883
aead88bd 2884err_disable_txclk:
2885 clk_disable_unprepare(*tx_clk);
2886
c69618b3
NF
2887err_disable_hclk:
2888 clk_disable_unprepare(*hclk);
2889
2890err_disable_pclk:
2891 clk_disable_unprepare(*pclk);
2892
2893 return err;
2894}
2895
2896static int macb_init(struct platform_device *pdev)
2897{
2898 struct net_device *dev = platform_get_drvdata(pdev);
2899 unsigned int hw_q, q;
2900 struct macb *bp = netdev_priv(dev);
2901 struct macb_queue *queue;
2902 int err;
2903 u32 val;
2904
b410d13e
ZB
2905 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
2906 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
2907
02c958dd
CP
2908 /* Set the queue register mapping once and for all: queue 0 has a special
2909 * register mapping, but we don't want to test the queue index and then
2910 * compute the corresponding register offset at run time.
2911 */
cf250de0 2912 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
bfa0914a 2913 if (!(bp->queue_mask & (1 << hw_q)))
02c958dd
CP
2914 continue;
2915
cf250de0 2916 queue = &bp->queues[q];
02c958dd
CP
2917 queue->bp = bp;
2918 if (hw_q) {
2919 queue->ISR = GEM_ISR(hw_q - 1);
2920 queue->IER = GEM_IER(hw_q - 1);
2921 queue->IDR = GEM_IDR(hw_q - 1);
2922 queue->IMR = GEM_IMR(hw_q - 1);
2923 queue->TBQP = GEM_TBQP(hw_q - 1);
fff8019a 2924#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
7b429614 2925 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
dc97a89e 2926 queue->TBQPH = GEM_TBQPH(hw_q - 1);
fff8019a 2927#endif
02c958dd
CP
2928 } else {
2929 /* queue0 uses legacy registers */
2930 queue->ISR = MACB_ISR;
2931 queue->IER = MACB_IER;
2932 queue->IDR = MACB_IDR;
2933 queue->IMR = MACB_IMR;
2934 queue->TBQP = MACB_TBQP;
fff8019a 2935#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
7b429614 2936 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
dc97a89e 2937 queue->TBQPH = MACB_TBQPH;
fff8019a 2938#endif
02c958dd
CP
2939 }
2940
2941 /* Get the IRQ: here we use the Linux queue index, not the hardware
2942 * queue index. The queue IRQ definitions in the device tree
2943 * must remove the optional gaps that could exist in the
2944 * hardware queue mask.
2945 */
cf250de0 2946 queue->irq = platform_get_irq(pdev, q);
02c958dd 2947 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
20488239 2948 IRQF_SHARED, dev->name, queue);
02c958dd
CP
2949 if (err) {
2950 dev_err(&pdev->dev,
2951 "Unable to request IRQ %d (error %d)\n",
2952 queue->irq, err);
c69618b3 2953 return err;
02c958dd
CP
2954 }
2955
2956 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
cf250de0 2957 q++;
89e5785f
HS
2958 }
2959
5f1fa992 2960 dev->netdev_ops = &macb_netdev_ops;
bea3348e 2961 netif_napi_add(dev, &bp->napi, macb_poll, 64);
89e5785f 2962
4df95131
NF
2963 /* set up the appropriate routines according to the adapter type */
2964 if (macb_is_gem(bp)) {
a4c35ed3 2965 bp->max_tx_length = GEM_MAX_TX_LEN;
4df95131
NF
2966 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
2967 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
2968 bp->macbgem_ops.mog_init_rings = gem_init_rings;
2969 bp->macbgem_ops.mog_rx = gem_rx;
8cd5a56c 2970 dev->ethtool_ops = &gem_ethtool_ops;
4df95131 2971 } else {
a4c35ed3 2972 bp->max_tx_length = MACB_MAX_TX_LEN;
4df95131
NF
2973 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
2974 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
2975 bp->macbgem_ops.mog_init_rings = macb_init_rings;
2976 bp->macbgem_ops.mog_rx = macb_rx;
8cd5a56c 2977 dev->ethtool_ops = &macb_ethtool_ops;
4df95131
NF
2978 }
2979
a4c35ed3
CP
2980 /* Set features */
2981 dev->hw_features = NETIF_F_SG;
1629dd4f
RO
2982
2983 /* Check LSO capability */
2984 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
2985 dev->hw_features |= MACB_NETIF_LSO;
2986
85ff3d87
CP
2987 /* Checksum offload is only available on gem with packet buffer */
2988 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
924ec53c 2989 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
a4c35ed3
CP
2990 if (bp->caps & MACB_CAPS_SG_DISABLED)
2991 dev->hw_features &= ~NETIF_F_SG;
2992 dev->features = dev->hw_features;
2993
ce721a70
NA
2994 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
2995 val = 0;
2996 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
2997 val = GEM_BIT(RGMII);
2998 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
6bdaa5e9 2999 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
ce721a70 3000 val = MACB_BIT(RMII);
6bdaa5e9 3001 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
ce721a70 3002 val = MACB_BIT(MII);
421d9df0 3003
ce721a70
NA
3004 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
3005 val |= MACB_BIT(CLKEN);
421d9df0 3006
ce721a70
NA
3007 macb_or_gem_writel(bp, USRIO, val);
3008 }
421d9df0 3009
89e5785f 3010 /* Set MII management clock divider */
421d9df0
CP
3011 val = macb_mdc_clk_div(bp);
3012 val |= macb_dbw(bp);
022be25c
PCK
3013 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
3014 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
421d9df0
CP
3015 macb_writel(bp, NCFGR, val);
3016
3017 return 0;
421d9df0
CP
3018}
3019
3020#if defined(CONFIG_OF)
3021/* 1518 rounded up */
3022#define AT91ETHER_MAX_RBUFF_SZ 0x600
3023/* max number of receive buffers */
3024#define AT91ETHER_MAX_RX_DESCR 9
3025
3026/* Initialize and start the Receive and Transmit subsystems */
3027static int at91ether_start(struct net_device *dev)
3028{
3029 struct macb *lp = netdev_priv(dev);
dc97a89e 3030 struct macb_dma_desc *desc;
421d9df0
CP
3031 dma_addr_t addr;
3032 u32 ctl;
3033 int i;
3034
3035 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
3036 (AT91ETHER_MAX_RX_DESCR *
dc97a89e 3037 macb_dma_desc_get_size(lp)),
421d9df0
CP
3038 &lp->rx_ring_dma, GFP_KERNEL);
3039 if (!lp->rx_ring)
3040 return -ENOMEM;
3041
3042 lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
3043 AT91ETHER_MAX_RX_DESCR *
3044 AT91ETHER_MAX_RBUFF_SZ,
3045 &lp->rx_buffers_dma, GFP_KERNEL);
3046 if (!lp->rx_buffers) {
3047 dma_free_coherent(&lp->pdev->dev,
3048 AT91ETHER_MAX_RX_DESCR *
dc97a89e 3049 macb_dma_desc_get_size(lp),
421d9df0
CP
3050 lp->rx_ring, lp->rx_ring_dma);
3051 lp->rx_ring = NULL;
3052 return -ENOMEM;
3053 }
3054
3055 addr = lp->rx_buffers_dma;
3056 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
dc97a89e
RO
3057 desc = macb_rx_desc(lp, i);
3058 macb_set_addr(lp, desc, addr);
3059 desc->ctrl = 0;
421d9df0
CP
3060 addr += AT91ETHER_MAX_RBUFF_SZ;
3061 }
3062
3063 /* Set the Wrap bit on the last descriptor */
dc97a89e 3064 desc->addr |= MACB_BIT(RX_WRAP);
421d9df0
CP
3065
3066 /* Reset buffer index */
3067 lp->rx_tail = 0;
3068
3069 /* Program address of descriptor list in Rx Buffer Queue register */
3070 macb_writel(lp, RBQP, lp->rx_ring_dma);
3071
3072 /* Enable Receive and Transmit */
3073 ctl = macb_readl(lp, NCR);
3074 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
3075
3076 return 0;
3077}
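/* Illustrative arithmetic (a sketch, not driver code): the receive ring set
 * up above occupies AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ =
 * 9 * 1536 = 13824 bytes of buffer memory plus nine descriptors (8 bytes
 * each in the basic, non-extended layout), all from coherent DMA memory.
 */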
3078
3079/* Open the ethernet interface */
3080static int at91ether_open(struct net_device *dev)
3081{
3082 struct macb *lp = netdev_priv(dev);
3083 u32 ctl;
3084 int ret;
3085
3086 /* Clear internal statistics */
3087 ctl = macb_readl(lp, NCR);
3088 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
3089
3090 macb_set_hwaddr(lp);
3091
3092 ret = at91ether_start(dev);
3093 if (ret)
3094 return ret;
3095
3096 /* Enable MAC interrupts */
3097 macb_writel(lp, IER, MACB_BIT(RCOMP) |
3098 MACB_BIT(RXUBR) |
3099 MACB_BIT(ISR_TUND) |
3100 MACB_BIT(ISR_RLE) |
3101 MACB_BIT(TCOMP) |
3102 MACB_BIT(ISR_ROVR) |
3103 MACB_BIT(HRESP));
3104
3105 /* schedule a link state check */
0a91281e 3106 phy_start(dev->phydev);
421d9df0
CP
3107
3108 netif_start_queue(dev);
3109
3110 return 0;
3111}
3112
3113/* Close the interface */
3114static int at91ether_close(struct net_device *dev)
3115{
3116 struct macb *lp = netdev_priv(dev);
3117 u32 ctl;
3118
3119 /* Disable Receiver and Transmitter */
3120 ctl = macb_readl(lp, NCR);
3121 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3122
3123 /* Disable MAC interrupts */
3124 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3125 MACB_BIT(RXUBR) |
3126 MACB_BIT(ISR_TUND) |
3127 MACB_BIT(ISR_RLE) |
3128 MACB_BIT(TCOMP) |
3129 MACB_BIT(ISR_ROVR) |
3130 MACB_BIT(HRESP));
3131
3132 netif_stop_queue(dev);
3133
3134 dma_free_coherent(&lp->pdev->dev,
3135 AT91ETHER_MAX_RX_DESCR *
dc97a89e 3136 macb_dma_desc_get_size(lp),
421d9df0
CP
3137 lp->rx_ring, lp->rx_ring_dma);
3138 lp->rx_ring = NULL;
3139
3140 dma_free_coherent(&lp->pdev->dev,
3141 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
3142 lp->rx_buffers, lp->rx_buffers_dma);
3143 lp->rx_buffers = NULL;
3144
3145 return 0;
3146}
3147
3148/* Transmit packet */
3149static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
3150{
3151 struct macb *lp = netdev_priv(dev);
3152
3153 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
3154 netif_stop_queue(dev);
3155
3156 /* Store packet information (to free when Tx completed) */
3157 lp->skb = skb;
3158 lp->skb_length = skb->len;
3159 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
3160 DMA_TO_DEVICE);
178c7ae9
AK
3161 if (dma_mapping_error(NULL, lp->skb_physaddr)) {
3162 dev_kfree_skb_any(skb);
3163 dev->stats.tx_dropped++;
3164 netdev_err(dev, "%s: DMA mapping error\n", __func__);
3165 return NETDEV_TX_OK;
3166 }
421d9df0
CP
3167
3168 /* Set address of the data in the Transmit Address register */
3169 macb_writel(lp, TAR, lp->skb_physaddr);
3170 /* Set length of the packet in the Transmit Control register */
3171 macb_writel(lp, TCR, skb->len);
89e5785f 3172
421d9df0
CP
3173 } else {
3174 netdev_err(dev, "%s called, but device is busy!\n", __func__);
3175 return NETDEV_TX_BUSY;
3176 }
3177
3178 return NETDEV_TX_OK;
3179}
3180
3181/* Extract received frames from the buffer descriptors and send them to the upper layers.
3182 * (Called from interrupt context)
3183 */
3184static void at91ether_rx(struct net_device *dev)
3185{
3186 struct macb *lp = netdev_priv(dev);
dc97a89e 3187 struct macb_dma_desc *desc;
421d9df0
CP
3188 unsigned char *p_recv;
3189 struct sk_buff *skb;
3190 unsigned int pktlen;
3191
dc97a89e
RO
3192 desc = macb_rx_desc(lp, lp->rx_tail);
3193 while (desc->addr & MACB_BIT(RX_USED)) {
421d9df0 3194 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
dc97a89e 3195 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
421d9df0
CP
3196 skb = netdev_alloc_skb(dev, pktlen + 2);
3197 if (skb) {
3198 skb_reserve(skb, 2);
59ae1d12 3199 skb_put_data(skb, p_recv, pktlen);
421d9df0
CP
3200
3201 skb->protocol = eth_type_trans(skb, dev);
5f1d3a5c
TK
3202 dev->stats.rx_packets++;
3203 dev->stats.rx_bytes += pktlen;
421d9df0
CP
3204 netif_rx(skb);
3205 } else {
5f1d3a5c 3206 dev->stats.rx_dropped++;
421d9df0
CP
3207 }
3208
dc97a89e 3209 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
5f1d3a5c 3210 dev->stats.multicast++;
421d9df0
CP
3211
3212 /* reset ownership bit */
dc97a89e 3213 desc->addr &= ~MACB_BIT(RX_USED);
421d9df0
CP
3214
3215 /* wrap after last buffer */
3216 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
3217 lp->rx_tail = 0;
3218 else
3219 lp->rx_tail++;
dc97a89e
RO
3220
3221 desc = macb_rx_desc(lp, lp->rx_tail);
421d9df0
CP
3222 }
3223}
3224
3225/* MAC interrupt handler */
3226static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
3227{
3228 struct net_device *dev = dev_id;
3229 struct macb *lp = netdev_priv(dev);
3230 u32 intstatus, ctl;
3231
3232 /* MAC Interrupt Status register indicates what interrupts are pending.
3233 * It is automatically cleared once read.
3234 */
3235 intstatus = macb_readl(lp, ISR);
3236
3237 /* Receive complete */
3238 if (intstatus & MACB_BIT(RCOMP))
3239 at91ether_rx(dev);
3240
3241 /* Transmit complete */
3242 if (intstatus & MACB_BIT(TCOMP)) {
3243 /* The TCOMP bit is set even if the transmission failed */
3244 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
5f1d3a5c 3245 dev->stats.tx_errors++;
421d9df0
CP
3246
3247 if (lp->skb) {
3248 dev_kfree_skb_irq(lp->skb);
3249 lp->skb = NULL;
3250 dma_unmap_single(NULL, lp->skb_physaddr,
3251 lp->skb_length, DMA_TO_DEVICE);
5f1d3a5c
TK
3252 dev->stats.tx_packets++;
3253 dev->stats.tx_bytes += lp->skb_length;
421d9df0
CP
3254 }
3255 netif_wake_queue(dev);
3256 }
3257
3258 /* Work-around for EMAC Errata section 41.3.1 */
3259 if (intstatus & MACB_BIT(RXUBR)) {
3260 ctl = macb_readl(lp, NCR);
3261 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
ffac0e96 3262 wmb();
421d9df0
CP
3263 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
3264 }
3265
3266 if (intstatus & MACB_BIT(ISR_ROVR))
3267 netdev_err(dev, "ROVR error\n");
3268
3269 return IRQ_HANDLED;
3270}
3271
3272#ifdef CONFIG_NET_POLL_CONTROLLER
3273static void at91ether_poll_controller(struct net_device *dev)
3274{
3275 unsigned long flags;
3276
3277 local_irq_save(flags);
3278 at91ether_interrupt(dev->irq, dev);
3279 local_irq_restore(flags);
3280}
3281#endif
3282
3283static const struct net_device_ops at91ether_netdev_ops = {
3284 .ndo_open = at91ether_open,
3285 .ndo_stop = at91ether_close,
3286 .ndo_start_xmit = at91ether_start_xmit,
3287 .ndo_get_stats = macb_get_stats,
3288 .ndo_set_rx_mode = macb_set_rx_mode,
3289 .ndo_set_mac_address = eth_mac_addr,
3290 .ndo_do_ioctl = macb_ioctl,
3291 .ndo_validate_addr = eth_validate_addr,
421d9df0
CP
3292#ifdef CONFIG_NET_POLL_CONTROLLER
3293 .ndo_poll_controller = at91ether_poll_controller,
3294#endif
3295};
3296
c69618b3 3297static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
aead88bd 3298 struct clk **hclk, struct clk **tx_clk,
3299 struct clk **rx_clk)
421d9df0 3300{
421d9df0 3301 int err;
421d9df0 3302
c69618b3
NF
3303 *hclk = NULL;
3304 *tx_clk = NULL;
aead88bd 3305 *rx_clk = NULL;
c69618b3
NF
3306
3307 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
3308 if (IS_ERR(*pclk))
3309 return PTR_ERR(*pclk);
421d9df0 3310
c69618b3 3311 err = clk_prepare_enable(*pclk);
421d9df0 3312 if (err) {
94218827 3313 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
421d9df0
CP
3314 return err;
3315 }
3316
c69618b3
NF
3317 return 0;
3318}
3319
3320static int at91ether_init(struct platform_device *pdev)
3321{
3322 struct net_device *dev = platform_get_drvdata(pdev);
3323 struct macb *bp = netdev_priv(dev);
3324 int err;
3325 u32 reg;
3326
a0ccc402
AB
3327 bp->queues[0].bp = bp;
3328
421d9df0
CP
3329 dev->netdev_ops = &at91ether_netdev_ops;
3330 dev->ethtool_ops = &macb_ethtool_ops;
3331
3332 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
3333 0, dev->name, dev);
3334 if (err)
c69618b3 3335 return err;
421d9df0
CP
3336
3337 macb_writel(bp, NCR, 0);
3338
3339 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
3340 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
3341 reg |= MACB_BIT(RM9200_RMII);
3342
3343 macb_writel(bp, NCFGR, reg);
3344
3345 return 0;
421d9df0
CP
3346}
3347
3cef5c5b 3348static const struct macb_config at91sam9260_config = {
6bdaa5e9 3349 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
c69618b3 3350 .clk_init = macb_clk_init,
421d9df0
CP
3351 .init = macb_init,
3352};
3353
60b59852
NF
3354static const struct macb_config sama5d3macb_config = {
3355 .caps = MACB_CAPS_SG_DISABLED
3356 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3357 .clk_init = macb_clk_init,
3358 .init = macb_init,
3359};
3360
3cef5c5b 3361static const struct macb_config pc302gem_config = {
421d9df0
CP
3362 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
3363 .dma_burst_length = 16,
c69618b3 3364 .clk_init = macb_clk_init,
421d9df0
CP
3365 .init = macb_init,
3366};
3367
5c8fe711 3368static const struct macb_config sama5d2_config = {
6bdaa5e9 3369 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
5c8fe711
CP
3370 .dma_burst_length = 16,
3371 .clk_init = macb_clk_init,
3372 .init = macb_init,
3373};
3374
3cef5c5b 3375static const struct macb_config sama5d3_config = {
6bdaa5e9 3376 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
233a1587 3377 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
421d9df0 3378 .dma_burst_length = 16,
c69618b3 3379 .clk_init = macb_clk_init,
421d9df0 3380 .init = macb_init,
233a1587 3381 .jumbo_max_len = 10240,
421d9df0
CP
3382};
3383
3cef5c5b 3384static const struct macb_config sama5d4_config = {
6bdaa5e9 3385 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
421d9df0 3386 .dma_burst_length = 4,
c69618b3 3387 .clk_init = macb_clk_init,
421d9df0
CP
3388 .init = macb_init,
3389};
3390
3cef5c5b 3391static const struct macb_config emac_config = {
c69618b3 3392 .clk_init = at91ether_clk_init,
421d9df0
CP
3393 .init = at91ether_init,
3394};
3395
e611b5b8
NA
3396static const struct macb_config np4_config = {
3397 .caps = MACB_CAPS_USRIO_DISABLED,
3398 .clk_init = macb_clk_init,
3399 .init = macb_init,
3400};
36583eb5 3401
7b61f9c1 3402static const struct macb_config zynqmp_config = {
ab91f0a9
RO
3403 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
3404 MACB_CAPS_JUMBO |
3405 MACB_CAPS_GEM_HAS_PTP,
7b61f9c1
HK
3406 .dma_burst_length = 16,
3407 .clk_init = macb_clk_init,
3408 .init = macb_init,
98b5a0f4 3409 .jumbo_max_len = 10240,
7b61f9c1
HK
3410};
3411
222ca8e0 3412static const struct macb_config zynq_config = {
7baaa909 3413 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
222ca8e0
NS
3414 .dma_burst_length = 16,
3415 .clk_init = macb_clk_init,
3416 .init = macb_init,
3417};
3418
421d9df0
CP
3419static const struct of_device_id macb_dt_ids[] = {
3420 { .compatible = "cdns,at32ap7000-macb" },
3421 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
3422 { .compatible = "cdns,macb" },
e611b5b8 3423 { .compatible = "cdns,np4-macb", .data = &np4_config },
421d9df0
CP
3424 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
3425 { .compatible = "cdns,gem", .data = &pc302gem_config },
5c8fe711 3426 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
421d9df0 3427 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
60b59852 3428 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
421d9df0
CP
3429 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
3430 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
3431 { .compatible = "cdns,emac", .data = &emac_config },
7b61f9c1 3432 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
222ca8e0 3433 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
421d9df0
CP
3434 { /* sentinel */ }
3435};
3436MODULE_DEVICE_TABLE(of, macb_dt_ids);
3437#endif /* CONFIG_OF */
3438
83a77e9e 3439static const struct macb_config default_gem_config = {
ab91f0a9
RO
3440 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
3441 MACB_CAPS_JUMBO |
3442 MACB_CAPS_GEM_HAS_PTP,
83a77e9e
BF
3443 .dma_burst_length = 16,
3444 .clk_init = macb_clk_init,
3445 .init = macb_init,
3446 .jumbo_max_len = 10240,
3447};
3448
421d9df0
CP
3449static int macb_probe(struct platform_device *pdev)
3450{
83a77e9e 3451 const struct macb_config *macb_config = &default_gem_config;
c69618b3 3452 int (*clk_init)(struct platform_device *, struct clk **,
aead88bd 3453 struct clk **, struct clk **, struct clk **)
83a77e9e
BF
3454 = macb_config->clk_init;
3455 int (*init)(struct platform_device *) = macb_config->init;
421d9df0 3456 struct device_node *np = pdev->dev.of_node;
270c499f 3457 struct device_node *phy_node;
aead88bd 3458 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
421d9df0
CP
3459 unsigned int queue_mask, num_queues;
3460 struct macb_platform_data *pdata;
f2ce8a9e 3461 bool native_io;
421d9df0
CP
3462 struct phy_device *phydev;
3463 struct net_device *dev;
3464 struct resource *regs;
3465 void __iomem *mem;
3466 const char *mac;
3467 struct macb *bp;
3468 int err;
3469
f2ce8a9e
AS
3470 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3471 mem = devm_ioremap_resource(&pdev->dev, regs);
3472 if (IS_ERR(mem))
3473 return PTR_ERR(mem);
3474
c69618b3
NF
3475 if (np) {
3476 const struct of_device_id *match;
3477
3478 match = of_match_node(macb_dt_ids, np);
3479 if (match && match->data) {
3480 macb_config = match->data;
3481 clk_init = macb_config->clk_init;
3482 init = macb_config->init;
3483 }
3484 }
3485
aead88bd 3486 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
c69618b3
NF
3487 if (err)
3488 return err;
3489
	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

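	/* Advertise magic-packet Wake-on-LAN only when the device-tree node
	 * opts in with the "magic-packet" property.
	 */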
	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

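	/* Use 44-bit DMA addressing when the GEM advertises a large DMA
	 * address width in DCFG6.
	 */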
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif

	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

	mac = of_get_mac_address(np);
	if (mac)
		ether_addr_copy(bp->dev->dev_addr, mac);
	else
		macb_get_hwaddr(bp);

	/* Power up the PHY if there is a GPIO reset */
	phy_node = of_get_next_available_child(np, NULL);
	if (phy_node) {
		int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);

		if (gpio_is_valid(gpio)) {
			bp->reset_gpio = gpio_to_desc(gpio);
			gpiod_direction_output(bp->reset_gpio, 1);
		}
	}
	of_node_put(phy_node);

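	/* Take the PHY interface mode from the device tree; fall back to
	 * platform data (RMII) or plain MII when it is absent.
	 */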
	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	phydev = dev->phydev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	phy_attached_info(phydev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	return 0;

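/* Error unwind: release resources in the reverse order of acquisition */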
err_out_unregister_mdio:
	phy_disconnect(dev->phydev);
	mdiobus_unregister(bp->mii_bus);
	of_node_put(bp->phy_node);
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	mdiobus_free(bp->mii_bus);

	/* Shutdown the PHY if there is a GPIO reset */
	if (bp->reset_gpio)
		gpiod_set_value(bp->reset_gpio, 0);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);

	return err;
}

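/* Remove: disconnect the PHY, tear down the MDIO bus, unregister the
 * netdev and gate the clocks.
 */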
static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;
	struct device_node *np = pdev->dev.of_node;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		if (np && of_phy_is_fixed_link(np))
			of_phy_deregister_fixed_link(np);
		dev->phydev = NULL;
		mdiobus_free(bp->mii_bus);

		/* Shutdown the PHY if there is a GPIO reset */
		if (bp->reset_gpio)
			gpiod_set_value(bp->reset_gpio, 0);

		unregister_netdev(dev);
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
		of_node_put(bp->phy_node);
		free_netdev(dev);
	}

	return 0;
}

static int __maybe_unused macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

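	/* With Wake-on-LAN armed, keep the clocks running and enable the
	 * magic-packet interrupt; otherwise gate all clocks to save power.
	 */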
	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
	} else {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}

	return 0;
}

static int __maybe_unused macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

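	/* Reverse of suspend: disarm Wake-on-LAN or re-enable the clocks
	 * before reattaching the device.
	 */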
	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}

	netif_device_attach(netdev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);

static struct platform_driver macb_driver = {
	.probe = macb_probe,
	.remove = macb_remove,
	.driver = {
		.name = "macb",
		.of_match_table = of_match_ptr(macb_dt_ids),
		.pm = &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");